xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/event.c (revision eabc0478de71e4e011a5b4e0392741e01d491794)
1 /*	$NetBSD: event.c,v 1.7 2024/08/18 20:47:21 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #include "event2/event-config.h"
30 #include "evconfig-private.h"
31 
32 #ifdef _WIN32
33 #include <winsock2.h>
34 #define WIN32_LEAN_AND_MEAN
35 #include <windows.h>
36 #undef WIN32_LEAN_AND_MEAN
37 #endif
38 #include <sys/types.h>
39 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 #include <sys/time.h>
41 #endif
42 #include <sys/queue.h>
43 #ifdef EVENT__HAVE_SYS_SOCKET_H
44 #include <sys/socket.h>
45 #endif
46 #include <stdio.h>
47 #include <stdlib.h>
48 #ifdef EVENT__HAVE_UNISTD_H
49 #include <unistd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
56 #include <limits.h>
57 #ifdef EVENT__HAVE_FCNTL_H
58 #include <fcntl.h>
59 #endif
60 
61 #include "event2/event.h"
62 #include "event2/event_struct.h"
63 #include "event2/event_compat.h"
64 #include "event-internal.h"
65 #include "defer-internal.h"
66 #include "evthread-internal.h"
67 #include "event2/thread.h"
68 #include "event2/util.h"
69 #include "log-internal.h"
70 #include "evmap-internal.h"
71 #include "iocp-internal.h"
72 #include "changelist-internal.h"
73 #define HT_NO_CACHE_HASH_VALUES
74 #include "ht-internal.h"
75 #include "util-internal.h"
76 
77 
78 #ifdef EVENT__HAVE_WORKING_KQUEUE
79 #include "kqueue-internal.h"
80 #endif
81 
82 #ifdef EVENT__HAVE_EVENT_PORTS
83 extern const struct eventop evportops;
84 #endif
85 #ifdef EVENT__HAVE_SELECT
86 extern const struct eventop selectops;
87 #endif
88 #ifdef EVENT__HAVE_POLL
89 extern const struct eventop pollops;
90 #endif
91 #ifdef EVENT__HAVE_EPOLL
92 extern const struct eventop epollops;
93 #endif
94 #ifdef EVENT__HAVE_WORKING_KQUEUE
95 extern const struct eventop kqops;
96 #endif
97 #ifdef EVENT__HAVE_DEVPOLL
98 extern const struct eventop devpollops;
99 #endif
100 #ifdef _WIN32
101 extern const struct eventop win32ops;
102 #endif
103 
104 /* Array of backends in order of preference. */
105 static const struct eventop *eventops[] = {
106 #ifdef EVENT__HAVE_EVENT_PORTS
107 	&evportops,
108 #endif
109 #ifdef EVENT__HAVE_WORKING_KQUEUE
110 	&kqops,
111 #endif
112 #ifdef EVENT__HAVE_EPOLL
113 	&epollops,
114 #endif
115 #ifdef EVENT__HAVE_DEVPOLL
116 	&devpollops,
117 #endif
118 #ifdef EVENT__HAVE_POLL
119 	&pollops,
120 #endif
121 #ifdef EVENT__HAVE_SELECT
122 	&selectops,
123 #endif
124 #ifdef _WIN32
125 	&win32ops,
126 #endif
127 	NULL
128 };
129 
130 /* Global state; deprecated */
131 EVENT2_EXPORT_SYMBOL
132 struct event_base *event_global_current_base_ = NULL;
133 #define current_base event_global_current_base_
134 
135 /* Global state */
136 
137 static void *event_self_cbarg_ptr_ = NULL;
138 
139 /* Prototypes */
140 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
141 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
142 static void	event_queue_insert_timeout(struct event_base *, struct event *);
143 static void	event_queue_insert_inserted(struct event_base *, struct event *);
144 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
145 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
146 static void	event_queue_remove_timeout(struct event_base *, struct event *);
147 static void	event_queue_remove_inserted(struct event_base *, struct event *);
148 static void event_queue_make_later_events_active(struct event_base *base);
149 
150 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
151 static int event_del_(struct event *ev, int blocking);
152 
153 #ifdef USE_REINSERT_TIMEOUT
154 /* This code seems buggy; only turn it on if we find out what the trouble is. */
155 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
156 #endif
157 
158 static int	event_haveevents(struct event_base *);
159 
160 static int	event_process_active(struct event_base *);
161 
162 static int	timeout_next(struct event_base *, struct timeval **);
163 static void	timeout_process(struct event_base *);
164 
165 static inline void	event_signal_closure(struct event_base *, struct event *ev);
166 static inline void	event_persist_closure(struct event_base *, struct event *ev);
167 
168 static int	evthread_notify_base(struct event_base *base);
169 
170 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
171     struct event *ev);
172 
173 #ifndef EVENT__DISABLE_DEBUG_MODE
174 /* These functions implement a hashtable of which 'struct event *' structures
175  * have been setup or added.  We don't want to trust the content of the struct
176  * event itself, since we're trying to work through cases where an event gets
177  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
178  */
179 
180 struct event_debug_entry {
181 	HT_ENTRY(event_debug_entry) node;
182 	const struct event *ptr;
183 	unsigned added : 1;
184 };
185 
186 static inline unsigned
187 hash_debug_entry(const struct event_debug_entry *e)
188 {
189 	/* We need to do this silliness to convince compilers that we
190 	 * honestly mean to cast e->ptr to an integer, and discard any
191 	 * part of it that doesn't fit in an unsigned.
192 	 */
193 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
194 	/* Our hashtable implementation is pretty sensitive to low bits,
195 	 * and every struct event is over 64 bytes in size, so we can
196 	 * just say >>6. */
197 	return (u >> 6);
198 }
199 
200 static inline int
201 eq_debug_entry(const struct event_debug_entry *a,
202     const struct event_debug_entry *b)
203 {
204 	return a->ptr == b->ptr;
205 }
206 
207 int event_debug_mode_on_ = 0;
208 
209 
210 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
211 /**
212  * @brief debug-mode variable that is set whenever a structure that needs to
213  *        be shared across threads is created (if thread support is enabled).
214  *
215  *        When evthreads are initialized, this variable is checked; if it is
216  *        already non-zero, the evthread setup functions were called out of
217  *        order.
218  *
219  *        See: "Locks and threading" in the documentation.
220  */
221 int event_debug_created_threadable_ctx_ = 0;
222 #endif
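
/*
 * Illustrative note (not part of upstream libevent; kept out of the build):
 * the flag above exists because the evthread setup functions must run before
 * any threadable structure, such as an event_base, is created.  A minimal
 * sketch of the required ordering, using a hypothetical 'base' variable:
 */
#if 0
	evthread_use_pthreads();	/* or evthread_use_windows_threads() on Windows */
	base = event_base_new();	/* only after the call above */
#endif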
223 
224 /* Set if it's too late to enable event_debug_mode. */
225 static int event_debug_mode_too_late = 0;
226 #ifndef EVENT__DISABLE_THREAD_SUPPORT
227 static void *event_debug_map_lock_ = NULL;
228 #endif
229 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
230 	HT_INITIALIZER();
231 
232 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
233     eq_debug_entry)
234 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
235     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
236 
237 /* record that ev is now setup (that is, ready for an add) */
238 static void event_debug_note_setup_(const struct event *ev)
239 {
240 	struct event_debug_entry *dent, find;
241 
242 	if (!event_debug_mode_on_)
243 		goto out;
244 
245 	find.ptr = ev;
246 	EVLOCK_LOCK(event_debug_map_lock_, 0);
247 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
248 	if (dent) {
249 		dent->added = 0;
250 	} else {
251 		dent = mm_malloc(sizeof(*dent));
252 		if (!dent)
253 			event_err(1,
254 			    "Out of memory in debugging code");
255 		dent->ptr = ev;
256 		dent->added = 0;
257 		HT_INSERT(event_debug_map, &global_debug_map, dent);
258 	}
259 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
260 
261 out:
262 	event_debug_mode_too_late = 1;
263 }
264 /* record that ev is no longer setup */
265 static void event_debug_note_teardown_(const struct event *ev)
266 {
267 	struct event_debug_entry *dent, find;
268 
269 	if (!event_debug_mode_on_)
270 		goto out;
271 
272 	find.ptr = ev;
273 	EVLOCK_LOCK(event_debug_map_lock_, 0);
274 	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
275 	if (dent)
276 		mm_free(dent);
277 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
278 
279 out:
280 	event_debug_mode_too_late = 1;
281 }
282 /* record that ev is now added */
283 static void event_debug_note_add_(const struct event *ev)
284 {
285 	struct event_debug_entry *dent,find;
286 
287 	if (!event_debug_mode_on_)
288 		goto out;
289 
290 	find.ptr = ev;
291 	EVLOCK_LOCK(event_debug_map_lock_, 0);
292 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
293 	if (dent) {
294 		dent->added = 1;
295 	} else {
296 		event_errx(EVENT_ERR_ABORT_,
297 		    "%s: noting an add on a non-setup event %p"
298 		    " (events: 0x%x, fd: "EV_SOCK_FMT
299 		    ", flags: 0x%x)",
300 		    __func__, ev, ev->ev_events,
301 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
302 	}
303 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
304 
305 out:
306 	event_debug_mode_too_late = 1;
307 }
308 /* record that ev is no longer added */
309 static void event_debug_note_del_(const struct event *ev)
310 {
311 	struct event_debug_entry *dent, find;
312 
313 	if (!event_debug_mode_on_)
314 		goto out;
315 
316 	find.ptr = ev;
317 	EVLOCK_LOCK(event_debug_map_lock_, 0);
318 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
319 	if (dent) {
320 		dent->added = 0;
321 	} else {
322 		event_errx(EVENT_ERR_ABORT_,
323 		    "%s: noting a del on a non-setup event %p"
324 		    " (events: 0x%x, fd: "EV_SOCK_FMT
325 		    ", flags: 0x%x)",
326 		    __func__, ev, ev->ev_events,
327 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
328 	}
329 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
330 
331 out:
332 	event_debug_mode_too_late = 1;
333 }
334 /* assert that ev is setup (i.e., okay to add or inspect) */
335 static void event_debug_assert_is_setup_(const struct event *ev)
336 {
337 	struct event_debug_entry *dent, find;
338 
339 	if (!event_debug_mode_on_)
340 		return;
341 
342 	find.ptr = ev;
343 	EVLOCK_LOCK(event_debug_map_lock_, 0);
344 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
345 	if (!dent) {
346 		event_errx(EVENT_ERR_ABORT_,
347 		    "%s called on a non-initialized event %p"
348 		    " (events: 0x%x, fd: "EV_SOCK_FMT
349 		    ", flags: 0x%x)",
350 		    __func__, ev, ev->ev_events,
351 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
352 	}
353 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
354 }
355 /* assert that ev is not added (i.e., okay to tear down or set up again) */
356 static void event_debug_assert_not_added_(const struct event *ev)
357 {
358 	struct event_debug_entry *dent, find;
359 
360 	if (!event_debug_mode_on_)
361 		return;
362 
363 	find.ptr = ev;
364 	EVLOCK_LOCK(event_debug_map_lock_, 0);
365 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
366 	if (dent && dent->added) {
367 		event_errx(EVENT_ERR_ABORT_,
368 		    "%s called on an already added event %p"
369 		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
370 		    "flags: 0x%x)",
371 		    __func__, ev, ev->ev_events,
372 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
373 	}
374 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
375 }
376 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
377 {
378 	if (!event_debug_mode_on_)
379 		return;
380 	if (fd < 0)
381 		return;
382 
383 #ifndef _WIN32
384 	{
385 		int flags;
386 		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
387 			EVUTIL_ASSERT(flags & O_NONBLOCK);
388 		}
389 	}
390 #endif
391 }
392 #else
393 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
394 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
395 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
396 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
397 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
398 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
399 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
400 #endif
401 
402 #define EVENT_BASE_ASSERT_LOCKED(base)		\
403 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
404 
405 /* How often (in seconds) do we check for changes in wall clock time relative
406  * to monotonic time?  Set this to -1 for 'never.' */
407 #define CLOCK_SYNC_INTERVAL 5
408 
409 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
410  * on 'base'.  If there is a cached time, return it.  Otherwise, use
411  * clock_gettime or gettimeofday as appropriate to find out the right time.
412  * Return 0 on success, -1 on failure.
413  */
414 static int
415 gettime(struct event_base *base, struct timeval *tp)
416 {
417 	EVENT_BASE_ASSERT_LOCKED(base);
418 
419 	if (base->tv_cache.tv_sec) {
420 		*tp = base->tv_cache;
421 		return (0);
422 	}
423 
424 	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
425 		return -1;
426 	}
427 
428 	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
429 	    < tp->tv_sec) {
430 		struct timeval tv;
431 		evutil_gettimeofday(&tv,NULL);
432 		evutil_timersub(&tv, tp, &base->tv_clock_diff);
433 		base->last_updated_clock_diff = tp->tv_sec;
434 	}
435 
436 	return 0;
437 }
438 
439 int
440 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
441 {
442 	int r;
443 	if (!base) {
444 		base = current_base;
445 		if (!current_base)
446 			return evutil_gettimeofday(tv, NULL);
447 	}
448 
449 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
450 	if (base->tv_cache.tv_sec == 0) {
451 		r = evutil_gettimeofday(tv, NULL);
452 	} else {
453 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
454 		r = 0;
455 	}
456 	EVBASE_RELEASE_LOCK(base, th_base_lock);
457 	return r;
458 }
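
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): from a callback running on a hypothetical 'base' that is currently
 * dispatching, this reuses the loop's cached time instead of making another
 * system call.
 */
#if 0
	struct timeval now;
	event_base_gettimeofday_cached(base, &now);
#endif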
459 
460 /** Make 'base' have no current cached time. */
461 static inline void
462 clear_time_cache(struct event_base *base)
463 {
464 	base->tv_cache.tv_sec = 0;
465 }
466 
467 /** Replace the cached time in 'base' with the current time. */
468 static inline void
469 update_time_cache(struct event_base *base)
470 {
471 	base->tv_cache.tv_sec = 0;
472 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
473 	    gettime(base, &base->tv_cache);
474 }
475 
476 int
477 event_base_update_cache_time(struct event_base *base)
478 {
479 
480 	if (!base) {
481 		base = current_base;
482 		if (!current_base)
483 			return -1;
484 	}
485 
486 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
487 	if (base->running_loop)
488 		update_time_cache(base);
489 	EVBASE_RELEASE_LOCK(base, th_base_lock);
490 	return 0;
491 }
492 
493 static inline struct event *
494 event_callback_to_event(struct event_callback *evcb)
495 {
496 	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
497 	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
498 }
499 
500 static inline struct event_callback *
501 event_to_event_callback(struct event *ev)
502 {
503 	return &ev->ev_evcallback;
504 }
505 
506 struct event_base *
507 event_init(void)
508 {
509 	struct event_base *base = event_base_new_with_config(NULL);
510 
511 	if (base == NULL) {
512 		event_errx(1, "%s: Unable to construct event_base", __func__);
513 		return NULL;
514 	}
515 
516 	current_base = base;
517 
518 	return (base);
519 }
520 
521 struct event_base *
522 event_base_new(void)
523 {
524 	struct event_base *base = NULL;
525 	struct event_config *cfg = event_config_new();
526 	if (cfg) {
527 		base = event_base_new_with_config(cfg);
528 		event_config_free(cfg);
529 	}
530 	return base;
531 }
532 
533 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
534  * avoid. */
535 static int
536 event_config_is_avoided_method(const struct event_config *cfg,
537     const char *method)
538 {
539 	struct event_config_entry *entry;
540 
541 	TAILQ_FOREACH(entry, &cfg->entries, next) {
542 		if (entry->avoid_method != NULL &&
543 		    strcmp(entry->avoid_method, method) == 0)
544 			return (1);
545 	}
546 
547 	return (0);
548 }
549 
550 /** Return true iff 'method' is disabled according to the environment. */
551 static int
552 event_is_method_disabled(const char *name)
553 {
554 	char environment[64];
555 	int i;
556 
557 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
558 	for (i = 8; environment[i] != '\0'; ++i)
559 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
560 	/* Note that evutil_getenv_() ignores the environment entirely if
561 	 * we're setuid */
562 	return (evutil_getenv_(environment) != NULL);
563 }
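
/* For example, with the epoll backend the constructed name is "EVENT_NOEPOLL",
 * so setting that variable in the environment disables epoll (unless the
 * process is setuid, in which case the environment is ignored as noted above).
 */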
564 
565 int
566 event_base_get_features(const struct event_base *base)
567 {
568 	return base->evsel->features;
569 }
570 
571 void
572 event_enable_debug_mode(void)
573 {
574 #ifndef EVENT__DISABLE_DEBUG_MODE
575 	if (event_debug_mode_on_)
576 		event_errx(1, "%s was called twice!", __func__);
577 	if (event_debug_mode_too_late)
578 		event_errx(1, "%s must be called *before* creating any events "
579 		    "or event_bases",__func__);
580 
581 	event_debug_mode_on_ = 1;
582 
583 	HT_INIT(event_debug_map, &global_debug_map);
584 #endif
585 }
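
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): debug mode has to be switched on before the first event or
 * event_base is created, e.g. as one of the first libevent calls made by a
 * hypothetical program:
 */
#if 0
	event_enable_debug_mode();
	base = event_base_new();	/* 'base' is a hypothetical variable */
#endif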
586 
587 void
588 event_disable_debug_mode(void)
589 {
590 #ifndef EVENT__DISABLE_DEBUG_MODE
591 	struct event_debug_entry **ent, *victim;
592 
593 	EVLOCK_LOCK(event_debug_map_lock_, 0);
594 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
595 		victim = *ent;
596 		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
597 		mm_free(victim);
598 	}
599 	HT_CLEAR(event_debug_map, &global_debug_map);
600 	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
601 
602 	event_debug_mode_on_  = 0;
603 #endif
604 }
605 
606 struct event_base *
607 event_base_new_with_config(const struct event_config *cfg)
608 {
609 	int i;
610 	struct event_base *base;
611 	int should_check_environment;
612 
613 #ifndef EVENT__DISABLE_DEBUG_MODE
614 	event_debug_mode_too_late = 1;
615 #endif
616 
617 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
618 		event_warn("%s: calloc", __func__);
619 		return NULL;
620 	}
621 
622 	if (cfg)
623 		base->flags = cfg->flags;
624 
625 	should_check_environment =
626 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
627 
628 	{
629 		struct timeval tmp;
630 		int precise_time =
631 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
632 		int flags;
633 		if (should_check_environment && !precise_time) {
634 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
635 			if (precise_time) {
636 				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
637 			}
638 		}
639 		flags = precise_time ? EV_MONOT_PRECISE : 0;
640 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
641 
642 		gettime(base, &tmp);
643 	}
644 
645 	min_heap_ctor_(&base->timeheap);
646 
647 	base->sig.ev_signal_pair[0] = -1;
648 	base->sig.ev_signal_pair[1] = -1;
649 	base->th_notify_fd[0] = -1;
650 	base->th_notify_fd[1] = -1;
651 
652 	TAILQ_INIT(&base->active_later_queue);
653 
654 	evmap_io_initmap_(&base->io);
655 	evmap_signal_initmap_(&base->sigmap);
656 	event_changelist_init_(&base->changelist);
657 
658 	base->evbase = NULL;
659 
660 	if (cfg) {
661 		memcpy(&base->max_dispatch_time,
662 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
663 		base->limit_callbacks_after_prio =
664 		    cfg->limit_callbacks_after_prio;
665 	} else {
666 		base->max_dispatch_time.tv_sec = -1;
667 		base->limit_callbacks_after_prio = 1;
668 	}
669 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
670 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
671 	} else {
672 		base->max_dispatch_callbacks = INT_MAX;
673 	}
674 	if (base->max_dispatch_callbacks == INT_MAX &&
675 	    base->max_dispatch_time.tv_sec == -1)
676 		base->limit_callbacks_after_prio = INT_MAX;
677 
678 	for (i = 0; eventops[i] && !base->evbase; i++) {
679 		if (cfg != NULL) {
680 			/* determine if this backend should be avoided */
681 			if (event_config_is_avoided_method(cfg,
682 				eventops[i]->name))
683 				continue;
684 			if ((eventops[i]->features & cfg->require_features)
685 			    != cfg->require_features)
686 				continue;
687 		}
688 
689 		/* also obey the environment variables */
690 		if (should_check_environment &&
691 		    event_is_method_disabled(eventops[i]->name))
692 			continue;
693 
694 		base->evsel = eventops[i];
695 
696 		base->evbase = base->evsel->init(base);
697 	}
698 
699 	if (base->evbase == NULL) {
700 		event_warnx("%s: no event mechanism available",
701 		    __func__);
702 		base->evsel = NULL;
703 		event_base_free(base);
704 		return NULL;
705 	}
706 
707 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
708 		event_msgx("libevent using: %s", base->evsel->name);
709 
710 	/* allocate a single active event queue */
711 	if (event_base_priority_init(base, 1) < 0) {
712 		event_base_free(base);
713 		return NULL;
714 	}
715 
716 	/* prepare for threading */
717 
718 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
719 	event_debug_created_threadable_ctx_ = 1;
720 #endif
721 
722 #ifndef EVENT__DISABLE_THREAD_SUPPORT
723 	if (EVTHREAD_LOCKING_ENABLED() &&
724 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
725 		int r;
726 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
727 		EVTHREAD_ALLOC_COND(base->current_event_cond);
728 		r = evthread_make_base_notifiable(base);
729 		if (r<0) {
730 			event_warnx("%s: Unable to make base notifiable.", __func__);
731 			event_base_free(base);
732 			return NULL;
733 		}
734 	}
735 #endif
736 
737 #ifdef _WIN32
738 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
739 		event_base_start_iocp_(base, cfg->n_cpus_hint);
740 #endif
741 
742 	return (base);
743 }
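
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): a typical way callers drive event_base_new_with_config().  The
 * constraints chosen here (avoid "select", require edge-triggering) are only
 * an example.
 */
#if 0
static struct event_base *
example_make_base(void)
{
	struct event_config *cfg = event_config_new();
	struct event_base *base = NULL;

	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");		/* never pick select */
	event_config_require_features(cfg, EV_FEATURE_ET);	/* need edge-triggering */
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;	/* NULL if no backend satisfied the constraints */
}
#endif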
744 
745 int
746 event_base_start_iocp_(struct event_base *base, int n_cpus)
747 {
748 #ifdef _WIN32
749 	if (base->iocp)
750 		return 0;
751 	base->iocp = event_iocp_port_launch_(n_cpus);
752 	if (!base->iocp) {
753 		event_warnx("%s: Couldn't launch IOCP", __func__);
754 		return -1;
755 	}
756 	return 0;
757 #else
758 	return -1;
759 #endif
760 }
761 
762 void
763 event_base_stop_iocp_(struct event_base *base)
764 {
765 #ifdef _WIN32
766 	int rv;
767 
768 	if (!base->iocp)
769 		return;
770 	rv = event_iocp_shutdown_(base->iocp, -1);
771 	EVUTIL_ASSERT(rv >= 0);
772 	base->iocp = NULL;
773 #endif
774 }
775 
776 static int
777 event_base_cancel_single_callback_(struct event_base *base,
778     struct event_callback *evcb,
779     int run_finalizers)
780 {
781 	int result = 0;
782 
783 	if (evcb->evcb_flags & EVLIST_INIT) {
784 		struct event *ev = event_callback_to_event(evcb);
785 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
786 			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
787 			result = 1;
788 		}
789 	} else {
790 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
791 		event_callback_cancel_nolock_(base, evcb, 1);
792 		EVBASE_RELEASE_LOCK(base, th_base_lock);
793 		result = 1;
794 	}
795 
796 	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
797 		switch (evcb->evcb_closure) {
798 		case EV_CLOSURE_EVENT_FINALIZE:
799 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
800 			struct event *ev = event_callback_to_event(evcb);
801 			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
802 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
803 				mm_free(ev);
804 			break;
805 		}
806 		case EV_CLOSURE_CB_FINALIZE:
807 			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
808 			break;
809 		default:
810 			break;
811 		}
812 	}
813 	return result;
814 }
815 
816 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
817 {
818 	int deleted = 0, i;
819 
820 	for (i = 0; i < base->nactivequeues; ++i) {
821 		struct event_callback *evcb, *next;
822 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
823 			next = TAILQ_NEXT(evcb, evcb_active_next);
824 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
825 			evcb = next;
826 		}
827 	}
828 
829 	{
830 		struct event_callback *evcb;
831 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
832 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
833 		}
834 	}
835 
836 	return deleted;
837 }
838 
839 static void
840 event_base_free_(struct event_base *base, int run_finalizers)
841 {
842 	int i, n_deleted=0;
843 	struct event *ev;
844 	/* XXXX grab the lock? If there is contention when one thread frees
845 	 * the base, then the contending thread will be very sad soon. */
846 
847 	/* event_base_free(NULL) is how to free the current_base if we
848 	 * made it with event_init and forgot to hold a reference to it. */
849 	if (base == NULL && current_base)
850 		base = current_base;
851 	/* Don't actually free NULL. */
852 	if (base == NULL) {
853 		event_warnx("%s: no base to free", __func__);
854 		return;
855 	}
856 	/* XXX(niels) - check for internal events first */
857 
858 #ifdef _WIN32
859 	event_base_stop_iocp_(base);
860 #endif
861 
862 	/* threading fds if we have them */
863 	if (base->th_notify_fd[0] != -1) {
864 		event_del(&base->th_notify);
865 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
866 		if (base->th_notify_fd[1] != -1)
867 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
868 		base->th_notify_fd[0] = -1;
869 		base->th_notify_fd[1] = -1;
870 		event_debug_unassign(&base->th_notify);
871 	}
872 
873 	/* Delete all non-internal events. */
874 	evmap_delete_all_(base);
875 
876 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
877 		event_del(ev);
878 		++n_deleted;
879 	}
880 	for (i = 0; i < base->n_common_timeouts; ++i) {
881 		struct common_timeout_list *ctl =
882 		    base->common_timeout_queues[i];
883 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
884 		event_debug_unassign(&ctl->timeout_event);
885 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
886 			struct event *next = TAILQ_NEXT(ev,
887 			    ev_timeout_pos.ev_next_with_common_timeout);
888 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
889 				event_del(ev);
890 				++n_deleted;
891 			}
892 			ev = next;
893 		}
894 		mm_free(ctl);
895 	}
896 	if (base->common_timeout_queues)
897 		mm_free(base->common_timeout_queues);
898 
899 	for (;;) {
900 		/* A finalizer can register yet another finalizer from within
901 		 * itself, and if that finalizer ends up in active_later_queue it
902 		 * can be moved into activequeues, leaving events in activequeues
903 		 * after this function returns, which is not what we want
904 		 * (we even have an assertion for this).
905 		 *
906 		 * A simple case is a filtering bufferevent with an underlying bufferevent.
907 		 */
908 		int i = event_base_free_queues_(base, run_finalizers);
909 		event_debug(("%s: %d events freed", __func__, i));
910 		if (!i) {
911 			break;
912 		}
913 		n_deleted += i;
914 	}
915 
916 	if (n_deleted)
917 		event_debug(("%s: %d events were still set in base",
918 			__func__, n_deleted));
919 
920 	while (LIST_FIRST(&base->once_events)) {
921 		struct event_once *eonce = LIST_FIRST(&base->once_events);
922 		LIST_REMOVE(eonce, next_once);
923 		mm_free(eonce);
924 	}
925 
926 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
927 		base->evsel->dealloc(base);
928 
929 	for (i = 0; i < base->nactivequeues; ++i)
930 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
931 
932 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
933 	min_heap_dtor_(&base->timeheap);
934 
935 	mm_free(base->activequeues);
936 
937 	evmap_io_clear_(&base->io);
938 	evmap_signal_clear_(&base->sigmap);
939 	event_changelist_freemem_(&base->changelist);
940 
941 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
942 	EVTHREAD_FREE_COND(base->current_event_cond);
943 
944 	/* If we're freeing current_base, there won't be a current_base. */
945 	if (base == current_base)
946 		current_base = NULL;
947 	mm_free(base);
948 }
949 
950 void
951 event_base_free_nofinalize(struct event_base *base)
952 {
953 	event_base_free_(base, 0);
954 }
955 
956 void
957 event_base_free(struct event_base *base)
958 {
959 	event_base_free_(base, 1);
960 }
961 
962 /* Fake eventop; used to disable the backend temporarily inside event_reinit
963  * so that we can call event_del() on an event without telling the backend.
964  */
965 static int
966 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
967     short events, void *fdinfo)
968 {
969 	return 0;
970 }
971 const struct eventop nil_eventop = {
972 	"nil",
973 	NULL, /* init: unused. */
974 	NULL, /* add: unused. */
975 	nil_backend_del, /* del: used, so needs to be killed. */
976 	NULL, /* dispatch: unused. */
977 	NULL, /* dealloc: unused. */
978 	0, 0, 0
979 };
980 
981 /* reinitialize the event base after a fork */
982 int
983 event_reinit(struct event_base *base)
984 {
985 	const struct eventop *evsel;
986 	int res = 0;
987 	int was_notifiable = 0;
988 	int had_signal_added = 0;
989 
990 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
991 
992 	evsel = base->evsel;
993 
994 	/* check if this event mechanism requires reinit on the backend */
995 	if (evsel->need_reinit) {
996 		/* We're going to call event_del() on our notify events (the
997 		 * ones that tell about signals and wakeup events).  But we
998 		 * don't actually want to tell the backend to change its
999 		 * state, since it might still share some resource (a kqueue,
1000 		 * an epoll fd) with the parent process, and we don't want to
1001 		 * delete the fds from _that_ backend, so we temporarily stub out
1002 		 * the evsel with a replacement.
1003 		 */
1004 		base->evsel = &nil_eventop;
1005 	}
1006 
1007 	/* We need to re-create a new signal-notification fd and a new
1008 	 * thread-notification fd.  Otherwise, we'll still share those with
1009 	 * the parent process, which would make any notification sent to them
1010 	 * get received by one or both of the event loops, more or less at
1011 	 * random.
1012 	 */
1013 	if (base->sig.ev_signal_added) {
1014 		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1015 		event_debug_unassign(&base->sig.ev_signal);
1016 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1017 		had_signal_added = 1;
1018 		base->sig.ev_signal_added = 0;
1019 	}
1020 	if (base->sig.ev_signal_pair[0] != -1)
1021 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1022 	if (base->sig.ev_signal_pair[1] != -1)
1023 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1024 	if (base->th_notify_fn != NULL) {
1025 		was_notifiable = 1;
1026 		base->th_notify_fn = NULL;
1027 	}
1028 	if (base->th_notify_fd[0] != -1) {
1029 		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1030 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1031 		if (base->th_notify_fd[1] != -1)
1032 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1033 		base->th_notify_fd[0] = -1;
1034 		base->th_notify_fd[1] = -1;
1035 		event_debug_unassign(&base->th_notify);
1036 	}
1037 
1038 	/* Replace the original evsel. */
1039 	base->evsel = evsel;
1040 
1041 	if (evsel->need_reinit) {
1042 		/* Reconstruct the backend through brute-force, so that we do
1043 		 * not share any structures with the parent process. For some
1044 		 * backends, this is necessary: epoll and kqueue, for
1045 		 * instance, have events associated with a kernel
1046 		 * structure. If we didn't reinitialize, we'd share that
1047 		 * structure with the parent process, and any changes made by
1048 		 * the parent would affect our backend's behavior (and vice
1049 		 * versa).
1050 		 */
1051 		if (base->evsel->dealloc != NULL)
1052 			base->evsel->dealloc(base);
1053 		base->evbase = evsel->init(base);
1054 		if (base->evbase == NULL) {
1055 			event_errx(1,
1056 			   "%s: could not reinitialize event mechanism",
1057 			   __func__);
1058 			res = -1;
1059 			goto done;
1060 		}
1061 
1062 		/* Empty out the changelist (if any): we are starting from a
1063 		 * blank slate. */
1064 		event_changelist_freemem_(&base->changelist);
1065 
1066 		/* Tell the event maps to re-inform the backend about all
1067 		 * pending events. This will make the signal notification
1068 		 * event get re-created if necessary. */
1069 		if (evmap_reinit_(base) < 0)
1070 			res = -1;
1071 	} else {
1072 		res = evsig_init_(base);
1073 		if (res == 0 && had_signal_added) {
1074 			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1075 			if (res == 0)
1076 				base->sig.ev_signal_added = 1;
1077 		}
1078 	}
1079 
1080 	/* If we were notifiable before, and nothing just exploded, become
1081 	 * notifiable again. */
1082 	if (was_notifiable && res == 0)
1083 		res = evthread_make_base_notifiable_nolock_(base);
1084 
1085 done:
1086 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1087 	return (res);
1088 }
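
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): the usual pattern is for a child process to call event_reinit() on
 * an inherited, hypothetical 'base' before adding or dispatching anything.
 */
#if 0
	pid_t pid = fork();
	if (pid == 0) {
		if (event_reinit(base) == -1)
			event_warnx("event_reinit failed in the child");
		/* child then adds its own events and dispatches as usual */
	}
#endif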
1089 
1090 /* Get the monotonic time for this event_base' timer */
1091 int
1092 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1093 {
1094   int rv = -1;
1095 
1096   if (base && tv) {
1097     EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1098     rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1099     EVBASE_RELEASE_LOCK(base, th_base_lock);
1100   }
1101 
1102   return rv;
1103 }
1104 
1105 const char **
1106 event_get_supported_methods(void)
1107 {
1108 	static const char **methods = NULL;
1109 	const struct eventop **method;
1110 	const char **tmp;
1111 	int i = 0, k;
1112 
1113 	/* count all methods */
1114 	for (method = &eventops[0]; *method != NULL; ++method) {
1115 		++i;
1116 	}
1117 
1118 	/* allocate one more than we need for the NULL pointer */
1119 	tmp = mm_calloc((i + 1), sizeof(char *));
1120 	if (tmp == NULL)
1121 		return (NULL);
1122 
1123 	/* populate the array with the supported methods */
1124 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1125 		tmp[i++] = eventops[k]->name;
1126 	}
1127 	tmp[i] = NULL;
1128 
1129 	if (methods != NULL)
1130 		mm_free((char**)methods);
1131 
1132 	methods = tmp;
1133 
1134 	return (methods);
1135 }
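
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): the returned array is NULL-terminated, so callers simply walk it.
 */
#if 0
	const char **supported = event_get_supported_methods();
	int j;
	for (j = 0; supported && supported[j]; ++j)
		printf("compiled-in method: %s\n", supported[j]);
#endif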
1136 
1137 struct event_config *
1138 event_config_new(void)
1139 {
1140 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1141 
1142 	if (cfg == NULL)
1143 		return (NULL);
1144 
1145 	TAILQ_INIT(&cfg->entries);
1146 	cfg->max_dispatch_interval.tv_sec = -1;
1147 	cfg->max_dispatch_callbacks = INT_MAX;
1148 	cfg->limit_callbacks_after_prio = 1;
1149 
1150 	return (cfg);
1151 }
1152 
1153 static void
1154 event_config_entry_free(struct event_config_entry *entry)
1155 {
1156 	if (entry->avoid_method != NULL)
1157 		mm_free((char *)entry->avoid_method);
1158 	mm_free(entry);
1159 }
1160 
1161 void
1162 event_config_free(struct event_config *cfg)
1163 {
1164 	struct event_config_entry *entry;
1165 
1166 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1167 		TAILQ_REMOVE(&cfg->entries, entry, next);
1168 		event_config_entry_free(entry);
1169 	}
1170 	mm_free(cfg);
1171 }
1172 
1173 int
1174 event_config_set_flag(struct event_config *cfg, int flag)
1175 {
1176 	if (!cfg)
1177 		return -1;
1178 	cfg->flags |= flag;
1179 	return 0;
1180 }
1181 
1182 int
1183 event_config_avoid_method(struct event_config *cfg, const char *method)
1184 {
1185 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1186 	if (entry == NULL)
1187 		return (-1);
1188 
1189 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1190 		mm_free(entry);
1191 		return (-1);
1192 	}
1193 
1194 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1195 
1196 	return (0);
1197 }
1198 
1199 int
1200 event_config_require_features(struct event_config *cfg,
1201     int features)
1202 {
1203 	if (!cfg)
1204 		return (-1);
1205 	cfg->require_features = features;
1206 	return (0);
1207 }
1208 
1209 int
1210 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1211 {
1212 	if (!cfg)
1213 		return (-1);
1214 	cfg->n_cpus_hint = cpus;
1215 	return (0);
1216 }
1217 
1218 int
1219 event_config_set_max_dispatch_interval(struct event_config *cfg,
1220     const struct timeval *max_interval, int max_callbacks, int min_priority)
1221 {
1222 	if (max_interval)
1223 		memcpy(&cfg->max_dispatch_interval, max_interval,
1224 		    sizeof(struct timeval));
1225 	else
1226 		cfg->max_dispatch_interval.tv_sec = -1;
1227 	cfg->max_dispatch_callbacks =
1228 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1229 	if (min_priority < 0)
1230 		min_priority = 0;
1231 	cfg->limit_callbacks_after_prio = min_priority;
1232 	return (0);
1233 }
1234 
1235 int
1236 event_priority_init(int npriorities)
1237 {
1238 	return event_base_priority_init(current_base, npriorities);
1239 }
1240 
1241 int
1242 event_base_priority_init(struct event_base *base, int npriorities)
1243 {
1244 	int i, r;
1245 	r = -1;
1246 
1247 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1248 
1249 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1250 	    || npriorities >= EVENT_MAX_PRIORITIES)
1251 		goto err;
1252 
1253 	if (npriorities == base->nactivequeues)
1254 		goto ok;
1255 
1256 	if (base->nactivequeues) {
1257 		mm_free(base->activequeues);
1258 		base->nactivequeues = 0;
1259 	}
1260 
1261 	/* Allocate our priority queues */
1262 	base->activequeues = (struct evcallback_list *)
1263 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1264 	if (base->activequeues == NULL) {
1265 		event_warn("%s: calloc", __func__);
1266 		goto err;
1267 	}
1268 	base->nactivequeues = npriorities;
1269 
1270 	for (i = 0; i < base->nactivequeues; ++i) {
1271 		TAILQ_INIT(&base->activequeues[i]);
1272 	}
1273 
1274 ok:
1275 	r = 0;
1276 err:
1277 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1278 	return (r);
1279 }
1280 
1281 int
1282 event_base_get_npriorities(struct event_base *base)
1283 {
1284 
1285 	int n;
1286 	if (base == NULL)
1287 		base = current_base;
1288 
1289 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1290 	n = base->nactivequeues;
1291 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1292 	return (n);
1293 }
1294 
1295 int
1296 event_base_get_num_events(struct event_base *base, unsigned int type)
1297 {
1298 	int r = 0;
1299 
1300 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1301 
1302 	if (type & EVENT_BASE_COUNT_ACTIVE)
1303 		r += base->event_count_active;
1304 
1305 	if (type & EVENT_BASE_COUNT_VIRTUAL)
1306 		r += base->virtual_event_count;
1307 
1308 	if (type & EVENT_BASE_COUNT_ADDED)
1309 		r += base->event_count;
1310 
1311 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1312 
1313 	return r;
1314 }
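
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): the count flags can be OR'd together; 'base' is a hypothetical
 * variable.
 */
#if 0
	int n = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED | EVENT_BASE_COUNT_ACTIVE);
#endif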
1315 
1316 int
1317 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1318 {
1319 	int r = 0;
1320 
1321 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1322 
1323 	if (type & EVENT_BASE_COUNT_ACTIVE) {
1324 		r += base->event_count_active_max;
1325 		if (clear)
1326 			base->event_count_active_max = 0;
1327 	}
1328 
1329 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
1330 		r += base->virtual_event_count_max;
1331 		if (clear)
1332 			base->virtual_event_count_max = 0;
1333 	}
1334 
1335 	if (type & EVENT_BASE_COUNT_ADDED) {
1336 		r += base->event_count_max;
1337 		if (clear)
1338 			base->event_count_max = 0;
1339 	}
1340 
1341 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1342 
1343 	return r;
1344 }
1345 
1346 /* Returns true iff we're currently watching any events. */
1347 static int
1348 event_haveevents(struct event_base *base)
1349 {
1350 	/* Caller must hold th_base_lock */
1351 	return (base->virtual_event_count > 0 || base->event_count > 0);
1352 }
1353 
1354 /* "closure" function called when processing active signal events */
1355 static inline void
1356 event_signal_closure(struct event_base *base, struct event *ev)
1357 {
1358 	short ncalls;
1359 	int should_break;
1360 
1361 	/* Allows deletes to work */
1362 	ncalls = ev->ev_ncalls;
1363 	if (ncalls != 0)
1364 		ev->ev_pncalls = &ncalls;
1365 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1366 	while (ncalls) {
1367 		ncalls--;
1368 		ev->ev_ncalls = ncalls;
1369 		if (ncalls == 0)
1370 			ev->ev_pncalls = NULL;
1371 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1372 
1373 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1374 		should_break = base->event_break;
1375 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1376 
1377 		if (should_break) {
1378 			if (ncalls != 0)
1379 				ev->ev_pncalls = NULL;
1380 			return;
1381 		}
1382 	}
1383 }
1384 
1385 /* Common timeouts are special timeouts that are handled as queues rather than
1386  * in the minheap.  This is more efficient than the minheap if we happen to
1387  * know that we're going to get several thousand timeout events all with
1388  * the same timeout value.
1389  *
1390  * Since all our timeout handling code assumes timevals can be copied,
1391  * assigned, etc., we can't use a "magic pointer" to encode these common
1392  * timeouts.  Searching through a list to see if every timeout is common could
1393  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1394  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1395  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1396  * of index into the event_base's array of common timeouts.
1397  */
1398 
1399 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1400 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1401 #define COMMON_TIMEOUT_IDX_SHIFT 20
1402 #define COMMON_TIMEOUT_MASK     0xf0000000
1403 #define COMMON_TIMEOUT_MAGIC    0x50000000
1404 
1405 #define COMMON_TIMEOUT_IDX(tv) \
1406 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
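
/*
 * Illustrative layout of an encoded tv_usec value (added comment, not part of
 * upstream libevent): the top nibble holds the magic value 0x5, the next
 * eight bits hold the index into common_timeout_queues, and the low 20 bits
 * hold the real microseconds.
 *
 *   bits 31..28   bits 27..20   bits 19..0
 *  +------------+-------------+--------------+
 *  |    0x5     |    index    | microseconds |
 *  +------------+-------------+--------------+
 *
 * Caller-side sketch (kept out of the build; 'base' and 'ev' are hypothetical
 * variables): obtain the encoded timeval once, then reuse it for every event
 * that shares the duration.
 */
#if 0
	struct timeval five_sec = { 5, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &five_sec);
	event_add(ev, common ? common : &five_sec);
#endif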
1407 
1408 /** Return true iff 'tv' is a common timeout in 'base' */
1409 static inline int
1410 is_common_timeout(const struct timeval *tv,
1411     const struct event_base *base)
1412 {
1413 	int idx;
1414 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1415 		return 0;
1416 	idx = COMMON_TIMEOUT_IDX(tv);
1417 	return idx < base->n_common_timeouts;
1418 }
1419 
1420 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1421  * one is a common timeout. */
1422 static inline int
1423 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1424 {
1425 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1426 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1427 }
1428 
1429 /** Requires that 'tv' is a common timeout.  Return the corresponding
1430  * common_timeout_list. */
1431 static inline struct common_timeout_list *
1432 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1433 {
1434 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1435 }
1436 
1437 #if 0
1438 static inline int
1439 common_timeout_ok(const struct timeval *tv,
1440     struct event_base *base)
1441 {
1442 	const struct timeval *expect =
1443 	    &get_common_timeout_list(base, tv)->duration;
1444 	return tv->tv_sec == expect->tv_sec &&
1445 	    tv->tv_usec == expect->tv_usec;
1446 }
1447 #endif
1448 
1449 /* Add the timeout for the first event in the given common timeout list to the
1450  * event_base's minheap. */
1451 static void
1452 common_timeout_schedule(struct common_timeout_list *ctl,
1453     const struct timeval *now, struct event *head)
1454 {
1455 	struct timeval timeout = head->ev_timeout;
1456 	timeout.tv_usec &= MICROSECONDS_MASK;
1457 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1458 }
1459 
1460 /* Callback: invoked when the timeout for a common timeout queue triggers.
1461  * This means that (at least) the first event in that queue should be run,
1462  * and the timeout should be rescheduled if there are more events. */
1463 static void
1464 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1465 {
1466 	struct timeval now;
1467 	struct common_timeout_list *ctl = arg;
1468 	struct event_base *base = ctl->base;
1469 	struct event *ev = NULL;
1470 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1471 	gettime(base, &now);
1472 	while (1) {
1473 		ev = TAILQ_FIRST(&ctl->events);
1474 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1475 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1476 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1477 			break;
1478 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1479 		event_active_nolock_(ev, EV_TIMEOUT, 1);
1480 	}
1481 	if (ev)
1482 		common_timeout_schedule(ctl, &now, ev);
1483 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1484 }
1485 
1486 #define MAX_COMMON_TIMEOUTS 256
1487 
1488 const struct timeval *
1489 event_base_init_common_timeout(struct event_base *base,
1490     const struct timeval *duration)
1491 {
1492 	int i;
1493 	struct timeval tv;
1494 	const struct timeval *result=NULL;
1495 	struct common_timeout_list *new_ctl;
1496 
1497 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1498 	if (duration->tv_usec > 1000000) {
1499 		memcpy(&tv, duration, sizeof(struct timeval));
1500 		if (is_common_timeout(duration, base))
1501 			tv.tv_usec &= MICROSECONDS_MASK;
1502 		tv.tv_sec += tv.tv_usec / 1000000;
1503 		tv.tv_usec %= 1000000;
1504 		duration = &tv;
1505 	}
1506 	for (i = 0; i < base->n_common_timeouts; ++i) {
1507 		const struct common_timeout_list *ctl =
1508 		    base->common_timeout_queues[i];
1509 		if (duration->tv_sec == ctl->duration.tv_sec &&
1510 		    duration->tv_usec ==
1511 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1512 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1513 			result = &ctl->duration;
1514 			goto done;
1515 		}
1516 	}
1517 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1518 		event_warnx("%s: Too many common timeouts already in use; "
1519 		    "we only support %d per event_base", __func__,
1520 		    MAX_COMMON_TIMEOUTS);
1521 		goto done;
1522 	}
1523 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1524 		int n = base->n_common_timeouts < 16 ? 16 :
1525 		    base->n_common_timeouts*2;
1526 		struct common_timeout_list **newqueues =
1527 		    mm_realloc(base->common_timeout_queues,
1528 			n*sizeof(struct common_timeout_queue *));
1529 		if (!newqueues) {
1530 			event_warn("%s: realloc",__func__);
1531 			goto done;
1532 		}
1533 		base->n_common_timeouts_allocated = n;
1534 		base->common_timeout_queues = newqueues;
1535 	}
1536 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1537 	if (!new_ctl) {
1538 		event_warn("%s: calloc",__func__);
1539 		goto done;
1540 	}
1541 	TAILQ_INIT(&new_ctl->events);
1542 	new_ctl->duration.tv_sec = duration->tv_sec;
1543 	new_ctl->duration.tv_usec =
1544 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1545 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1546 	evtimer_assign(&new_ctl->timeout_event, base,
1547 	    common_timeout_callback, new_ctl);
1548 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1549 	event_priority_set(&new_ctl->timeout_event, 0);
1550 	new_ctl->base = base;
1551 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1552 	result = &new_ctl->duration;
1553 
1554 done:
1555 	if (result)
1556 		EVUTIL_ASSERT(is_common_timeout(result, base));
1557 
1558 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1559 	return result;
1560 }
1561 
1562 /* Closure function invoked when we're activating a persistent event. */
1563 static inline void
1564 event_persist_closure(struct event_base *base, struct event *ev)
1565 {
1566 	void (*evcb_callback)(evutil_socket_t, short, void *);
1567 
1568         // Other fields of *ev that must be copied before we release the lock and run the callback
1569         evutil_socket_t evcb_fd;
1570         short evcb_res;
1571         void *evcb_arg;
1572 
1573 	/* reschedule the persistent event if we have a timeout. */
1574 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1575 		/* If there was a timeout, we want it to run at an interval of
1576 		 * ev_io_timeout after the last time it was _scheduled_ for,
1577 		 * not ev_io_timeout after _now_.  If it fired for another
1578 		 * reason, though, the timeout ought to start ticking _now_. */
1579 		struct timeval run_at, relative_to, delay, now;
1580 		ev_uint32_t usec_mask = 0;
1581 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1582 			&ev->ev_io_timeout));
1583 		gettime(base, &now);
1584 		if (is_common_timeout(&ev->ev_timeout, base)) {
1585 			delay = ev->ev_io_timeout;
1586 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1587 			delay.tv_usec &= MICROSECONDS_MASK;
1588 			if (ev->ev_res & EV_TIMEOUT) {
1589 				relative_to = ev->ev_timeout;
1590 				relative_to.tv_usec &= MICROSECONDS_MASK;
1591 			} else {
1592 				relative_to = now;
1593 			}
1594 		} else {
1595 			delay = ev->ev_io_timeout;
1596 			if (ev->ev_res & EV_TIMEOUT) {
1597 				relative_to = ev->ev_timeout;
1598 			} else {
1599 				relative_to = now;
1600 			}
1601 		}
1602 		evutil_timeradd(&relative_to, &delay, &run_at);
1603 		if (evutil_timercmp(&run_at, &now, <)) {
1604 			/* Looks like we missed at least one invocation due to
1605 			 * a clock jump, not running the event loop for a
1606 			 * while, really slow callbacks, or
1607 			 * something. Reschedule relative to now.
1608 			 */
1609 			evutil_timeradd(&now, &delay, &run_at);
1610 		}
1611 		run_at.tv_usec |= usec_mask;
1612 		event_add_nolock_(ev, &run_at, 1);
1613 	}
1614 
1615 	// Save our callback before we release the lock
1616 	evcb_callback = ev->ev_callback;
1617         evcb_fd = ev->ev_fd;
1618         evcb_res = ev->ev_res;
1619         evcb_arg = ev->ev_arg;
1620 
1621 	// Release the lock
1622  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1623 
1624 	// Execute the callback
1625         (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1626 }
1627 
1628 /*
1629   Helper for event_process_active to process all the events in a single queue,
1630   releasing the lock as we go.  This function requires that the lock be held
1631   when it's invoked.  Returns -1 if we get a signal or an event_break that
1632   means we should stop processing any active events now.  Otherwise returns
1633   the number of non-internal event_callbacks that we processed.
1634 */
1635 static int
1636 event_process_active_single_queue(struct event_base *base,
1637     struct evcallback_list *activeq,
1638     int max_to_process, const struct timeval *endtime)
1639 {
1640 	struct event_callback *evcb;
1641 	int count = 0;
1642 
1643 	EVUTIL_ASSERT(activeq != NULL);
1644 
1645 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1646 		struct event *ev=NULL;
1647 		if (evcb->evcb_flags & EVLIST_INIT) {
1648 			ev = event_callback_to_event(evcb);
1649 
1650 			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1651 				event_queue_remove_active(base, evcb);
1652 			else
1653 				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1654 			event_debug((
1655 			    "event_process_active: event: %p, %s%s%scall %p",
1656 			    ev,
1657 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1658 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1659 			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1660 			    ev->ev_callback));
1661 		} else {
1662 			event_queue_remove_active(base, evcb);
1663 			event_debug(("event_process_active: event_callback %p, "
1664 				"closure %d, call %p",
1665 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1666 		}
1667 
1668 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1669 			++count;
1670 
1671 
1672 		base->current_event = evcb;
1673 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1674 		base->current_event_waiters = 0;
1675 #endif
1676 
1677 		switch (evcb->evcb_closure) {
1678 		case EV_CLOSURE_EVENT_SIGNAL:
1679 			EVUTIL_ASSERT(ev != NULL);
1680 			event_signal_closure(base, ev);
1681 			break;
1682 		case EV_CLOSURE_EVENT_PERSIST:
1683 			EVUTIL_ASSERT(ev != NULL);
1684 			event_persist_closure(base, ev);
1685 			break;
1686 		case EV_CLOSURE_EVENT: {
1687 			void (*evcb_callback)(evutil_socket_t, short, void *);
1688 			short res;
1689 			EVUTIL_ASSERT(ev != NULL);
1690 			evcb_callback = *ev->ev_callback;
1691 			res = ev->ev_res;
1692 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1693 			evcb_callback(ev->ev_fd, res, ev->ev_arg);
1694 		}
1695 		break;
1696 		case EV_CLOSURE_CB_SELF: {
1697 			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1698 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1699 			evcb_selfcb(evcb, evcb->evcb_arg);
1700 		}
1701 		break;
1702 		case EV_CLOSURE_EVENT_FINALIZE:
1703 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1704 			void (*evcb_evfinalize)(struct event *, void *);
1705 			int evcb_closure = evcb->evcb_closure;
1706 			EVUTIL_ASSERT(ev != NULL);
1707 			base->current_event = NULL;
1708 			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1709 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1710 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1711 			event_debug_note_teardown_(ev);
1712 			evcb_evfinalize(ev, ev->ev_arg);
1713 			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1714 				mm_free(ev);
1715 		}
1716 		break;
1717 		case EV_CLOSURE_CB_FINALIZE: {
1718 			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1719 			base->current_event = NULL;
1720 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1721 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1722 			evcb_cbfinalize(evcb, evcb->evcb_arg);
1723 		}
1724 		break;
1725 		default:
1726 			EVUTIL_ASSERT(0);
1727 		}
1728 
1729 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1730 		base->current_event = NULL;
1731 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1732 		if (base->current_event_waiters) {
1733 			base->current_event_waiters = 0;
1734 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1735 		}
1736 #endif
1737 
1738 		if (base->event_break)
1739 			return -1;
1740 		if (count >= max_to_process)
1741 			return count;
1742 		if (count && endtime) {
1743 			struct timeval now;
1744 			update_time_cache(base);
1745 			gettime(base, &now);
1746 			if (evutil_timercmp(&now, endtime, >=))
1747 				return count;
1748 		}
1749 		if (base->event_continue)
1750 			break;
1751 	}
1752 	return count;
1753 }
1754 
1755 /*
1756  * Active events are stored in priority queues.  Lower priorities are always
1757  * processed before higher priorities.  Low priority events can starve high
1758  * priority ones.
1759  */
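
/*
 * Illustrative usage sketch (not part of upstream libevent; kept out of the
 * build): priorities are enabled per-base and then assigned per-event;
 * 'base' and 'urgent_ev' are hypothetical variables.
 */
#if 0
	event_base_priority_init(base, 2);	/* priorities 0 (runs first) and 1 */
	event_priority_set(urgent_ev, 0);	/* default is nactivequeues / 2 */
#endif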
1760 
1761 static int
1762 event_process_active(struct event_base *base)
1763 {
1764 	/* Caller must hold th_base_lock */
1765 	struct evcallback_list *activeq = NULL;
1766 	int i, c = 0;
1767 	const struct timeval *endtime;
1768 	struct timeval tv;
1769 	const int maxcb = base->max_dispatch_callbacks;
1770 	const int limit_after_prio = base->limit_callbacks_after_prio;
1771 	if (base->max_dispatch_time.tv_sec >= 0) {
1772 		update_time_cache(base);
1773 		gettime(base, &tv);
1774 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1775 		endtime = &tv;
1776 	} else {
1777 		endtime = NULL;
1778 	}
1779 
1780 	for (i = 0; i < base->nactivequeues; ++i) {
1781 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1782 			base->event_running_priority = i;
1783 			activeq = &base->activequeues[i];
1784 			if (i < limit_after_prio)
1785 				c = event_process_active_single_queue(base, activeq,
1786 				    INT_MAX, NULL);
1787 			else
1788 				c = event_process_active_single_queue(base, activeq,
1789 				    maxcb, endtime);
1790 			if (c < 0) {
1791 				goto done;
1792 			} else if (c > 0)
1793 				break; /* Processed a real event; do not
1794 					* consider lower-priority events */
1795 			/* If we get here, all of the events we processed
1796 			 * were internal.  Continue. */
1797 		}
1798 	}
1799 
1800 done:
1801 	base->event_running_priority = -1;
1802 
1803 	return c;
1804 }
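/*
 * Illustrative sketch (not part of this file): the max_dispatch_callbacks
 * and limit_callbacks_after_prio limits consulted above normally come from
 * the public config API.  A minimal sketch, assuming libevent's
 * event_config_set_max_dispatch_interval():
 *
 *	struct event_base *base = NULL;
 *	struct event_config *cfg = event_config_new();
 *	if (cfg != NULL) {
 *		struct timeval msec_10 = { 0, 10000 };
 *		// Re-check for higher-priority work every 10 msec or after
 *		// 16 callbacks, but only below priority 1.
 *		event_config_set_max_dispatch_interval(cfg, &msec_10, 16, 1);
 *		base = event_base_new_with_config(cfg);
 *		event_config_free(cfg);
 *	}
 */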
1805 
1806 /*
1807  * Wait continuously for events.  We exit only if no events are left.
1808  */
1809 
1810 int
1811 event_dispatch(void)
1812 {
1813 	return (event_loop(0));
1814 }
1815 
1816 int
1817 event_base_dispatch(struct event_base *event_base)
1818 {
1819 	return (event_base_loop(event_base, 0));
1820 }
1821 
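/*
 * Illustrative usage sketch (not part of this file): event_base_dispatch()
 * runs the loop until no events remain or the loop is told to stop.  A
 * minimal caller, assuming a hypothetical callback read_cb():
 *
 *	int
 *	run_loop(struct event_base *base, evutil_socket_t fd)
 *	{
 *		struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *		    read_cb, NULL);
 *		if (ev == NULL)
 *			return -1;
 *		if (event_add(ev, NULL) < 0) {
 *			event_free(ev);
 *			return -1;
 *		}
 *		return event_base_dispatch(base);
 *	}
 */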
1822 const char *
1823 event_base_get_method(const struct event_base *base)
1824 {
1825 	EVUTIL_ASSERT(base);
1826 	return (base->evsel->name);
1827 }
1828 
1829 /** Callback: used to implement event_base_loopexit by telling the event_base
1830  * that it's time to exit its loop. */
1831 static void
1832 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1833 {
1834 	struct event_base *base = arg;
1835 	base->event_gotterm = 1;
1836 }
1837 
1838 int
1839 event_loopexit(const struct timeval *tv)
1840 {
1841 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1842 		    current_base, tv));
1843 }
1844 
1845 int
1846 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1847 {
1848 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1849 		    event_base, tv));
1850 }
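/*
 * Illustrative sketch (not part of this file): with a timeout,
 * event_base_loopexit() arranges for event_loopexit_cb() above to run after
 * the given interval, so the loop stops at the top of its next iteration
 * once that time has passed.  For example, to stop the loop in ten seconds:
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	event_base_loopexit(base, &ten_sec);
 *
 * Passing NULL instead exits after the currently active callbacks have run.
 */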
1851 
1852 int
1853 event_loopbreak(void)
1854 {
1855 	return (event_base_loopbreak(current_base));
1856 }
1857 
1858 int
1859 event_base_loopbreak(struct event_base *event_base)
1860 {
1861 	int r = 0;
1862 	if (event_base == NULL)
1863 		return (-1);
1864 
1865 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1866 	event_base->event_break = 1;
1867 
1868 	if (EVBASE_NEED_NOTIFY(event_base)) {
1869 		r = evthread_notify_base(event_base);
1870 	} else {
1871 		r = (0);
1872 	}
1873 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1874 	return r;
1875 }
1876 
1877 int
1878 event_base_loopcontinue(struct event_base *event_base)
1879 {
1880 	int r = 0;
1881 	if (event_base == NULL)
1882 		return (-1);
1883 
1884 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1885 	event_base->event_continue = 1;
1886 
1887 	if (EVBASE_NEED_NOTIFY(event_base)) {
1888 		r = evthread_notify_base(event_base);
1889 	} else {
1890 		r = (0);
1891 	}
1892 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1893 	return r;
1894 }
1895 
1896 int
1897 event_base_got_break(struct event_base *event_base)
1898 {
1899 	int res;
1900 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1901 	res = event_base->event_break;
1902 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1903 	return res;
1904 }
1905 
1906 int
1907 event_base_got_exit(struct event_base *event_base)
1908 {
1909 	int res;
1910 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1911 	res = event_base->event_gotterm;
1912 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1913 	return res;
1914 }
1915 
1916 /* not thread safe */
1917 
1918 int
1919 event_loop(int flags)
1920 {
1921 	return event_base_loop(current_base, flags);
1922 }
1923 
1924 int
1925 event_base_loop(struct event_base *base, int flags)
1926 {
1927 	const struct eventop *evsel = base->evsel;
1928 	struct timeval tv;
1929 	struct timeval *tv_p;
1930 	int res, done, retval = 0;
1931 
1932 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1933 	 * as we invoke user callbacks. */
1934 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1935 
1936 	if (base->running_loop) {
1937 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1938 		    " can run on each event_base at once.", __func__);
1939 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1940 		return -1;
1941 	}
1942 
1943 	base->running_loop = 1;
1944 
1945 	clear_time_cache(base);
1946 
1947 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1948 		evsig_set_base_(base);
1949 
1950 	done = 0;
1951 
1952 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1953 	base->th_owner_id = EVTHREAD_GET_ID();
1954 #endif
1955 
1956 	base->event_gotterm = base->event_break = 0;
1957 
1958 	while (!done) {
1959 		base->event_continue = 0;
1960 		base->n_deferreds_queued = 0;
1961 
1962 		/* Terminate the loop if we have been asked to */
1963 		if (base->event_gotterm) {
1964 			break;
1965 		}
1966 
1967 		if (base->event_break) {
1968 			break;
1969 		}
1970 
1971 		tv_p = &tv;
1972 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1973 			timeout_next(base, &tv_p);
1974 		} else {
1975 			/*
1976 			 * if we have active events, we just poll new events
1977 			 * without waiting.
1978 			 */
1979 			evutil_timerclear(&tv);
1980 		}
1981 
1982 		/* If we have no events, we just exit */
1983 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1984 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1985 			event_debug(("%s: no events registered.", __func__));
1986 			retval = 1;
1987 			goto done;
1988 		}
1989 
1990 		event_queue_make_later_events_active(base);
1991 
1992 		clear_time_cache(base);
1993 
1994 		res = evsel->dispatch(base, tv_p);
1995 
1996 		if (res == -1) {
1997 			event_debug(("%s: dispatch returned unsuccessfully.",
1998 				__func__));
1999 			retval = -1;
2000 			goto done;
2001 		}
2002 
2003 		update_time_cache(base);
2004 
2005 		timeout_process(base);
2006 
2007 		if (N_ACTIVE_CALLBACKS(base)) {
2008 			int n = event_process_active(base);
2009 			if ((flags & EVLOOP_ONCE)
2010 			    && N_ACTIVE_CALLBACKS(base) == 0
2011 			    && n != 0)
2012 				done = 1;
2013 		} else if (flags & EVLOOP_NONBLOCK)
2014 			done = 1;
2015 	}
2016 	event_debug(("%s: asked to terminate loop.", __func__));
2017 
2018 done:
2019 	clear_time_cache(base);
2020 	base->running_loop = 0;
2021 
2022 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2023 
2024 	return (retval);
2025 }
2026 
2027 /* One-time callback to implement event_base_once: invokes the user callback,
2028  * then deletes the allocated storage */
2029 static void
2030 event_once_cb(evutil_socket_t fd, short events, void *arg)
2031 {
2032 	struct event_once *eonce = arg;
2033 
2034 	(*eonce->cb)(fd, events, eonce->arg);
2035 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2036 	LIST_REMOVE(eonce, next_once);
2037 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2038 	event_debug_unassign(&eonce->ev);
2039 	mm_free(eonce);
2040 }
2041 
2042 /* Not threadsafe; schedules an event to fire once on the current global base. */
2043 int
2044 event_once(evutil_socket_t fd, short events,
2045     void (*callback)(evutil_socket_t, short, void *),
2046     void *arg, const struct timeval *tv)
2047 {
2048 	return event_base_once(current_base, fd, events, callback, arg, tv);
2049 }
2050 
2051 /* Schedules an event once */
2052 int
2053 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2054     void (*callback)(evutil_socket_t, short, void *),
2055     void *arg, const struct timeval *tv)
2056 {
2057 	struct event_once *eonce;
2058 	int res = 0;
2059 	int activate = 0;
2060 
2061 	if (!base)
2062 		return (-1);
2063 
2064 	/* We cannot support signals that just fire once, or persistent
2065 	 * events. */
2066 	if (events & (EV_SIGNAL|EV_PERSIST))
2067 		return (-1);
2068 
2069 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2070 		return (-1);
2071 
2072 	eonce->cb = callback;
2073 	eonce->arg = arg;
2074 
2075 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2076 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2077 
2078 		if (tv == NULL || ! evutil_timerisset(tv)) {
2079 			/* If the event is going to become active immediately,
2080 			 * don't put it on the timeout queue.  This is one
2081 			 * idiom for scheduling a callback, so let's make
2082 			 * it fast (and order-preserving). */
2083 			activate = 1;
2084 		}
2085 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2086 		events &= EV_READ|EV_WRITE|EV_CLOSED;
2087 
2088 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2089 	} else {
2090 		/* Bad event combination */
2091 		mm_free(eonce);
2092 		return (-1);
2093 	}
2094 
2095 	if (res == 0) {
2096 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2097 		if (activate)
2098 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2099 		else
2100 			res = event_add_nolock_(&eonce->ev, tv, 0);
2101 
2102 		if (res != 0) {
2103 			mm_free(eonce);
2104 			return (res);
2105 		} else {
2106 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2107 		}
2108 		EVBASE_RELEASE_LOCK(base, th_base_lock);
2109 	}
2110 
2111 	return (0);
2112 }
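/*
 * Illustrative sketch (not part of this file): event_base_once() is handy
 * for fire-and-forget timers, since event_once_cb() above frees the storage
 * after the user callback runs.  Assuming a hypothetical callback once_cb():
 *
 *	static void
 *	once_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		*(int *)arg = 1;	// record that the timer fired
 *	}
 *
 *	static int fired;
 *	struct timeval one_sec = { 1, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, once_cb, &fired, &one_sec);
 *
 * As the checks above show, EV_SIGNAL and EV_PERSIST are rejected, and a
 * NULL or zero timeout makes the callback run on the next loop iteration.
 */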
2113 
2114 int
2115 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2116 {
2117 	if (!base)
2118 		base = current_base;
2119 	if (arg == &event_self_cbarg_ptr_)
2120 		arg = ev;
2121 
2122 	if (!(events & EV_SIGNAL))
2123 		event_debug_assert_socket_nonblocking_(fd);
2124 	event_debug_assert_not_added_(ev);
2125 
2126 	ev->ev_base = base;
2127 
2128 	ev->ev_callback = callback;
2129 	ev->ev_arg = arg;
2130 	ev->ev_fd = fd;
2131 	ev->ev_events = events;
2132 	ev->ev_res = 0;
2133 	ev->ev_flags = EVLIST_INIT;
2134 	ev->ev_ncalls = 0;
2135 	ev->ev_pncalls = NULL;
2136 
2137 	if (events & EV_SIGNAL) {
2138 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2139 			event_warnx("%s: EV_SIGNAL is not compatible with "
2140 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2141 			return -1;
2142 		}
2143 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2144 	} else {
2145 		if (events & EV_PERSIST) {
2146 			evutil_timerclear(&ev->ev_io_timeout);
2147 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2148 		} else {
2149 			ev->ev_closure = EV_CLOSURE_EVENT;
2150 		}
2151 	}
2152 
2153 	min_heap_elem_init_(ev);
2154 
2155 	if (base != NULL) {
2156 		/* by default, we put new events into the middle priority */
2157 		ev->ev_pri = base->nactivequeues / 2;
2158 	}
2159 
2160 	event_debug_note_setup_(ev);
2161 
2162 	return 0;
2163 }
2164 
2165 int
2166 event_base_set(struct event_base *base, struct event *ev)
2167 {
2168 	/* Only innocent events may be assigned to a different base */
2169 	if (ev->ev_flags != EVLIST_INIT)
2170 		return (-1);
2171 
2172 	event_debug_assert_is_setup_(ev);
2173 
2174 	ev->ev_base = base;
2175 	ev->ev_pri = base->nactivequeues/2;
2176 
2177 	return (0);
2178 }
2179 
2180 void
2181 event_set(struct event *ev, evutil_socket_t fd, short events,
2182 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
2183 {
2184 	int r;
2185 	r = event_assign(ev, current_base, fd, events, callback, arg);
2186 	EVUTIL_ASSERT(r == 0);
2187 }
2188 
2189 void *
2190 event_self_cbarg(void)
2191 {
2192 	return &event_self_cbarg_ptr_;
2193 }
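/*
 * Illustrative sketch (not part of this file): event_self_cbarg() returns a
 * sentinel that event_assign() above replaces with the event itself, which
 * is useful when a callback needs its own struct event (for example, to
 * delete itself).  Assuming a hypothetical callback self_del_cb():
 *
 *	static void
 *	self_del_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event *self = arg;	// filled in by event_assign()
 *		event_del(self);
 *	}
 *
 *	static struct event ev;
 *	event_assign(&ev, base, fd, EV_READ|EV_PERSIST, self_del_cb,
 *	    event_self_cbarg());
 *	event_add(&ev, NULL);
 */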
2194 
2195 struct event *
2196 event_base_get_running_event(struct event_base *base)
2197 {
2198 	struct event *ev = NULL;
2199 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2200 	if (EVBASE_IN_THREAD(base)) {
2201 		struct event_callback *evcb = base->current_event;
2202 		if (evcb->evcb_flags & EVLIST_INIT)
2203 			ev = event_callback_to_event(evcb);
2204 	}
2205 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2206 	return ev;
2207 }
2208 
2209 struct event *
2210 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2211 {
2212 	struct event *ev;
2213 	ev = mm_malloc(sizeof(struct event));
2214 	if (ev == NULL)
2215 		return (NULL);
2216 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2217 		mm_free(ev);
2218 		return (NULL);
2219 	}
2220 
2221 	return (ev);
2222 }
2223 
2224 void
2225 event_free(struct event *ev)
2226 {
2227 	/* This is disabled, so that events which have been finalized can
2228 	 * still be a valid target for event_free(). */
2229 	// event_debug_assert_is_setup_(ev);
2230 
2231 	/* make sure that this event won't be coming back to haunt us. */
2232 	event_del(ev);
2233 	event_debug_note_teardown_(ev);
2234 	mm_free(ev);
2235 
2236 }
2237 
2238 void
2239 event_debug_unassign(struct event *ev)
2240 {
2241 	event_debug_assert_not_added_(ev);
2242 	event_debug_note_teardown_(ev);
2243 
2244 	ev->ev_flags &= ~EVLIST_INIT;
2245 }
2246 
2247 #define EVENT_FINALIZE_FREE_ 0x10000
2248 static int
2249 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2250 {
2251 	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2252 	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2253 
2254 	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2255 	ev->ev_closure = closure;
2256 	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2257 	event_active_nolock_(ev, EV_FINALIZE, 1);
2258 	ev->ev_flags |= EVLIST_FINALIZING;
2259 	return 0;
2260 }
2261 
2262 static int
2263 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2264 {
2265 	int r;
2266 	struct event_base *base = ev->ev_base;
2267 	if (EVUTIL_FAILURE_CHECK(!base)) {
2268 		event_warnx("%s: event has no event_base set.", __func__);
2269 		return -1;
2270 	}
2271 
2272 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2273 	r = event_finalize_nolock_(base, flags, ev, cb);
2274 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2275 	return r;
2276 }
2277 
2278 int
2279 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2280 {
2281 	return event_finalize_impl_(flags, ev, cb);
2282 }
2283 
2284 int
2285 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2286 {
2287 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2288 }
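/*
 * Illustrative sketch (not part of this file): finalizers are the safe way
 * to tear down an event whose callback may still be running in the loop
 * thread, since the finalize callback only runs once no callback is in
 * progress.  A minimal sketch, with struct my_conn and free_my_conn() being
 * hypothetical user code:
 *
 *	static void
 *	conn_finalize_cb(struct event *ev, void *arg)
 *	{
 *		struct my_conn *conn = arg;	// the event's callback arg
 *		free_my_conn(conn);
 *	}
 *
 *	// Instead of event_del() + event_free():
 *	event_free_finalize(0, conn->read_event, conn_finalize_cb);
 *
 * In this file the only flag bit consulted is the internal
 * EVENT_FINALIZE_FREE_ marker, so callers normally pass 0 for "flags".
 */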
2289 
2290 void
2291 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2292 {
2293 	struct event *ev = NULL;
2294 	if (evcb->evcb_flags & EVLIST_INIT) {
2295 		ev = event_callback_to_event(evcb);
2296 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2297 	} else {
2298 		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2299 	}
2300 
2301 	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2302 	evcb->evcb_cb_union.evcb_cbfinalize = cb;
2303 	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2304 	evcb->evcb_flags |= EVLIST_FINALIZING;
2305 }
2306 
2307 void
2308 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2309 {
2310 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2311 	event_callback_finalize_nolock_(base, flags, evcb, cb);
2312 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2313 }
2314 
2315 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2316  * callback will be invoked on *one of them*, after they have *all* been
2317  * finalized. */
2318 int
2319 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2320 {
2321 	int n_pending = 0, i;
2322 
2323 	if (base == NULL)
2324 		base = current_base;
2325 
2326 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2327 
2328 	event_debug(("%s: %d events finalizing", __func__, n_cbs));
2329 
2330 	/* At most one can be currently executing; the rest we just
2331 	 * cancel... But we always make sure that the finalize callback
2332 	 * runs. */
2333 	for (i = 0; i < n_cbs; ++i) {
2334 		struct event_callback *evcb = evcbs[i];
2335 		if (evcb == base->current_event) {
2336 			event_callback_finalize_nolock_(base, 0, evcb, cb);
2337 			++n_pending;
2338 		} else {
2339 			event_callback_cancel_nolock_(base, evcb, 0);
2340 		}
2341 	}
2342 
2343 	if (n_pending == 0) {
2344 		/* Just do the first one. */
2345 		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2346 	}
2347 
2348 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2349 	return 0;
2350 }
2351 
2352 /*
2353  * Sets the priority of an event: if the event is already active,
2354  * changing its priority will fail.
2355  */
2356 
2357 int
2358 event_priority_set(struct event *ev, int pri)
2359 {
2360 	event_debug_assert_is_setup_(ev);
2361 
2362 	if (ev->ev_flags & EVLIST_ACTIVE)
2363 		return (-1);
2364 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2365 		return (-1);
2366 
2367 	ev->ev_pri = pri;
2368 
2369 	return (0);
2370 }
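/*
 * Illustrative sketch (not part of this file): priorities only matter once
 * the base has more than one active queue, and they must be set before the
 * event becomes active.  Assuming a hypothetical callback read_cb():
 *
 *	event_base_priority_init(base, 3);	// queues 0 (most urgent) .. 2
 *
 *	struct event *urgent = event_new(base, fd, EV_READ|EV_PERSIST,
 *	    read_cb, NULL);
 *	event_priority_set(urgent, 0);
 *	event_add(urgent, NULL);
 *
 * Events that never call event_priority_set() keep the middle priority that
 * event_assign() chose above.
 */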
2371 
2372 /*
2373  * Checks if a specific event is pending or scheduled.
2374  */
2375 
2376 int
2377 event_pending(const struct event *ev, short event, struct timeval *tv)
2378 {
2379 	int flags = 0;
2380 
2381 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2382 		event_warnx("%s: event has no event_base set.", __func__);
2383 		return 0;
2384 	}
2385 
2386 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2387 	event_debug_assert_is_setup_(ev);
2388 
2389 	if (ev->ev_flags & EVLIST_INSERTED)
2390 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2391 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2392 		flags |= ev->ev_res;
2393 	if (ev->ev_flags & EVLIST_TIMEOUT)
2394 		flags |= EV_TIMEOUT;
2395 
2396 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2397 
2398 	/* See if there is a timeout that we should report */
2399 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2400 		struct timeval tmp = ev->ev_timeout;
2401 		tmp.tv_usec &= MICROSECONDS_MASK;
2402 		/* correctly remap to real time */
2403 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2404 	}
2405 
2406 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2407 
2408 	return (flags & event);
2409 }
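/*
 * Illustrative sketch (not part of this file): event_pending() both tests
 * whether an event is still scheduled and, when EV_TIMEOUT is requested and
 * pending, reports the expiration time converted to the real-time clock:
 *
 *	struct timeval expiry;
 *	int what = event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expiry);
 *	if (what & EV_TIMEOUT) {
 *		// "expiry" now holds the absolute expiration time.
 *	} else if (what == 0) {
 *		// not pending at all; it is safe to re-add or free "ev".
 *	}
 */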
2410 
2411 int
2412 event_initialized(const struct event *ev)
2413 {
2414 	if (!(ev->ev_flags & EVLIST_INIT))
2415 		return 0;
2416 
2417 	return 1;
2418 }
2419 
2420 void
2421 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2422 {
2423 	event_debug_assert_is_setup_(event);
2424 
2425 	if (base_out)
2426 		*base_out = event->ev_base;
2427 	if (fd_out)
2428 		*fd_out = event->ev_fd;
2429 	if (events_out)
2430 		*events_out = event->ev_events;
2431 	if (callback_out)
2432 		*callback_out = event->ev_callback;
2433 	if (arg_out)
2434 		*arg_out = event->ev_arg;
2435 }
2436 
2437 size_t
2438 event_get_struct_event_size(void)
2439 {
2440 	return sizeof(struct event);
2441 }
2442 
2443 evutil_socket_t
2444 event_get_fd(const struct event *ev)
2445 {
2446 	event_debug_assert_is_setup_(ev);
2447 	return ev->ev_fd;
2448 }
2449 
2450 struct event_base *
2451 event_get_base(const struct event *ev)
2452 {
2453 	event_debug_assert_is_setup_(ev);
2454 	return ev->ev_base;
2455 }
2456 
2457 short
2458 event_get_events(const struct event *ev)
2459 {
2460 	event_debug_assert_is_setup_(ev);
2461 	return ev->ev_events;
2462 }
2463 
2464 event_callback_fn
2465 event_get_callback(const struct event *ev)
2466 {
2467 	event_debug_assert_is_setup_(ev);
2468 	return ev->ev_callback;
2469 }
2470 
2471 void *
2472 event_get_callback_arg(const struct event *ev)
2473 {
2474 	event_debug_assert_is_setup_(ev);
2475 	return ev->ev_arg;
2476 }
2477 
2478 int
2479 event_get_priority(const struct event *ev)
2480 {
2481 	event_debug_assert_is_setup_(ev);
2482 	return ev->ev_pri;
2483 }
2484 
2485 int
2486 event_add(struct event *ev, const struct timeval *tv)
2487 {
2488 	int res;
2489 
2490 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2491 		event_warnx("%s: event has no event_base set.", __func__);
2492 		return -1;
2493 	}
2494 
2495 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2496 
2497 	res = event_add_nolock_(ev, tv, 0);
2498 
2499 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2500 
2501 	return (res);
2502 }
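/*
 * Illustrative sketch (not part of this file): the timeval given to
 * event_add() is a relative interval (event_add_nolock_() below turns it
 * into an absolute time), and re-adding a pending event simply reschedules
 * its timeout:
 *
 *	struct timeval five_sec = { 5, 0 };
 *	event_add(ev, &five_sec);	// fire EV_TIMEOUT in about 5 seconds
 *	event_add(ev, &five_sec);	// push the deadline back another 5s
 *
 * Passing NULL adds the event with no timeout at all.
 */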
2503 
2504 /* Helper callback: wake an event_base from another thread.  This version
2505  * works by writing a byte to one end of a socketpair, so that the event_base
2506  * listening on the other end will wake up as the corresponding event
2507  * triggers. */
2508 static int
2509 evthread_notify_base_default(struct event_base *base)
2510 {
2511 	char buf[1];
2512 	int r;
2513 	buf[0] = (char) 0;
2514 #ifdef _WIN32
2515 	r = send(base->th_notify_fd[1], buf, 1, 0);
2516 #else
2517 	r = write(base->th_notify_fd[1], buf, 1);
2518 #endif
2519 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2520 }
2521 
2522 #ifdef EVENT__HAVE_EVENTFD
2523 /* Helper callback: wake an event_base from another thread.  This version
2524  * assumes that you have a working eventfd() implementation. */
2525 static int
2526 evthread_notify_base_eventfd(struct event_base *base)
2527 {
2528 	ev_uint64_t msg = 1;
2529 	int r;
2530 	do {
2531 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2532 	} while (r < 0 && errno == EAGAIN);
2533 
2534 	return (r < 0) ? -1 : 0;
2535 }
2536 #endif
2537 
2538 
2539 /** Tell the thread currently running the event_loop for base (if any) that it
2540  * needs to stop waiting in its dispatch function (if it is) and process all
2541  * active callbacks. */
2542 static int
2543 evthread_notify_base(struct event_base *base)
2544 {
2545 	EVENT_BASE_ASSERT_LOCKED(base);
2546 	if (!base->th_notify_fn)
2547 		return -1;
2548 	if (base->is_notify_pending)
2549 		return 0;
2550 	base->is_notify_pending = 1;
2551 	return base->th_notify_fn(base);
2552 }
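/*
 * Illustrative sketch (not part of this file): this notification path is
 * what makes it safe, once threading support is enabled (for example with
 * evthread_use_pthreads()), to poke a running loop from another thread; the
 * locking wrappers in this file call evthread_notify_base() when needed:
 *
 *	// worker thread: make "ev" run its callback in the loop thread
 *	event_active(ev, EV_READ, 0);
 *
 *	// worker thread: stop the loop entirely
 *	event_base_loopbreak(base);
 */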
2553 
2554 /* Implementation function to remove a timeout on a currently pending event.
2555  */
2556 int
2557 event_remove_timer_nolock_(struct event *ev)
2558 {
2559 	struct event_base *base = ev->ev_base;
2560 
2561 	EVENT_BASE_ASSERT_LOCKED(base);
2562 	event_debug_assert_is_setup_(ev);
2563 
2564 	event_debug(("event_remove_timer_nolock: event: %p", ev));
2565 
2566 	/* If it's not pending on a timeout, we don't need to do anything. */
2567 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2568 		event_queue_remove_timeout(base, ev);
2569 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2570 	}
2571 
2572 	return (0);
2573 }
2574 
2575 int
2576 event_remove_timer(struct event *ev)
2577 {
2578 	int res;
2579 
2580 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2581 		event_warnx("%s: event has no event_base set.", __func__);
2582 		return -1;
2583 	}
2584 
2585 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2586 
2587 	res = event_remove_timer_nolock_(ev);
2588 
2589 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2590 
2591 	return (res);
2592 }
2593 
2594 /* Implementation function to add an event.  Works just like event_add,
2595  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2596  * we treat tv as an absolute time, not as an interval to add to the current
2597  * time */
2598 int
2599 event_add_nolock_(struct event *ev, const struct timeval *tv,
2600     int tv_is_absolute)
2601 {
2602 	struct event_base *base = ev->ev_base;
2603 	int res = 0;
2604 	int notify = 0;
2605 
2606 	EVENT_BASE_ASSERT_LOCKED(base);
2607 	event_debug_assert_is_setup_(ev);
2608 
2609 	event_debug((
2610 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2611 		 ev,
2612 		 EV_SOCK_ARG(ev->ev_fd),
2613 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2614 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2615 		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2616 		 tv ? "EV_TIMEOUT " : " ",
2617 		 ev->ev_callback));
2618 
2619 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2620 
2621 	if (ev->ev_flags & EVLIST_FINALIZING) {
2622 		/* XXXX debug */
2623 		return (-1);
2624 	}
2625 
2626 	/*
2627 	 * Prepare for timeout insertion further below.  If any step
2628 	 * fails, we should not change any state.
2629 	 */
2630 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2631 		if (min_heap_reserve_(&base->timeheap,
2632 			1 + min_heap_size_(&base->timeheap)) == -1)
2633 			return (-1);  /* ENOMEM == errno */
2634 	}
2635 
2636 	/* If the main thread is currently executing a signal event's
2637 	 * callback, and we are not the main thread, then we want to wait
2638 	 * until the callback is done before we mess with the event, or else
2639 	 * we can race on ev_ncalls and ev_pncalls below. */
2640 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2641 	if (base->current_event == event_to_event_callback(ev) &&
2642 	    (ev->ev_events & EV_SIGNAL)
2643 	    && !EVBASE_IN_THREAD(base)) {
2644 		++base->current_event_waiters;
2645 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2646 	}
2647 #endif
2648 
2649 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2650 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2651 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2652 			res = evmap_io_add_(base, ev->ev_fd, ev);
2653 		else if (ev->ev_events & EV_SIGNAL)
2654 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2655 		if (res != -1)
2656 			event_queue_insert_inserted(base, ev);
2657 		if (res == 1) {
2658 			/* evmap says we need to notify the main thread. */
2659 			notify = 1;
2660 			res = 0;
2661 		}
2662 	}
2663 
2664 	/*
2665 	 * we should change the timeout state only if the previous event
2666 	 * addition succeeded.
2667 	 */
2668 	if (res != -1 && tv != NULL) {
2669 		struct timeval now;
2670 		int common_timeout;
2671 #ifdef USE_REINSERT_TIMEOUT
2672 		int was_common;
2673 		int old_timeout_idx;
2674 #endif
2675 
2676 		/*
2677 		 * for persistent timeout events, we remember the
2678 		 * timeout value and re-add the event.
2679 		 *
2680 		 * If tv_is_absolute, this was already set.
2681 		 */
2682 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2683 			ev->ev_io_timeout = *tv;
2684 
2685 #ifndef USE_REINSERT_TIMEOUT
2686 		if (ev->ev_flags & EVLIST_TIMEOUT) {
2687 			event_queue_remove_timeout(base, ev);
2688 		}
2689 #endif
2690 
2691 		/* Check if it is active due to a timeout.  Rescheduling
2692 		 * this timeout before the callback can be executed
2693 		 * removes it from the active list. */
2694 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2695 		    (ev->ev_res & EV_TIMEOUT)) {
2696 			if (ev->ev_events & EV_SIGNAL) {
2697 				/* See if we are just active executing
2698 				 * this event in a loop
2699 				 */
2700 				if (ev->ev_ncalls && ev->ev_pncalls) {
2701 					/* Abort loop */
2702 					*ev->ev_pncalls = 0;
2703 				}
2704 			}
2705 
2706 			event_queue_remove_active(base, event_to_event_callback(ev));
2707 		}
2708 
2709 		gettime(base, &now);
2710 
2711 		common_timeout = is_common_timeout(tv, base);
2712 #ifdef USE_REINSERT_TIMEOUT
2713 		was_common = is_common_timeout(&ev->ev_timeout, base);
2714 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2715 #endif
2716 
2717 		if (tv_is_absolute) {
2718 			ev->ev_timeout = *tv;
2719 		} else if (common_timeout) {
2720 			struct timeval tmp = *tv;
2721 			tmp.tv_usec &= MICROSECONDS_MASK;
2722 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2723 			ev->ev_timeout.tv_usec |=
2724 			    (tv->tv_usec & ~MICROSECONDS_MASK);
2725 		} else {
2726 			evutil_timeradd(&now, tv, &ev->ev_timeout);
2727 		}
2728 
2729 		event_debug((
2730 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2731 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2732 
2733 #ifdef USE_REINSERT_TIMEOUT
2734 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2735 #else
2736 		event_queue_insert_timeout(base, ev);
2737 #endif
2738 
2739 		if (common_timeout) {
2740 			struct common_timeout_list *ctl =
2741 			    get_common_timeout_list(base, &ev->ev_timeout);
2742 			if (ev == TAILQ_FIRST(&ctl->events)) {
2743 				common_timeout_schedule(ctl, &now, ev);
2744 			}
2745 		} else {
2746 			struct event* top = NULL;
2747 			/* See if the earliest timeout is now earlier than it
2748 			 * was before: if so, we will need to tell the main
2749 			 * thread to wake up earlier than it would otherwise.
2750 			 * We double check the timeout of the top element to
2751 			 * handle time distortions due to system suspension.
2752 			 */
2753 			if (min_heap_elt_is_top_(ev))
2754 				notify = 1;
2755 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2756 					 evutil_timercmp(&top->ev_timeout, &now, <))
2757 				notify = 1;
2758 		}
2759 	}
2760 
2761 	/* if we are not in the right thread, we need to wake up the loop */
2762 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2763 		evthread_notify_base(base);
2764 
2765 	event_debug_note_add_(ev);
2766 
2767 	return (res);
2768 }
2769 
2770 static int
2771 event_del_(struct event *ev, int blocking)
2772 {
2773 	int res;
2774 	struct event_base *base = ev->ev_base;
2775 
2776 	if (EVUTIL_FAILURE_CHECK(!base)) {
2777 		event_warnx("%s: event has no event_base set.", __func__);
2778 		return -1;
2779 	}
2780 
2781 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2782 	res = event_del_nolock_(ev, blocking);
2783 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2784 
2785 	return (res);
2786 }
2787 
2788 int
2789 event_del(struct event *ev)
2790 {
2791 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2792 }
2793 
2794 int
2795 event_del_block(struct event *ev)
2796 {
2797 	return event_del_(ev, EVENT_DEL_BLOCK);
2798 }
2799 
2800 int
2801 event_del_noblock(struct event *ev)
2802 {
2803 	return event_del_(ev, EVENT_DEL_NOBLOCK);
2804 }
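/*
 * Illustrative sketch (not part of this file): the three wrappers above
 * differ only in whether they wait for the event's callback if it is
 * running in the loop thread at that moment.  From a worker thread:
 *
 *	event_del(ev);		// wait, unless the event was added with EV_FINALIZE
 *	event_del_block(ev);	// always wait for a running callback to return
 *	event_del_noblock(ev);	// never wait
 *
 * Waiting is what makes it safe to free the callback argument as soon as
 * event_del() returns, as the comment inside event_del_nolock_() below
 * explains.
 */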
2805 
2806 /** Helper for event_del: always called with th_base_lock held.
2807  *
2808  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2809  * EVEN_IF_FINALIZING} values. See those for more information.
2810  */
2811 int
2812 event_del_nolock_(struct event *ev, int blocking)
2813 {
2814 	struct event_base *base;
2815 	int res = 0, notify = 0;
2816 
2817 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2818 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2819 
2820 	/* An event without a base has not been added */
2821 	if (ev->ev_base == NULL)
2822 		return (-1);
2823 
2824 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2825 
2826 	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2827 		if (ev->ev_flags & EVLIST_FINALIZING) {
2828 			/* XXXX Debug */
2829 			return 0;
2830 		}
2831 	}
2832 
2833 	base = ev->ev_base;
2834 
2835 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2836 
2837 	/* See if we are just active executing this event in a loop */
2838 	if (ev->ev_events & EV_SIGNAL) {
2839 		if (ev->ev_ncalls && ev->ev_pncalls) {
2840 			/* Abort loop */
2841 			*ev->ev_pncalls = 0;
2842 		}
2843 	}
2844 
2845 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2846 		/* NOTE: We never need to notify the main thread because of a
2847 		 * deleted timeout event: all that could happen if we don't is
2848 		 * that the dispatch loop might wake up too early.  But the
2849 		 * point of notifying the main thread _is_ to wake up the
2850 		 * dispatch loop early anyway, so we wouldn't gain anything by
2851 		 * doing it.
2852 		 */
2853 		event_queue_remove_timeout(base, ev);
2854 	}
2855 
2856 	if (ev->ev_flags & EVLIST_ACTIVE)
2857 		event_queue_remove_active(base, event_to_event_callback(ev));
2858 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2859 		event_queue_remove_active_later(base, event_to_event_callback(ev));
2860 
2861 	if (ev->ev_flags & EVLIST_INSERTED) {
2862 		event_queue_remove_inserted(base, ev);
2863 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2864 			res = evmap_io_del_(base, ev->ev_fd, ev);
2865 		else
2866 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2867 		if (res == 1) {
2868 			/* evmap says we need to notify the main thread. */
2869 			notify = 1;
2870 			res = 0;
2871 		}
2872 		/* If we do not have events, let's notify the event base so it
2873 		 * can exit without waiting */
2874 		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2875 			notify = 1;
2876 	}
2877 
2878 	/* if we are not in the right thread, we need to wake up the loop */
2879 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2880 		evthread_notify_base(base);
2881 
2882 	event_debug_note_del_(ev);
2883 
2884 	/* If the main thread is currently executing this event's callback,
2885 	 * and we are not the main thread, then we want to wait until the
2886 	 * callback is done before returning. That way, when this function
2887 	 * returns, it will be safe to free the user-supplied argument.
2888 	 */
2889 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2890 	if (blocking != EVENT_DEL_NOBLOCK &&
2891 	    base->current_event == event_to_event_callback(ev) &&
2892 	    !EVBASE_IN_THREAD(base) &&
2893 	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2894 		++base->current_event_waiters;
2895 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2896 	}
2897 #endif
2898 
2899 	return (res);
2900 }
2901 
2902 void
2903 event_active(struct event *ev, int res, short ncalls)
2904 {
2905 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2906 		event_warnx("%s: event has no event_base set.", __func__);
2907 		return;
2908 	}
2909 
2910 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2911 
2912 	event_debug_assert_is_setup_(ev);
2913 
2914 	event_active_nolock_(ev, res, ncalls);
2915 
2916 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2917 }
2918 
2919 
2920 void
2921 event_active_nolock_(struct event *ev, int res, short ncalls)
2922 {
2923 	struct event_base *base;
2924 
2925 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2926 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2927 
2928 	base = ev->ev_base;
2929 	EVENT_BASE_ASSERT_LOCKED(base);
2930 
2931 	if (ev->ev_flags & EVLIST_FINALIZING) {
2932 		/* XXXX debug */
2933 		return;
2934 	}
2935 
2936 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2937 	default:
2938 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2939 		EVUTIL_ASSERT(0);
2940 		break;
2941 	case EVLIST_ACTIVE:
2942 		/* We get different kinds of events, add them together */
2943 		ev->ev_res |= res;
2944 		return;
2945 	case EVLIST_ACTIVE_LATER:
2946 		ev->ev_res |= res;
2947 		break;
2948 	case 0:
2949 		ev->ev_res = res;
2950 		break;
2951 	}
2952 
2953 	if (ev->ev_pri < base->event_running_priority)
2954 		base->event_continue = 1;
2955 
2956 	if (ev->ev_events & EV_SIGNAL) {
2957 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2958 		if (base->current_event == event_to_event_callback(ev) &&
2959 		    !EVBASE_IN_THREAD(base)) {
2960 			++base->current_event_waiters;
2961 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2962 		}
2963 #endif
2964 		ev->ev_ncalls = ncalls;
2965 		ev->ev_pncalls = NULL;
2966 	}
2967 
2968 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2969 }
2970 
2971 void
2972 event_active_later_(struct event *ev, int res)
2973 {
2974 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2975 	event_active_later_nolock_(ev, res);
2976 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2977 }
2978 
2979 void
2980 event_active_later_nolock_(struct event *ev, int res)
2981 {
2982 	struct event_base *base = ev->ev_base;
2983 	EVENT_BASE_ASSERT_LOCKED(base);
2984 
2985 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2986 		/* We get different kinds of events, add them together */
2987 		ev->ev_res |= res;
2988 		return;
2989 	}
2990 
2991 	ev->ev_res = res;
2992 
2993 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2994 }
2995 
2996 int
2997 event_callback_activate_(struct event_base *base,
2998     struct event_callback *evcb)
2999 {
3000 	int r;
3001 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3002 	r = event_callback_activate_nolock_(base, evcb);
3003 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3004 	return r;
3005 }
3006 
3007 int
3008 event_callback_activate_nolock_(struct event_base *base,
3009     struct event_callback *evcb)
3010 {
3011 	int r = 1;
3012 
3013 	if (evcb->evcb_flags & EVLIST_FINALIZING)
3014 		return 0;
3015 
3016 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3017 	default:
3018 		EVUTIL_ASSERT(0);
3019 		EVUTIL_FALLTHROUGH;
3020 	case EVLIST_ACTIVE_LATER:
3021 		event_queue_remove_active_later(base, evcb);
3022 		r = 0;
3023 		break;
3024 	case EVLIST_ACTIVE:
3025 		return 0;
3026 	case 0:
3027 		break;
3028 	}
3029 
3030 	event_queue_insert_active(base, evcb);
3031 
3032 	if (EVBASE_NEED_NOTIFY(base))
3033 		evthread_notify_base(base);
3034 
3035 	return r;
3036 }
3037 
3038 int
3039 event_callback_activate_later_nolock_(struct event_base *base,
3040     struct event_callback *evcb)
3041 {
3042 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3043 		return 0;
3044 
3045 	event_queue_insert_active_later(base, evcb);
3046 	if (EVBASE_NEED_NOTIFY(base))
3047 		evthread_notify_base(base);
3048 	return 1;
3049 }
3050 
3051 void
3052 event_callback_init_(struct event_base *base,
3053     struct event_callback *cb)
3054 {
3055 	memset(cb, 0, sizeof(*cb));
3056 	cb->evcb_pri = base->nactivequeues - 1;
3057 }
3058 
3059 int
3060 event_callback_cancel_(struct event_base *base,
3061     struct event_callback *evcb)
3062 {
3063 	int r;
3064 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3065 	r = event_callback_cancel_nolock_(base, evcb, 0);
3066 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3067 	return r;
3068 }
3069 
3070 int
3071 event_callback_cancel_nolock_(struct event_base *base,
3072     struct event_callback *evcb, int even_if_finalizing)
3073 {
3074 	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3075 		return 0;
3076 
3077 	if (evcb->evcb_flags & EVLIST_INIT)
3078 		return event_del_nolock_(event_callback_to_event(evcb),
3079 		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3080 
3081 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3082 	default:
3083 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3084 		EVUTIL_ASSERT(0);
3085 		break;
3086 	case EVLIST_ACTIVE:
3087 		/* We get different kinds of events, add them together */
3088 		event_queue_remove_active(base, evcb);
3089 		return 0;
3090 	case EVLIST_ACTIVE_LATER:
3091 		event_queue_remove_active_later(base, evcb);
3092 		break;
3093 	case 0:
3094 		break;
3095 	}
3096 
3097 	return 0;
3098 }
3099 
3100 void
3101 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3102 {
3103 	memset(cb, 0, sizeof(*cb));
3104 	cb->evcb_cb_union.evcb_selfcb = fn;
3105 	cb->evcb_arg = arg;
3106 	cb->evcb_pri = priority;
3107 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
3108 }
3109 
3110 void
3111 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3112 {
3113 	cb->evcb_pri = priority;
3114 }
3115 
3116 void
3117 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3118 {
3119 	if (!base)
3120 		base = current_base;
3121 	event_callback_cancel_(base, cb);
3122 }
3123 
3124 #define MAX_DEFERREDS_QUEUED 32
3125 int
3126 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3127 {
3128 	int r = 1;
3129 	if (!base)
3130 		base = current_base;
3131 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3132 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3133 		r = event_callback_activate_later_nolock_(base, cb);
3134 	} else {
3135 		r = event_callback_activate_nolock_(base, cb);
3136 		if (r) {
3137 			++base->n_deferreds_queued;
3138 		}
3139 	}
3140 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3141 	return r;
3142 }
3143 
3144 static int
3145 timeout_next(struct event_base *base, struct timeval **tv_p)
3146 {
3147 	/* Caller must hold th_base_lock */
3148 	struct timeval now;
3149 	struct event *ev;
3150 	struct timeval *tv = *tv_p;
3151 	int res = 0;
3152 
3153 	ev = min_heap_top_(&base->timeheap);
3154 
3155 	if (ev == NULL) {
3156 		/* if no time-based events are active, wait for I/O */
3157 		*tv_p = NULL;
3158 		goto out;
3159 	}
3160 
3161 	if (gettime(base, &now) == -1) {
3162 		res = -1;
3163 		goto out;
3164 	}
3165 
3166 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3167 		evutil_timerclear(tv);
3168 		goto out;
3169 	}
3170 
3171 	evutil_timersub(&ev->ev_timeout, &now, tv);
3172 
3173 	EVUTIL_ASSERT(tv->tv_sec >= 0);
3174 	EVUTIL_ASSERT(tv->tv_usec >= 0);
3175 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3176 
3177 out:
3178 	return (res);
3179 }
3180 
3181 /* Activate every event whose timeout has elapsed. */
3182 static void
3183 timeout_process(struct event_base *base)
3184 {
3185 	/* Caller must hold lock. */
3186 	struct timeval now;
3187 	struct event *ev;
3188 
3189 	if (min_heap_empty_(&base->timeheap)) {
3190 		return;
3191 	}
3192 
3193 	gettime(base, &now);
3194 
3195 	while ((ev = min_heap_top_(&base->timeheap))) {
3196 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
3197 			break;
3198 
3199 		/* delete this event from the I/O queues */
3200 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3201 
3202 		event_debug(("timeout_process: event: %p, call %p",
3203 			 ev, ev->ev_callback));
3204 		event_active_nolock_(ev, EV_TIMEOUT, 1);
3205 	}
3206 }
3207 
3208 #ifndef MAX
3209 #define MAX(a,b) (((a)>(b))?(a):(b))
3210 #endif
3211 
3212 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3213 
3214 /* These are a fancy way to spell
3215      if (~flags & EVLIST_INTERNAL)
3216          base->event_count--/++;
3217 */
3218 #define DECR_EVENT_COUNT(base,flags) \
3219 	((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3220 #define INCR_EVENT_COUNT(base,flags) do {					\
3221 	((base)->event_count += !((flags) & EVLIST_INTERNAL));			\
3222 	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
3223 } while (0)
3224 
3225 static void
3226 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3227 {
3228 	EVENT_BASE_ASSERT_LOCKED(base);
3229 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3230 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3231 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3232 		return;
3233 	}
3234 	DECR_EVENT_COUNT(base, ev->ev_flags);
3235 	ev->ev_flags &= ~EVLIST_INSERTED;
3236 }
3237 static void
3238 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3239 {
3240 	EVENT_BASE_ASSERT_LOCKED(base);
3241 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3242 		event_errx(1, "%s: %p not on queue %x", __func__,
3243 			   evcb, EVLIST_ACTIVE);
3244 		return;
3245 	}
3246 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3247 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
3248 	base->event_count_active--;
3249 
3250 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3251 	    evcb, evcb_active_next);
3252 }
3253 static void
3254 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3255 {
3256 	EVENT_BASE_ASSERT_LOCKED(base);
3257 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3258 		event_errx(1, "%s: %p not on queue %x", __func__,
3259 			   evcb, EVLIST_ACTIVE_LATER);
3260 		return;
3261 	}
3262 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3263 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3264 	base->event_count_active--;
3265 
3266 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3267 }
3268 static void
3269 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3270 {
3271 	EVENT_BASE_ASSERT_LOCKED(base);
3272 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3273 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3274 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3275 		return;
3276 	}
3277 	DECR_EVENT_COUNT(base, ev->ev_flags);
3278 	ev->ev_flags &= ~EVLIST_TIMEOUT;
3279 
3280 	if (is_common_timeout(&ev->ev_timeout, base)) {
3281 		struct common_timeout_list *ctl =
3282 		    get_common_timeout_list(base, &ev->ev_timeout);
3283 		TAILQ_REMOVE(&ctl->events, ev,
3284 		    ev_timeout_pos.ev_next_with_common_timeout);
3285 	} else {
3286 		min_heap_erase_(&base->timeheap, ev);
3287 	}
3288 }
3289 
3290 #ifdef USE_REINSERT_TIMEOUT
3291 /* Remove and reinsert 'ev' into the timeout queue. */
3292 static void
3293 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3294     int was_common, int is_common, int old_timeout_idx)
3295 {
3296 	struct common_timeout_list *ctl;
3297 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3298 		event_queue_insert_timeout(base, ev);
3299 		return;
3300 	}
3301 
3302 	switch ((was_common<<1) | is_common) {
3303 	case 3: /* Changing from one common timeout to another */
3304 		ctl = base->common_timeout_queues[old_timeout_idx];
3305 		TAILQ_REMOVE(&ctl->events, ev,
3306 		    ev_timeout_pos.ev_next_with_common_timeout);
3307 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3308 		insert_common_timeout_inorder(ctl, ev);
3309 		break;
3310 	case 2: /* Was common; is no longer common */
3311 		ctl = base->common_timeout_queues[old_timeout_idx];
3312 		TAILQ_REMOVE(&ctl->events, ev,
3313 		    ev_timeout_pos.ev_next_with_common_timeout);
3314 		min_heap_push_(&base->timeheap, ev);
3315 		break;
3316 	case 1: /* Wasn't common; has become common. */
3317 		min_heap_erase_(&base->timeheap, ev);
3318 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3319 		insert_common_timeout_inorder(ctl, ev);
3320 		break;
3321 	case 0: /* was in heap; is still on heap. */
3322 		min_heap_adjust_(&base->timeheap, ev);
3323 		break;
3324 	default:
3325 		EVUTIL_ASSERT(0); /* unreachable */
3326 		break;
3327 	}
3328 }
3329 #endif
3330 
3331 /* Add 'ev' to the common timeout list 'ctl'. */
3332 static void
3333 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3334     struct event *ev)
3335 {
3336 	struct event *e;
3337 	/* By all logic, we should just be able to append 'ev' to the end of
3338 	 * ctl->events, since the timeout on each 'ev' is set to {the common
3339 	 * timeout} + {the time when we add the event}, and so the events
3340 	 * should arrive in order of their timeeouts.  But just in case
3341 	 * should arrive in order of their timeouts.  But just in case
3342 	 * there's some wacky threading issue going on, we do a search from
3343 	 * the end of 'ctl->events' to find the right insertion point.
3344 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
3345 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3346 		/* This timercmp is a little sneaky, since both ev and e have
3347 		 * magic values in tv_usec.  Fortunately, they ought to have
3348 		 * the _same_ magic values in tv_usec.  Let's assert for that.
3349 		 */
3350 		EVUTIL_ASSERT(
3351 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3352 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3353 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3354 			    ev_timeout_pos.ev_next_with_common_timeout);
3355 			return;
3356 		}
3357 	}
3358 	TAILQ_INSERT_HEAD(&ctl->events, ev,
3359 	    ev_timeout_pos.ev_next_with_common_timeout);
3360 }
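/*
 * Illustrative sketch (not part of this file): applications opt in to this
 * common-timeout machinery through event_base_init_common_timeout(), which
 * returns a magic timeval to pass to event_add() for every event sharing
 * that duration:
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &ten_sec);
 *	// Many 10-second timeouts can now be queued cheaply:
 *	event_add(ev, common);
 *
 * Events added with an ordinary timeval keep using the min-heap instead.
 */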
3361 
3362 static void
3363 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3364 {
3365 	EVENT_BASE_ASSERT_LOCKED(base);
3366 
3367 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3368 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3369 		    ev, EV_SOCK_ARG(ev->ev_fd));
3370 		return;
3371 	}
3372 
3373 	INCR_EVENT_COUNT(base, ev->ev_flags);
3374 
3375 	ev->ev_flags |= EVLIST_INSERTED;
3376 }
3377 
3378 static void
3379 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3380 {
3381 	EVENT_BASE_ASSERT_LOCKED(base);
3382 
3383 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
3384 		/* Double insertion is possible for active events */
3385 		return;
3386 	}
3387 
3388 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3389 
3390 	evcb->evcb_flags |= EVLIST_ACTIVE;
3391 
3392 	base->event_count_active++;
3393 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3394 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3395 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3396 	    evcb, evcb_active_next);
3397 }
3398 
3399 static void
3400 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3401 {
3402 	EVENT_BASE_ASSERT_LOCKED(base);
3403 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3404 		/* Double insertion is possible */
3405 		return;
3406 	}
3407 
3408 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3409 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3410 	base->event_count_active++;
3411 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3412 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3413 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3414 }
3415 
3416 static void
3417 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3418 {
3419 	EVENT_BASE_ASSERT_LOCKED(base);
3420 
3421 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3422 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3423 		    ev, EV_SOCK_ARG(ev->ev_fd));
3424 		return;
3425 	}
3426 
3427 	INCR_EVENT_COUNT(base, ev->ev_flags);
3428 
3429 	ev->ev_flags |= EVLIST_TIMEOUT;
3430 
3431 	if (is_common_timeout(&ev->ev_timeout, base)) {
3432 		struct common_timeout_list *ctl =
3433 		    get_common_timeout_list(base, &ev->ev_timeout);
3434 		insert_common_timeout_inorder(ctl, ev);
3435 	} else {
3436 		min_heap_push_(&base->timeheap, ev);
3437 	}
3438 }
3439 
3440 static void
3441 event_queue_make_later_events_active(struct event_base *base)
3442 {
3443 	struct event_callback *evcb;
3444 	EVENT_BASE_ASSERT_LOCKED(base);
3445 
3446 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3447 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3448 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3449 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3450 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3451 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3452 	}
3453 }
3454 
3455 /* Functions for debugging */
3456 
3457 const char *
3458 event_get_version(void)
3459 {
3460 	return (EVENT__VERSION);
3461 }
3462 
3463 ev_uint32_t
3464 event_get_version_number(void)
3465 {
3466 	return (EVENT__NUMERIC_VERSION);
3467 }
3468 
3469 /*
3470  * No thread-safe interface needed - the information should be the same
3471  * for all threads.
3472  */
3473 
3474 const char *
3475 event_get_method(void)
3476 {
3477 	return (current_base->evsel->name);
3478 }
3479 
3480 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3481 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3482 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3483 static void (*mm_free_fn_)(void *p) = NULL;
3484 
3485 void *
3486 event_mm_malloc_(size_t sz)
3487 {
3488 	if (sz == 0)
3489 		return NULL;
3490 
3491 	if (mm_malloc_fn_)
3492 		return mm_malloc_fn_(sz);
3493 	else
3494 		return malloc(sz);
3495 }
3496 
3497 void *
3498 event_mm_calloc_(size_t count, size_t size)
3499 {
3500 	if (count == 0 || size == 0)
3501 		return NULL;
3502 
3503 	if (mm_malloc_fn_) {
3504 		size_t sz = count * size;
3505 		void *p = NULL;
3506 		if (count > EV_SIZE_MAX / size)
3507 			goto error;
3508 		p = mm_malloc_fn_(sz);
3509 		if (p)
3510 			return memset(p, 0, sz);
3511 	} else {
3512 		void *p = calloc(count, size);
3513 #ifdef _WIN32
3514 		/* Windows calloc doesn't reliably set ENOMEM */
3515 		if (p == NULL)
3516 			goto error;
3517 #endif
3518 		return p;
3519 	}
3520 
3521 error:
3522 	errno = ENOMEM;
3523 	return NULL;
3524 }
3525 
3526 char *
3527 event_mm_strdup_(const char *str)
3528 {
3529 	if (!str) {
3530 		errno = EINVAL;
3531 		return NULL;
3532 	}
3533 
3534 	if (mm_malloc_fn_) {
3535 		size_t ln = strlen(str);
3536 		void *p = NULL;
3537 		if (ln == EV_SIZE_MAX)
3538 			goto error;
3539 		p = mm_malloc_fn_(ln+1);
3540 		if (p)
3541 			return memcpy(p, str, ln+1);
3542 	} else
3543 #ifdef _WIN32
3544 		return _strdup(str);
3545 #else
3546 		return strdup(str);
3547 #endif
3548 
3549 error:
3550 	errno = ENOMEM;
3551 	return NULL;
3552 }
3553 
3554 void *
3555 event_mm_realloc_(void *ptr, size_t sz)
3556 {
3557 	if (mm_realloc_fn_)
3558 		return mm_realloc_fn_(ptr, sz);
3559 	else
3560 		return realloc(ptr, sz);
3561 }
3562 
3563 void
3564 event_mm_free_(void *ptr)
3565 {
3566 	if (mm_free_fn_)
3567 		mm_free_fn_(ptr);
3568 	else
3569 		free(ptr);
3570 }
3571 
3572 void
3573 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3574 			void *(*realloc_fn)(void *ptr, size_t sz),
3575 			void (*free_fn)(void *ptr))
3576 {
3577 	mm_malloc_fn_ = malloc_fn;
3578 	mm_realloc_fn_ = realloc_fn;
3579 	mm_free_fn_ = free_fn;
3580 }
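/*
 * Illustrative sketch (not part of this file): replacement allocators must
 * be installed before any other libevent call allocates memory, and they
 * must follow the usual malloc/realloc/free contract.  Wiring in counting
 * wrappers (the wrapper names are hypothetical):
 *
 *	static size_t nallocs, nfrees;
 *	static void *counting_malloc(size_t sz) { nallocs++; return malloc(sz); }
 *	static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void counting_free(void *p) { nfrees++; free(p); }
 *
 *	event_set_mem_functions(counting_malloc, counting_realloc,
 *	    counting_free);
 */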
3581 #endif
3582 
3583 #ifdef EVENT__HAVE_EVENTFD
3584 static void
3585 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3586 {
3587 	ev_uint64_t msg;
3588 	ev_ssize_t r;
3589 	struct event_base *base = arg;
3590 
3591 	r = read(fd, (void*) &msg, sizeof(msg));
3592 	if (r<0 && errno != EAGAIN) {
3593 		event_sock_warn(fd, "Error reading from eventfd");
3594 	}
3595 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3596 	base->is_notify_pending = 0;
3597 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3598 }
3599 #endif
3600 
3601 static void
3602 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3603 {
3604 	unsigned char buf[1024];
3605 	struct event_base *base = arg;
3606 #ifdef _WIN32
3607 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3608 		;
3609 #else
3610 	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3611 		;
3612 #endif
3613 
3614 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3615 	base->is_notify_pending = 0;
3616 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3617 }
3618 
3619 int
3620 evthread_make_base_notifiable(struct event_base *base)
3621 {
3622 	int r;
3623 	if (!base)
3624 		return -1;
3625 
3626 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3627 	r = evthread_make_base_notifiable_nolock_(base);
3628 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3629 	return r;
3630 }
3631 
3632 static int
3633 evthread_make_base_notifiable_nolock_(struct event_base *base)
3634 {
3635 	void (*cb)(evutil_socket_t, short, void *);
3636 	int (*notify)(struct event_base *);
3637 
3638 	if (base->th_notify_fn != NULL) {
3639 		/* The base is already notifiable: we're doing fine. */
3640 		return 0;
3641 	}
3642 
3643 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3644 	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3645 		base->th_notify_fn = event_kq_notify_base_;
3646 		/* No need to add an event here; the backend can wake
3647 		 * itself up just fine. */
3648 		return 0;
3649 	}
3650 #endif
3651 
3652 #ifdef EVENT__HAVE_EVENTFD
3653 	base->th_notify_fd[0] = evutil_eventfd_(0,
3654 	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3655 	if (base->th_notify_fd[0] >= 0) {
3656 		base->th_notify_fd[1] = -1;
3657 		notify = evthread_notify_base_eventfd;
3658 		cb = evthread_notify_drain_eventfd;
3659 	} else
3660 #endif
3661 	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3662 		notify = evthread_notify_base_default;
3663 		cb = evthread_notify_drain_default;
3664 	} else {
3665 		return -1;
3666 	}
3667 
3668 	base->th_notify_fn = notify;
3669 
3670 	/* prepare an event that we can use for wakeup */
3671 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3672 				 EV_READ|EV_PERSIST, cb, base);
3673 
3674 	/* we need to mark this as an internal event */
3675 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
3676 	event_priority_set(&base->th_notify, 0);
3677 
3678 	return event_add_nolock_(&base->th_notify, NULL, 0);
3679 }
3680 
3681 int
3682 event_base_foreach_event_nolock_(struct event_base *base,
3683     event_base_foreach_event_cb fn, void *arg)
3684 {
3685 	int r, i;
3686 	unsigned u;
3687 	struct event *ev;
3688 
3689 	/* Start out with all the EVLIST_INSERTED events. */
3690 	if ((r = evmap_foreach_event_(base, fn, arg)))
3691 		return r;
3692 
3693 	/* Okay, now we deal with those events that have timeouts and are in
3694 	 * the min-heap. */
3695 	for (u = 0; u < base->timeheap.n; ++u) {
3696 		ev = base->timeheap.p[u];
3697 		if (ev->ev_flags & EVLIST_INSERTED) {
3698 			/* we already processed this one */
3699 			continue;
3700 		}
3701 		if ((r = fn(base, ev, arg)))
3702 			return r;
3703 	}
3704 
3705 	/* Now for the events in one of the common-timeout queues, rather
3706 	 * than the min-heap. */
3707 	for (i = 0; i < base->n_common_timeouts; ++i) {
3708 		struct common_timeout_list *ctl =
3709 		    base->common_timeout_queues[i];
3710 		TAILQ_FOREACH(ev, &ctl->events,
3711 		    ev_timeout_pos.ev_next_with_common_timeout) {
3712 			if (ev->ev_flags & EVLIST_INSERTED) {
3713 				/* we already processed this one */
3714 				continue;
3715 			}
3716 			if ((r = fn(base, ev, arg)))
3717 				return r;
3718 		}
3719 	}
3720 
3721 	/* Finally, we deal with all the active events that we haven't touched
3722 	 * yet. */
3723 	for (i = 0; i < base->nactivequeues; ++i) {
3724 		struct event_callback *evcb;
3725 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3726 			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3727 				/* This isn't an event (EVLIST_INIT not set), or
3728 				 * we already processed it (EVLIST_INSERTED or
3729 				 * EVLIST_TIMEOUT set). */
3730 				continue;
3731 			}
3732 			ev = event_callback_to_event(evcb);
3733 			if ((r = fn(base, ev, arg)))
3734 				return r;
3735 		}
3736 	}
3737 
3738 	return 0;
3739 }
3740 
3741 /* Helper for event_base_dump_events: called on each event in the event base;
3742  * dumps only the inserted events. */
3743 static int
3744 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3745 {
3746 	FILE *output = arg;
3747 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3748 	    "sig" : "fd ";
3749 
3750 	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3751 		return 0;
3752 
3753 	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3754 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3755 	    (e->ev_events&EV_READ)?" Read":"",
3756 	    (e->ev_events&EV_WRITE)?" Write":"",
3757 	    (e->ev_events&EV_CLOSED)?" EOF":"",
3758 	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3759 	    (e->ev_events&EV_PERSIST)?" Persist":"",
3760 	    (e->ev_events&EV_ET)?" ET":"",
3761 	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3762 	if (e->ev_flags & EVLIST_TIMEOUT) {
3763 		struct timeval tv;
3764 		tv.tv_sec = e->ev_timeout.tv_sec;
3765 		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3766 		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3767 		fprintf(output, " Timeout=%ld.%06d",
3768 		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3769 	}
3770 	fputc('\n', output);
3771 
3772 	return 0;
3773 }
3774 
3775 /* Helper for event_base_dump_events: called on each event in the event base;
3776  * dumps only the active events. */
3777 static int
3778 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3779 {
3780 	FILE *output = arg;
3781 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3782 	    "sig" : "fd ";
3783 
3784 	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3785 		return 0;
3786 
3787 	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3788 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3789 	    (e->ev_res&EV_READ)?" Read":"",
3790 	    (e->ev_res&EV_WRITE)?" Write":"",
3791 	    (e->ev_res&EV_CLOSED)?" EOF":"",
3792 	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3793 	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3794 	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3795 	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3796 
3797 	return 0;
3798 }
3799 
3800 int
3801 event_base_foreach_event(struct event_base *base,
3802     event_base_foreach_event_cb fn, void *arg)
3803 {
3804 	int r;
3805 	if ((!fn) || (!base)) {
3806 		return -1;
3807 	}
3808 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3809 	r = event_base_foreach_event_nolock_(base, fn, arg);
3810 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3811 	return r;
3812 }
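/*
 * Illustrative sketch (not part of this file): counting events with
 * event_base_foreach_event().  The callback runs with the base lock held
 * and must not add, delete, or activate events on the base.
 *
 *	static int
 *	count_cb(const struct event_base *b, const struct event *e, void *arg)
 *	{
 *		++*(int *)arg;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	event_base_foreach_event(base, count_cb, &n);
 *
 * count_cb and n are hypothetical names used only for this example.
 */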
3813 
3814 
3815 void
3816 event_base_dump_events(struct event_base *base, FILE *output)
3817 {
3818 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3819 	fprintf(output, "Inserted events:\n");
3820 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3821 
3822 	fprintf(output, "Active events:\n");
3823 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3824 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3825 }
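/*
 * Example of the output format produced above (addresses, fds, and signal
 * numbers are illustrative only):
 *
 *	Inserted events:
 *	  0x55e2c0a0 [fd  6] Read Persist
 *	  0x55e2c120 [sig 2] Signal Persist
 *	Active events:
 *	  0x55e2c0a0 [fd  6, priority=0] Read active
 */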
3826 
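/* Activate, as if they had just been triggered, the events on 'base' that
 * are associated with file descriptor 'fd'.  EV_READ/EV_WRITE/EV_CLOSED
 * activations are handled by the evmap; when EV_TIMEOUT is requested, the
 * timer heap and every common-timeout queue are scanned for events with a
 * matching fd. */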
3827 void
3828 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3829 {
3830 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3831 
3832 	/* Activate any non-timer events */
3833 	if (!(events & EV_TIMEOUT)) {
3834 		evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3835 	} else {
3836 		/* If we want to activate timer events, loop and activate each event
3837 		 * with the same fd in both the timeheap and the common-timeout lists. */
3838 		int i;
3839 		unsigned u;
3840 		struct event *ev;
3841 
3842 		for (u = 0; u < base->timeheap.n; ++u) {
3843 			ev = base->timeheap.p[u];
3844 			if (ev->ev_fd == fd) {
3845 				event_active_nolock_(ev, EV_TIMEOUT, 1);
3846 			}
3847 		}
3848 
3849 		for (i = 0; i < base->n_common_timeouts; ++i) {
3850 			struct common_timeout_list *ctl = base->common_timeout_queues[i];
3851 			TAILQ_FOREACH(ev, &ctl->events,
3852 				ev_timeout_pos.ev_next_with_common_timeout) {
3853 				if (ev->ev_fd == fd) {
3854 					event_active_nolock_(ev, EV_TIMEOUT, 1);
3855 				}
3856 			}
3857 		}
3858 	}
3859 
3860 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3861 }
3862 
3863 void
3864 event_base_active_by_signal(struct event_base *base, int sig)
3865 {
3866 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3867 	evmap_signal_active_(base, sig, 1);
3868 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3869 }
3870 
3871 
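/* Virtual events keep the loop alive without a backing fd or signal: they
 * are counted in virtual_event_count, which event_haveevents() consults,
 * so the dispatch loop does not exit for lack of events.  They are used
 * internally, e.g. by the IOCP code on Windows. */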
3872 void
3873 event_base_add_virtual_(struct event_base *base)
3874 {
3875 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3876 	base->virtual_event_count++;
3877 	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3878 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3879 }
3880 
3881 void
3882 event_base_del_virtual_(struct event_base *base)
3883 {
3884 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3885 	EVUTIL_ASSERT(base->virtual_event_count > 0);
3886 	base->virtual_event_count--;
3887 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3888 		evthread_notify_base(base);
3889 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3890 }
3891 
3892 static void
3893 event_free_debug_globals_locks(void)
3894 {
3895 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3896 #ifndef EVENT__DISABLE_DEBUG_MODE
3897 	if (event_debug_map_lock_ != NULL) {
3898 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3899 		event_debug_map_lock_ = NULL;
3900 		evthreadimpl_disable_lock_debugging_();
3901 	}
3902 #endif /* EVENT__DISABLE_DEBUG_MODE */
3903 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3904 	return;
3905 }
3906 
3907 static void
3908 event_free_debug_globals(void)
3909 {
3910 	event_free_debug_globals_locks();
3911 }
3912 
3913 static void
3914 event_free_evsig_globals(void)
3915 {
3916 	evsig_free_globals_();
3917 }
3918 
3919 static void
3920 event_free_evutil_globals(void)
3921 {
3922 	evutil_free_globals_();
3923 }
3924 
3925 static void
3926 event_free_globals(void)
3927 {
3928 	event_free_debug_globals();
3929 	event_free_evsig_globals();
3930 	event_free_evutil_globals();
3931 }
3932 
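/* Release the globals that libevent allocates lazily: the debug-mode
 * lock, the signal-handling state, and the evutil state.  Per the public
 * documentation, libevent_global_shutdown() should be called at most
 * once, after every event and event_base has been freed, typically just
 * before the program exits. */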
3933 void
3934 libevent_global_shutdown(void)
3935 {
3936 	event_disable_debug_mode();
3937 	event_free_globals();
3938 }
3939 
3940 #ifndef EVENT__DISABLE_THREAD_SUPPORT
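/* Set up the process-wide locks used by the debug map, the signal
 * handling code, evutil, and the secure RNG.  This is invoked from the
 * evthread setup path when an application enables thread support. */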
3941 int
3942 event_global_setup_locks_(const int enable_locks)
3943 {
3944 #ifndef EVENT__DISABLE_DEBUG_MODE
3945 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3946 #endif
3947 	if (evsig_global_setup_locks_(enable_locks) < 0)
3948 		return -1;
3949 	if (evutil_global_setup_locks_(enable_locks) < 0)
3950 		return -1;
3951 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3952 		return -1;
3953 	return 0;
3954 }
3955 #endif
3956 
3957 void
3958 event_base_assert_ok_(struct event_base *base)
3959 {
3960 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3961 	event_base_assert_ok_nolock_(base);
3962 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3963 }
3964 
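/* Sanity-check the internal data structures of 'base': the per-fd and
 * per-signal event maps, the ordering of the timeout min-heap, the sorted
 * common-timeout queues, and the active/active-later queues, whose
 * combined length must equal event_count_active. */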
3965 void
3966 event_base_assert_ok_nolock_(struct event_base *base)
3967 {
3968 	int i;
3969 	int count;
3970 
3971 	/* First do checks on the per-fd and per-signal lists */
3972 	evmap_check_integrity_(base);
3973 
3974 	/* Check the heap property */
3975 	for (i = 1; i < (int)base->timeheap.n; ++i) {
3976 		int parent = (i - 1) / 2;
3977 		struct event *ev, *p_ev;
3978 		ev = base->timeheap.p[i];
3979 		p_ev = base->timeheap.p[parent];
3980 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3981 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3982 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3983 	}
3984 
3985 	/* Check that the common timeouts are fine */
3986 	for (i = 0; i < base->n_common_timeouts; ++i) {
3987 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3988 		struct event *last=NULL, *ev;
3989 
3990 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3991 
3992 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3993 			if (last)
3994 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3995 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3996 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3997 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3998 			last = ev;
3999 		}
4000 	}
4001 
4002 	/* Check the active queues. */
4003 	count = 0;
4004 	for (i = 0; i < base->nactivequeues; ++i) {
4005 		struct event_callback *evcb;
4006 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4007 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4008 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4009 			EVUTIL_ASSERT(evcb->evcb_pri == i);
4010 			++count;
4011 		}
4012 	}
4013 
4014 	{
4015 		struct event_callback *evcb;
4016 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4017 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4018 			++count;
4019 		}
4020 	}
4021 	EVUTIL_ASSERT(count == base->event_count_active);
4022 }
4023