1 /* $NetBSD: event.c,v 1.6 2023/08/03 08:03:19 mrg Exp $ */
2
3 /*
4 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29 #include "event2/event-config.h"
30 #include <sys/cdefs.h>
31 __RCSID("$NetBSD: event.c,v 1.6 2023/08/03 08:03:19 mrg Exp $");
32 #include "evconfig-private.h"
33
34 #ifdef _WIN32
35 #include <winsock2.h>
36 #define WIN32_LEAN_AND_MEAN
37 #include <windows.h>
38 #undef WIN32_LEAN_AND_MEAN
39 #endif
40 #include <sys/types.h>
41 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
42 #include <sys/time.h>
43 #endif
44 #include <sys/queue.h>
45 #ifdef EVENT__HAVE_SYS_SOCKET_H
46 #include <sys/socket.h>
47 #endif
48 #include <stdio.h>
49 #include <stdlib.h>
50 #ifdef EVENT__HAVE_UNISTD_H
51 #include <unistd.h>
52 #endif
53 #include <ctype.h>
54 #include <errno.h>
55 #include <signal.h>
56 #include <string.h>
57 #include <time.h>
58 #include <limits.h>
59 #ifdef EVENT__HAVE_FCNTL_H
60 #include <fcntl.h>
61 #endif
62
63 #include "event2/event.h"
64 #include "event2/event_struct.h"
65 #include "event2/event_compat.h"
66 #include "event-internal.h"
67 #include "defer-internal.h"
68 #include "evthread-internal.h"
69 #include "event2/thread.h"
70 #include "event2/util.h"
71 #include "log-internal.h"
72 #include "evmap-internal.h"
73 #include "iocp-internal.h"
74 #include "changelist-internal.h"
75 #define HT_NO_CACHE_HASH_VALUES
76 #include "ht-internal.h"
77 #include "util-internal.h"
78
79
80 #ifdef EVENT__HAVE_WORKING_KQUEUE
81 #include "kqueue-internal.h"
82 #endif
83
84 #ifdef EVENT__HAVE_EVENT_PORTS
85 extern const struct eventop evportops;
86 #endif
87 #ifdef EVENT__HAVE_SELECT
88 extern const struct eventop selectops;
89 #endif
90 #ifdef EVENT__HAVE_POLL
91 extern const struct eventop pollops;
92 #endif
93 #ifdef EVENT__HAVE_EPOLL
94 extern const struct eventop epollops;
95 #endif
96 #ifdef EVENT__HAVE_WORKING_KQUEUE
97 extern const struct eventop kqops;
98 #endif
99 #ifdef EVENT__HAVE_DEVPOLL
100 extern const struct eventop devpollops;
101 #endif
102 #ifdef _WIN32
103 extern const struct eventop win32ops;
104 #endif
105
106 /* Array of backends in order of preference. */
107 static const struct eventop *eventops[] = {
108 #ifdef EVENT__HAVE_EVENT_PORTS
109 &evportops,
110 #endif
111 #ifdef EVENT__HAVE_WORKING_KQUEUE
112 &kqops,
113 #endif
114 #ifdef EVENT__HAVE_EPOLL
115 &epollops,
116 #endif
117 #ifdef EVENT__HAVE_DEVPOLL
118 &devpollops,
119 #endif
120 #ifdef EVENT__HAVE_POLL
121 &pollops,
122 #endif
123 #ifdef EVENT__HAVE_SELECT
124 &selectops,
125 #endif
126 #ifdef _WIN32
127 &win32ops,
128 #endif
129 NULL
130 };
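/*
 * Illustrative note: the array above is scanned front to back by
 * event_base_new_with_config(), so the first backend that is compiled in,
 * not avoided by the configuration, and not disabled via the environment is
 * the one selected.  A sketch of application code that skips epoll in favor
 * of a later backend (illustration only, not used by this file):
 *
 *     struct event_config *cfg = event_config_new();
 *     struct event_base *base = NULL;
 *     if (cfg && event_config_avoid_method(cfg, "epoll") == 0)
 *         base = event_base_new_with_config(cfg);
 *     if (cfg)
 *         event_config_free(cfg);
 */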
131
132 /* Global state; deprecated */
133 EVENT2_EXPORT_SYMBOL
134 struct event_base *event_global_current_base_ = NULL;
135 #define current_base event_global_current_base_
136
137 /* Global state */
138
139 static void *event_self_cbarg_ptr_ = NULL;
140
141 /* Prototypes */
142 static void event_queue_insert_active(struct event_base *, struct event_callback *);
143 static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
144 static void event_queue_insert_timeout(struct event_base *, struct event *);
145 static void event_queue_insert_inserted(struct event_base *, struct event *);
146 static void event_queue_remove_active(struct event_base *, struct event_callback *);
147 static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
148 static void event_queue_remove_timeout(struct event_base *, struct event *);
149 static void event_queue_remove_inserted(struct event_base *, struct event *);
150 static void event_queue_make_later_events_active(struct event_base *base);
151
152 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
153 static int event_del_(struct event *ev, int blocking);
154
155 #ifdef USE_REINSERT_TIMEOUT
156 /* This code seems buggy; only turn it on if we find out what the trouble is. */
157 static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
158 #endif
159
160 static int event_haveevents(struct event_base *);
161
162 static int event_process_active(struct event_base *);
163
164 static int timeout_next(struct event_base *, struct timeval **);
165 static void timeout_process(struct event_base *);
166
167 static inline void event_signal_closure(struct event_base *, struct event *ev);
168 static inline void event_persist_closure(struct event_base *, struct event *ev);
169
170 static int evthread_notify_base(struct event_base *base);
171
172 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
173 struct event *ev);
174
175 #ifndef EVENT__DISABLE_DEBUG_MODE
176 /* These functions implement a hashtable recording which 'struct event *' structures
177 * have been set up or added. We don't want to trust the content of the struct
178 * event itself, since we're trying to work through cases where an event gets
179 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
180 */
181
182 struct event_debug_entry {
183 HT_ENTRY(event_debug_entry) node;
184 const struct event *ptr;
185 unsigned added : 1;
186 };
187
188 static inline unsigned
189 hash_debug_entry(const struct event_debug_entry *e)
190 {
191 /* We need to do this silliness to convince compilers that we
192 * honestly mean to cast e->ptr to an integer, and discard any
193 * part of it that doesn't fit in an unsigned.
194 */
195 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
196 /* Our hashtable implementation is pretty sensitive to low bits,
197 * and every struct event is over 64 bytes in size, so we can
198 * just say >>6. */
199 return (u >> 6);
200 }
201
202 static inline int
203 eq_debug_entry(const struct event_debug_entry *a,
204 const struct event_debug_entry *b)
205 {
206 return a->ptr == b->ptr;
207 }
208
209 int event_debug_mode_on_ = 0;
210
211
212 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
213 /**
214 * @brief debug mode variable which is set for any function/structure that needs
215 * to be shared across threads (if thread support is enabled).
216 *
217 * When and if evthreads are initialized, this variable will be evaluated,
218 * and if set to something other than zero, this means the evthread setup
219 * functions were called out of order.
220 *
221 * See: "Locks and threading" in the documentation.
222 */
223 int event_debug_created_threadable_ctx_ = 0;
224 #endif
225
226 /* Set if it's too late to enable event_debug_mode. */
227 static int event_debug_mode_too_late = 0;
228 #ifndef EVENT__DISABLE_THREAD_SUPPORT
229 static void *event_debug_map_lock_ = NULL;
230 #endif
231 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
232 HT_INITIALIZER();
233
234 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
235 eq_debug_entry)
236 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
237 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
238
239 /* record that ev is now setup (that is, ready for an add) */
240 static void event_debug_note_setup_(const struct event *ev)
241 {
242 struct event_debug_entry *dent, find;
243
244 if (!event_debug_mode_on_)
245 goto out;
246
247 find.ptr = ev;
248 EVLOCK_LOCK(event_debug_map_lock_, 0);
249 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
250 if (dent) {
251 dent->added = 0;
252 } else {
253 dent = mm_malloc(sizeof(*dent));
254 if (!dent)
255 event_err(1,
256 "Out of memory in debugging code");
257 dent->ptr = ev;
258 dent->added = 0;
259 HT_INSERT(event_debug_map, &global_debug_map, dent);
260 }
261 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
262
263 out:
264 event_debug_mode_too_late = 1;
265 }
266 /* record that ev is no longer setup */
267 static void event_debug_note_teardown_(const struct event *ev)
268 {
269 struct event_debug_entry *dent, find;
270
271 if (!event_debug_mode_on_)
272 goto out;
273
274 find.ptr = ev;
275 EVLOCK_LOCK(event_debug_map_lock_, 0);
276 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
277 if (dent)
278 mm_free(dent);
279 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
280
281 out:
282 event_debug_mode_too_late = 1;
283 }
284 /* record that ev is now added */
285 static void event_debug_note_add_(const struct event *ev)
286 {
287 struct event_debug_entry *dent,find;
288
289 if (!event_debug_mode_on_)
290 goto out;
291
292 find.ptr = ev;
293 EVLOCK_LOCK(event_debug_map_lock_, 0);
294 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
295 if (dent) {
296 dent->added = 1;
297 } else {
298 event_errx(EVENT_ERR_ABORT_,
299 "%s: noting an add on a non-setup event %p"
300 " (events: 0x%x, fd: "EV_SOCK_FMT
301 ", flags: 0x%x)",
302 __func__, ev, ev->ev_events,
303 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
304 }
305 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
306
307 out:
308 event_debug_mode_too_late = 1;
309 }
310 /* record that ev is no longer added */
311 static void event_debug_note_del_(const struct event *ev)
312 {
313 struct event_debug_entry *dent, find;
314
315 if (!event_debug_mode_on_)
316 goto out;
317
318 find.ptr = ev;
319 EVLOCK_LOCK(event_debug_map_lock_, 0);
320 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
321 if (dent) {
322 dent->added = 0;
323 } else {
324 event_errx(EVENT_ERR_ABORT_,
325 "%s: noting a del on a non-setup event %p"
326 " (events: 0x%x, fd: "EV_SOCK_FMT
327 ", flags: 0x%x)",
328 __func__, ev, ev->ev_events,
329 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
330 }
331 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
332
333 out:
334 event_debug_mode_too_late = 1;
335 }
336 /* assert that ev is setup (i.e., okay to add or inspect) */
337 static void event_debug_assert_is_setup_(const struct event *ev)
338 {
339 struct event_debug_entry *dent, find;
340
341 if (!event_debug_mode_on_)
342 return;
343
344 find.ptr = ev;
345 EVLOCK_LOCK(event_debug_map_lock_, 0);
346 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
347 if (!dent) {
348 event_errx(EVENT_ERR_ABORT_,
349 "%s called on a non-initialized event %p"
350 " (events: 0x%x, fd: "EV_SOCK_FMT
351 ", flags: 0x%x)",
352 __func__, ev, ev->ev_events,
353 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
354 }
355 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
356 }
357 /* assert that ev is not added (i.e., okay to tear down or set up again) */
358 static void event_debug_assert_not_added_(const struct event *ev)
359 {
360 struct event_debug_entry *dent, find;
361
362 if (!event_debug_mode_on_)
363 return;
364
365 find.ptr = ev;
366 EVLOCK_LOCK(event_debug_map_lock_, 0);
367 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
368 if (dent && dent->added) {
369 event_errx(EVENT_ERR_ABORT_,
370 "%s called on an already added event %p"
371 " (events: 0x%x, fd: "EV_SOCK_FMT", "
372 "flags: 0x%x)",
373 __func__, ev, ev->ev_events,
374 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
375 }
376 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
377 }
378 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
379 {
380 if (!event_debug_mode_on_)
381 return;
382 if (fd < 0)
383 return;
384
385 #ifndef _WIN32
386 {
387 int flags;
388 if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
389 EVUTIL_ASSERT(flags & O_NONBLOCK);
390 }
391 }
392 #endif
393 }
394 #else
395 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
396 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
397 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
398 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
399 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
400 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
401 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
402 #endif
403
404 #define EVENT_BASE_ASSERT_LOCKED(base) \
405 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
406
407 /* How often (in seconds) do we check for changes in wall clock time relative
408 * to monotonic time? Set this to -1 for 'never.' */
409 #define CLOCK_SYNC_INTERVAL 5
410
411 /** Set 'tp' to the current time according to 'base'. We must hold the lock
412 * on 'base'. If there is a cached time, return it. Otherwise, use
413 * clock_gettime or gettimeofday as appropriate to find out the right time.
414 * Return 0 on success, -1 on failure.
415 */
416 static int
417 gettime(struct event_base *base, struct timeval *tp)
418 {
419 EVENT_BASE_ASSERT_LOCKED(base);
420
421 if (base->tv_cache.tv_sec) {
422 *tp = base->tv_cache;
423 return (0);
424 }
425
426 if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
427 return -1;
428 }
429
430 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
431 < tp->tv_sec) {
432 struct timeval tv;
433 evutil_gettimeofday(&tv,NULL);
434 evutil_timersub(&tv, tp, &base->tv_clock_diff);
435 base->last_updated_clock_diff = tp->tv_sec;
436 }
437
438 return 0;
439 }
440
441 int
442 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
443 {
444 int r;
445 if (!base) {
446 base = current_base;
447 if (!current_base)
448 return evutil_gettimeofday(tv, NULL);
449 }
450
451 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
452 if (base->tv_cache.tv_sec == 0) {
453 r = evutil_gettimeofday(tv, NULL);
454 } else {
455 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
456 r = 0;
457 }
458 EVBASE_RELEASE_LOCK(base, th_base_lock);
459 return r;
460 }
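/*
 * Usage sketch (illustration only, not used by this file): from a callback
 * running on 'base', the cached value avoids one system call per invocation:
 *
 *     static void cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event_base *base = arg;
 *         struct timeval now;
 *         event_base_gettimeofday_cached(base, &now);
 *     }
 *
 * The result is only as fresh as the current loop iteration unless
 * EVENT_BASE_FLAG_NO_CACHE_TIME is set or event_base_update_cache_time()
 * is called.
 */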
461
462 /** Make 'base' have no current cached time. */
463 static inline void
464 clear_time_cache(struct event_base *base)
465 {
466 base->tv_cache.tv_sec = 0;
467 }
468
469 /** Replace the cached time in 'base' with the current time. */
470 static inline void
471 update_time_cache(struct event_base *base)
472 {
473 base->tv_cache.tv_sec = 0;
474 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
475 gettime(base, &base->tv_cache);
476 }
477
478 int
479 event_base_update_cache_time(struct event_base *base)
480 {
481
482 if (!base) {
483 base = current_base;
484 if (!current_base)
485 return -1;
486 }
487
488 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
489 if (base->running_loop)
490 update_time_cache(base);
491 EVBASE_RELEASE_LOCK(base, th_base_lock);
492 return 0;
493 }
494
495 static inline struct event *
496 event_callback_to_event(struct event_callback *evcb)
497 {
498 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
499 return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
500 }
501
502 static inline struct event_callback *
503 event_to_event_callback(struct event *ev)
504 {
505 return &ev->ev_evcallback;
506 }
507
508 struct event_base *
509 event_init(void)
510 {
511 struct event_base *base = event_base_new_with_config(NULL);
512
513 if (base == NULL) {
514 event_errx(1, "%s: Unable to construct event_base", __func__);
515 return NULL;
516 }
517
518 current_base = base;
519
520 return (base);
521 }
522
523 struct event_base *
524 event_base_new(void)
525 {
526 struct event_base *base = NULL;
527 struct event_config *cfg = event_config_new();
528 if (cfg) {
529 base = event_base_new_with_config(cfg);
530 event_config_free(cfg);
531 }
532 return base;
533 }
534
535 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
536 * avoid. */
537 static int
538 event_config_is_avoided_method(const struct event_config *cfg,
539 const char *method)
540 {
541 struct event_config_entry *entry;
542
543 TAILQ_FOREACH(entry, &cfg->entries, next) {
544 if (entry->avoid_method != NULL &&
545 strcmp(entry->avoid_method, method) == 0)
546 return (1);
547 }
548
549 return (0);
550 }
551
552 /** Return true iff 'method' is disabled according to the environment. */
553 static int
554 event_is_method_disabled(const char *name)
555 {
556 char environment[64];
557 int i;
558
559 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
560 for (i = 8; environment[i] != '\0'; ++i)
561 environment[i] = EVUTIL_TOUPPER_(environment[i]);
562 /* Note that evutil_getenv_() ignores the environment entirely if
563 * we're setuid */
564 return (evutil_getenv_(environment) != NULL);
565 }
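/*
 * For example (illustration only), an application can rule out a backend
 * through the environment rather than an event_config, provided the process
 * is not running setuid/setgid:
 *
 *     setenv("EVENT_NOEPOLL", "1", 1);
 *     struct event_base *base = event_base_new();
 */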
566
567 int
568 event_base_get_features(const struct event_base *base)
569 {
570 return base->evsel->features;
571 }
572
573 void
574 event_enable_debug_mode(void)
575 {
576 #ifndef EVENT__DISABLE_DEBUG_MODE
577 if (event_debug_mode_on_)
578 event_errx(1, "%s was called twice!", __func__);
579 if (event_debug_mode_too_late)
580 event_errx(1, "%s must be called *before* creating any events "
581 "or event_bases",__func__);
582
583 event_debug_mode_on_ = 1;
584
585 HT_INIT(event_debug_map, &global_debug_map);
586 #endif
587 }
588
589 void
590 event_disable_debug_mode(void)
591 {
592 #ifndef EVENT__DISABLE_DEBUG_MODE
593 struct event_debug_entry **ent, *victim;
594
595 EVLOCK_LOCK(event_debug_map_lock_, 0);
596 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
597 victim = *ent;
598 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
599 mm_free(victim);
600 }
601 HT_CLEAR(event_debug_map, &global_debug_map);
602 EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
603
604 event_debug_mode_on_ = 0;
605 #endif
606 }
607
608 struct event_base *
609 event_base_new_with_config(const struct event_config *cfg)
610 {
611 int i;
612 struct event_base *base;
613 int should_check_environment;
614
615 #ifndef EVENT__DISABLE_DEBUG_MODE
616 event_debug_mode_too_late = 1;
617 #endif
618
619 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
620 event_warn("%s: calloc", __func__);
621 return NULL;
622 }
623
624 if (cfg)
625 base->flags = cfg->flags;
626
627 should_check_environment =
628 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
629
630 {
631 struct timeval tmp;
632 int precise_time =
633 cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
634 int flags;
635 if (should_check_environment && !precise_time) {
636 precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
637 if (precise_time) {
638 base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
639 }
640 }
641 flags = precise_time ? EV_MONOT_PRECISE : 0;
642 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
643
644 gettime(base, &tmp);
645 }
646
647 min_heap_ctor_(&base->timeheap);
648
649 base->sig.ev_signal_pair[0] = -1;
650 base->sig.ev_signal_pair[1] = -1;
651 base->th_notify_fd[0] = -1;
652 base->th_notify_fd[1] = -1;
653
654 TAILQ_INIT(&base->active_later_queue);
655
656 evmap_io_initmap_(&base->io);
657 evmap_signal_initmap_(&base->sigmap);
658 event_changelist_init_(&base->changelist);
659
660 base->evbase = NULL;
661
662 if (cfg) {
663 memcpy(&base->max_dispatch_time,
664 &cfg->max_dispatch_interval, sizeof(struct timeval));
665 base->limit_callbacks_after_prio =
666 cfg->limit_callbacks_after_prio;
667 } else {
668 base->max_dispatch_time.tv_sec = -1;
669 base->limit_callbacks_after_prio = 1;
670 }
671 if (cfg && cfg->max_dispatch_callbacks >= 0) {
672 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
673 } else {
674 base->max_dispatch_callbacks = INT_MAX;
675 }
676 if (base->max_dispatch_callbacks == INT_MAX &&
677 base->max_dispatch_time.tv_sec == -1)
678 base->limit_callbacks_after_prio = INT_MAX;
679
680 for (i = 0; eventops[i] && !base->evbase; i++) {
681 if (cfg != NULL) {
682 /* determine if this backend should be avoided */
683 if (event_config_is_avoided_method(cfg,
684 eventops[i]->name))
685 continue;
686 if ((eventops[i]->features & cfg->require_features)
687 != cfg->require_features)
688 continue;
689 }
690
691 /* also obey the environment variables */
692 if (should_check_environment &&
693 event_is_method_disabled(eventops[i]->name))
694 continue;
695
696 base->evsel = eventops[i];
697
698 base->evbase = base->evsel->init(base);
699 }
700
701 if (base->evbase == NULL) {
702 event_warnx("%s: no event mechanism available",
703 __func__);
704 base->evsel = NULL;
705 event_base_free(base);
706 return NULL;
707 }
708
709 if (evutil_getenv_("EVENT_SHOW_METHOD"))
710 event_msgx("libevent using: %s", base->evsel->name);
711
712 /* allocate a single active event queue */
713 if (event_base_priority_init(base, 1) < 0) {
714 event_base_free(base);
715 return NULL;
716 }
717
718 /* prepare for threading */
719
720 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
721 event_debug_created_threadable_ctx_ = 1;
722 #endif
723
724 #ifndef EVENT__DISABLE_THREAD_SUPPORT
725 if (EVTHREAD_LOCKING_ENABLED() &&
726 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
727 int r;
728 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
729 EVTHREAD_ALLOC_COND(base->current_event_cond);
730 r = evthread_make_base_notifiable(base);
731 if (r<0) {
732 event_warnx("%s: Unable to make base notifiable.", __func__);
733 event_base_free(base);
734 return NULL;
735 }
736 }
737 #endif
738
739 #ifdef _WIN32
740 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
741 event_base_start_iocp_(base, cfg->n_cpus_hint);
742 #endif
743
744 return (base);
745 }
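/*
 * Caller-side sketch (illustration only; EV_FEATURE_O1 is one of the feature
 * flags declared in event2/event.h):
 *
 *     struct event_config *cfg = event_config_new();
 *     struct event_base *base = NULL;
 *     if (cfg) {
 *         event_config_require_features(cfg, EV_FEATURE_O1);
 *         base = event_base_new_with_config(cfg);
 *         event_config_free(cfg);
 *     }
 *
 * If no compiled-in backend offers the required features, the call returns
 * NULL and the application can fall back to event_base_new().
 */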
746
747 int
748 event_base_start_iocp_(struct event_base *base, int n_cpus)
749 {
750 #ifdef _WIN32
751 if (base->iocp)
752 return 0;
753 base->iocp = event_iocp_port_launch_(n_cpus);
754 if (!base->iocp) {
755 event_warnx("%s: Couldn't launch IOCP", __func__);
756 return -1;
757 }
758 return 0;
759 #else
760 return -1;
761 #endif
762 }
763
764 void
765 event_base_stop_iocp_(struct event_base *base)
766 {
767 #ifdef _WIN32
768 int rv;
769
770 if (!base->iocp)
771 return;
772 rv = event_iocp_shutdown_(base->iocp, -1);
773 EVUTIL_ASSERT(rv >= 0);
774 base->iocp = NULL;
775 #endif
776 }
777
778 static int
779 event_base_cancel_single_callback_(struct event_base *base,
780 struct event_callback *evcb,
781 int run_finalizers)
782 {
783 int result = 0;
784
785 if (evcb->evcb_flags & EVLIST_INIT) {
786 struct event *ev = event_callback_to_event(evcb);
787 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
788 event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
789 result = 1;
790 }
791 } else {
792 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
793 event_callback_cancel_nolock_(base, evcb, 1);
794 EVBASE_RELEASE_LOCK(base, th_base_lock);
795 result = 1;
796 }
797
798 if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
799 switch (evcb->evcb_closure) {
800 case EV_CLOSURE_EVENT_FINALIZE:
801 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
802 struct event *ev = event_callback_to_event(evcb);
803 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
804 if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
805 mm_free(ev);
806 break;
807 }
808 case EV_CLOSURE_CB_FINALIZE:
809 evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
810 break;
811 default:
812 break;
813 }
814 }
815 return result;
816 }
817
818 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
819 {
820 int deleted = 0, i;
821
822 for (i = 0; i < base->nactivequeues; ++i) {
823 struct event_callback *evcb, *next;
824 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
825 next = TAILQ_NEXT(evcb, evcb_active_next);
826 deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
827 evcb = next;
828 }
829 }
830
831 {
832 struct event_callback *evcb;
833 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
834 deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
835 }
836 }
837
838 return deleted;
839 }
840
841 static void
842 event_base_free_(struct event_base *base, int run_finalizers)
843 {
844 int i, n_deleted=0;
845 struct event *ev;
846 /* XXXX grab the lock? If there is contention when one thread frees
847 * the base, then the contending thread will be very sad soon. */
848
849 /* event_base_free(NULL) is how to free the current_base if we
850 * made it with event_init and forgot to hold a reference to it. */
851 if (base == NULL && current_base)
852 base = current_base;
853 /* Don't actually free NULL. */
854 if (base == NULL) {
855 event_warnx("%s: no base to free", __func__);
856 return;
857 }
858 /* XXX(niels) - check for internal events first */
859
860 #ifdef _WIN32
861 event_base_stop_iocp_(base);
862 #endif
863
864 /* threading fds if we have them */
865 if (base->th_notify_fd[0] != -1) {
866 event_del(&base->th_notify);
867 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
868 if (base->th_notify_fd[1] != -1)
869 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
870 base->th_notify_fd[0] = -1;
871 base->th_notify_fd[1] = -1;
872 event_debug_unassign(&base->th_notify);
873 }
874
875 /* Delete all non-internal events. */
876 evmap_delete_all_(base);
877
878 while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
879 event_del(ev);
880 ++n_deleted;
881 }
882 for (i = 0; i < base->n_common_timeouts; ++i) {
883 struct common_timeout_list *ctl =
884 base->common_timeout_queues[i];
885 event_del(&ctl->timeout_event); /* Internal; doesn't count */
886 event_debug_unassign(&ctl->timeout_event);
887 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
888 struct event *next = TAILQ_NEXT(ev,
889 ev_timeout_pos.ev_next_with_common_timeout);
890 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
891 event_del(ev);
892 ++n_deleted;
893 }
894 ev = next;
895 }
896 mm_free(ctl);
897 }
898 if (base->common_timeout_queues)
899 mm_free(base->common_timeout_queues);
900
901 for (;;) {
902 /* A finalizer can register yet another finalizer from within
903 * itself, and if that finalizer lands in active_later_queue it can
904 * be moved to activequeues, leaving events in
905 * activequeues after this function returns, which is not what we want
906 * (we even have an assertion for this).
907 *
908 * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
909 */
910 int ii = event_base_free_queues_(base, run_finalizers);
911 event_debug(("%s: %d events freed", __func__, ii));
912 if (!ii) {
913 break;
914 }
915 n_deleted += ii;
916 }
917
918 if (n_deleted)
919 event_debug(("%s: %d events were still set in base",
920 __func__, n_deleted));
921
922 while (LIST_FIRST(&base->once_events)) {
923 struct event_once *eonce = LIST_FIRST(&base->once_events);
924 LIST_REMOVE(eonce, next_once);
925 mm_free(eonce);
926 }
927
928 if (base->evsel != NULL && base->evsel->dealloc != NULL)
929 base->evsel->dealloc(base);
930
931 for (i = 0; i < base->nactivequeues; ++i)
932 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
933
934 EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
935 min_heap_dtor_(&base->timeheap);
936
937 mm_free(base->activequeues);
938
939 evmap_io_clear_(&base->io);
940 evmap_signal_clear_(&base->sigmap);
941 event_changelist_freemem_(&base->changelist);
942
943 EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
944 EVTHREAD_FREE_COND(base->current_event_cond);
945
946 /* If we're freeing current_base, there won't be a current_base. */
947 if (base == current_base)
948 current_base = NULL;
949 mm_free(base);
950 }
951
952 void
953 event_base_free_nofinalize(struct event_base *base)
954 {
955 event_base_free_(base, 0);
956 }
957
958 void
959 event_base_free(struct event_base *base)
960 {
961 event_base_free_(base, 1);
962 }
963
964 /* Fake eventop; used to disable the backend temporarily inside event_reinit
965 * so that we can call event_del() on an event without telling the backend.
966 */
967 static int
968 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
969 short events, void *fdinfo)
970 {
971 return 0;
972 }
973 const struct eventop nil_eventop = {
974 "nil",
975 NULL, /* init: unused. */
976 NULL, /* add: unused. */
977 nil_backend_del, /* del: used, so needs to be killed. */
978 NULL, /* dispatch: unused. */
979 NULL, /* dealloc: unused. */
980 0, 0, 0
981 };
982
983 /* reinitialize the event base after a fork */
984 int
985 event_reinit(struct event_base *base)
986 {
987 const struct eventop *evsel;
988 int res = 0;
989 int was_notifiable = 0;
990 int had_signal_added = 0;
991
992 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
993
994 evsel = base->evsel;
995
996 /* check if this event mechanism requires reinit on the backend */
997 if (evsel->need_reinit) {
998 /* We're going to call event_del() on our notify events (the
999 * ones that tell about signals and wakeup events). But we
1000 * don't actually want to tell the backend to change its
1001 * state, since it might still share some resource (a kqueue,
1002 * an epoll fd) with the parent process, and we don't want to
1003 * delete the fds from _that_ backend, so we temporarily stub out
1004 * the evsel with a replacement.
1005 */
1006 base->evsel = &nil_eventop;
1007 }
1008
1009 /* We need to re-create a new signal-notification fd and a new
1010 * thread-notification fd. Otherwise, we'll still share those with
1011 * the parent process, which would make any notification sent to them
1012 * get received by one or both of the event loops, more or less at
1013 * random.
1014 */
1015 if (base->sig.ev_signal_added) {
1016 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1017 event_debug_unassign(&base->sig.ev_signal);
1018 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1019 had_signal_added = 1;
1020 base->sig.ev_signal_added = 0;
1021 }
1022 if (base->sig.ev_signal_pair[0] != -1)
1023 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1024 if (base->sig.ev_signal_pair[1] != -1)
1025 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1026 if (base->th_notify_fn != NULL) {
1027 was_notifiable = 1;
1028 base->th_notify_fn = NULL;
1029 }
1030 if (base->th_notify_fd[0] != -1) {
1031 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1032 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1033 if (base->th_notify_fd[1] != -1)
1034 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1035 base->th_notify_fd[0] = -1;
1036 base->th_notify_fd[1] = -1;
1037 event_debug_unassign(&base->th_notify);
1038 }
1039
1040 /* Replace the original evsel. */
1041 base->evsel = evsel;
1042
1043 if (evsel->need_reinit) {
1044 /* Reconstruct the backend through brute-force, so that we do
1045 * not share any structures with the parent process. For some
1046 * backends, this is necessary: epoll and kqueue, for
1047 * instance, have events associated with a kernel
1048 * structure. If we didn't reinitialize, we'd share that
1049 * structure with the parent process, and any changes made by
1050 * the parent would affect our backend's behavior (and vice
1051 * versa).
1052 */
1053 if (base->evsel->dealloc != NULL)
1054 base->evsel->dealloc(base);
1055 base->evbase = evsel->init(base);
1056 if (base->evbase == NULL) {
1057 event_errx(1,
1058 "%s: could not reinitialize event mechanism",
1059 __func__);
1060 res = -1;
1061 goto done;
1062 }
1063
1064 /* Empty out the changelist (if any): we are starting from a
1065 * blank slate. */
1066 event_changelist_freemem_(&base->changelist);
1067
1068 /* Tell the event maps to re-inform the backend about all
1069 * pending events. This will make the signal notification
1070 * event get re-created if necessary. */
1071 if (evmap_reinit_(base) < 0)
1072 res = -1;
1073 } else {
1074 res = evsig_init_(base);
1075 if (res == 0 && had_signal_added) {
1076 res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1077 if (res == 0)
1078 base->sig.ev_signal_added = 1;
1079 }
1080 }
1081
1082 /* If we were notifiable before, and nothing just exploded, become
1083 * notifiable again. */
1084 if (was_notifiable && res == 0)
1085 res = evthread_make_base_notifiable_nolock_(base);
1086
1087 done:
1088 EVBASE_RELEASE_LOCK(base, th_base_lock);
1089 return (res);
1090 }
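/*
 * Intended call pattern (sketch, not used by this file; 'base' stands for an
 * event_base created before the fork): a child created with fork() must
 * reinitialize its base before dispatching, since an epoll fd or kqueue
 * would otherwise still be shared with the parent.
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *         if (event_reinit(base) == -1)
 *             _exit(1);
 *         event_base_dispatch(base);
 *     }
 */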
1091
1092 /* Get the monotonic time for this event_base's timer */
1093 int
1094 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1095 {
1096 int rv = -1;
1097
1098 if (base && tv) {
1099 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1100 rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1101 EVBASE_RELEASE_LOCK(base, th_base_lock);
1102 }
1103
1104 return rv;
1105 }
1106
1107 const char **
1108 event_get_supported_methods(void)
1109 {
1110 static const char **methods = NULL;
1111 const struct eventop **method;
1112 const char **tmp;
1113 int i = 0, k;
1114
1115 /* count all methods */
1116 for (method = &eventops[0]; *method != NULL; ++method) {
1117 ++i;
1118 }
1119
1120 /* allocate one more than we need for the NULL pointer */
1121 tmp = mm_calloc((i + 1), sizeof(char *));
1122 if (tmp == NULL)
1123 return (NULL);
1124
1125 /* populate the array with the supported methods */
1126 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1127 tmp[i++] = eventops[k]->name;
1128 }
1129 tmp[i] = NULL;
1130
1131 if (methods != NULL)
1132 mm_free(__UNCONST(methods));
1133
1134 methods = tmp;
1135
1136 return (methods);
1137 }
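/*
 * Example (illustration only) of listing the compiled-in backends:
 *
 *     const char **methods = event_get_supported_methods();
 *     int i;
 *     for (i = 0; methods && methods[i] != NULL; ++i)
 *         printf("supported: %s\n", methods[i]);
 *
 * "Supported" here means compiled in; a backend may still be unavailable on
 * the running kernel.
 */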
1138
1139 struct event_config *
1140 event_config_new(void)
1141 {
1142 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1143
1144 if (cfg == NULL)
1145 return (NULL);
1146
1147 TAILQ_INIT(&cfg->entries);
1148 cfg->max_dispatch_interval.tv_sec = -1;
1149 cfg->max_dispatch_callbacks = INT_MAX;
1150 cfg->limit_callbacks_after_prio = 1;
1151
1152 return (cfg);
1153 }
1154
1155 static void
1156 event_config_entry_free(struct event_config_entry *entry)
1157 {
1158 if (entry->avoid_method != NULL)
1159 mm_free(__UNCONST(entry->avoid_method));
1160 mm_free(entry);
1161 }
1162
1163 void
1164 event_config_free(struct event_config *cfg)
1165 {
1166 struct event_config_entry *entry;
1167
1168 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1169 TAILQ_REMOVE(&cfg->entries, entry, next);
1170 event_config_entry_free(entry);
1171 }
1172 mm_free(cfg);
1173 }
1174
1175 int
1176 event_config_set_flag(struct event_config *cfg, int flag)
1177 {
1178 if (!cfg)
1179 return -1;
1180 cfg->flags |= flag;
1181 return 0;
1182 }
1183
1184 int
1185 event_config_avoid_method(struct event_config *cfg, const char *method)
1186 {
1187 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1188 if (entry == NULL)
1189 return (-1);
1190
1191 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1192 mm_free(entry);
1193 return (-1);
1194 }
1195
1196 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1197
1198 return (0);
1199 }
1200
1201 int
1202 event_config_require_features(struct event_config *cfg,
1203 int features)
1204 {
1205 if (!cfg)
1206 return (-1);
1207 cfg->require_features = features;
1208 return (0);
1209 }
1210
1211 int
1212 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1213 {
1214 if (!cfg)
1215 return (-1);
1216 cfg->n_cpus_hint = cpus;
1217 return (0);
1218 }
1219
1220 int
1221 event_config_set_max_dispatch_interval(struct event_config *cfg,
1222 const struct timeval *max_interval, int max_callbacks, int min_priority)
1223 {
1224 if (max_interval)
1225 memcpy(&cfg->max_dispatch_interval, max_interval,
1226 sizeof(struct timeval));
1227 else
1228 cfg->max_dispatch_interval.tv_sec = -1;
1229 cfg->max_dispatch_callbacks =
1230 max_callbacks >= 0 ? max_callbacks : INT_MAX;
1231 if (min_priority < 0)
1232 min_priority = 0;
1233 cfg->limit_callbacks_after_prio = min_priority;
1234 return (0);
1235 }
1236
1237 int
1238 event_priority_init(int npriorities)
1239 {
1240 return event_base_priority_init(current_base, npriorities);
1241 }
1242
1243 int
1244 event_base_priority_init(struct event_base *base, int npriorities)
1245 {
1246 int i, r;
1247 r = -1;
1248
1249 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1250
1251 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1252 || npriorities >= EVENT_MAX_PRIORITIES)
1253 goto err;
1254
1255 if (npriorities == base->nactivequeues)
1256 goto ok;
1257
1258 if (base->nactivequeues) {
1259 mm_free(base->activequeues);
1260 base->nactivequeues = 0;
1261 }
1262
1263 /* Allocate our priority queues */
1264 base->activequeues = (struct evcallback_list *)
1265 mm_calloc(npriorities, sizeof(struct evcallback_list));
1266 if (base->activequeues == NULL) {
1267 event_warn("%s: calloc", __func__);
1268 goto err;
1269 }
1270 base->nactivequeues = npriorities;
1271
1272 for (i = 0; i < base->nactivequeues; ++i) {
1273 TAILQ_INIT(&base->activequeues[i]);
1274 }
1275
1276 ok:
1277 r = 0;
1278 err:
1279 EVBASE_RELEASE_LOCK(base, th_base_lock);
1280 return (r);
1281 }
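/*
 * Usage sketch (illustration only): an application that wants urgent events
 * to run ahead of bulk work could do, after creating 'base':
 *
 *     event_base_priority_init(base, 2);
 *     event_priority_set(urgent_ev, 0);
 *     event_priority_set(bulk_ev, 1);
 *
 * 'urgent_ev' and 'bulk_ev' are placeholders for events already created with
 * event_new().  Lower numbers run first; see event_process_active() below
 * for the starvation caveat.
 */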
1282
1283 int
1284 event_base_get_npriorities(struct event_base *base)
1285 {
1286
1287 int n;
1288 if (base == NULL)
1289 base = current_base;
1290
1291 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1292 n = base->nactivequeues;
1293 EVBASE_RELEASE_LOCK(base, th_base_lock);
1294 return (n);
1295 }
1296
1297 int
1298 event_base_get_num_events(struct event_base *base, unsigned int type)
1299 {
1300 int r = 0;
1301
1302 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1303
1304 if (type & EVENT_BASE_COUNT_ACTIVE)
1305 r += base->event_count_active;
1306
1307 if (type & EVENT_BASE_COUNT_VIRTUAL)
1308 r += base->virtual_event_count;
1309
1310 if (type & EVENT_BASE_COUNT_ADDED)
1311 r += base->event_count;
1312
1313 EVBASE_RELEASE_LOCK(base, th_base_lock);
1314
1315 return r;
1316 }
1317
1318 int
1319 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1320 {
1321 int r = 0;
1322
1323 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1324
1325 if (type & EVENT_BASE_COUNT_ACTIVE) {
1326 r += base->event_count_active_max;
1327 if (clear)
1328 base->event_count_active_max = 0;
1329 }
1330
1331 if (type & EVENT_BASE_COUNT_VIRTUAL) {
1332 r += base->virtual_event_count_max;
1333 if (clear)
1334 base->virtual_event_count_max = 0;
1335 }
1336
1337 if (type & EVENT_BASE_COUNT_ADDED) {
1338 r += base->event_count_max;
1339 if (clear)
1340 base->event_count_max = 0;
1341 }
1342
1343 EVBASE_RELEASE_LOCK(base, th_base_lock);
1344
1345 return r;
1346 }
1347
1348 /* Returns true iff we're currently watching any events. */
1349 static int
1350 event_haveevents(struct event_base *base)
1351 {
1352 /* Caller must hold th_base_lock */
1353 return (base->virtual_event_count > 0 || base->event_count > 0);
1354 }
1355
1356 /* "closure" function called when processing active signal events */
1357 static inline void
1358 event_signal_closure(struct event_base *base, struct event *ev)
1359 {
1360 short ncalls;
1361 int should_break;
1362
1363 /* Allows deletes to work */
1364 ncalls = ev->ev_ncalls;
1365 if (ncalls != 0)
1366 ev->ev_pncalls = &ncalls;
1367 EVBASE_RELEASE_LOCK(base, th_base_lock);
1368 while (ncalls) {
1369 ncalls--;
1370 ev->ev_ncalls = ncalls;
1371 if (ncalls == 0)
1372 ev->ev_pncalls = NULL;
1373 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1374
1375 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1376 should_break = base->event_break;
1377 EVBASE_RELEASE_LOCK(base, th_base_lock);
1378
1379 if (should_break) {
1380 if (ncalls != 0)
1381 ev->ev_pncalls = NULL;
1382 return;
1383 }
1384 }
1385 ev->ev_pncalls = NULL;
1386 }
1387
1388 /* Common timeouts are special timeouts that are handled as queues rather than
1389 * in the minheap. This is more efficient than the minheap if we happen to
1390 * know that we're going to get several thousands of timeout events all with
1391 * the same timeout value.
1392 *
1393 * Since all our timeout handling code assumes timevals can be copied,
1394 * assigned, etc, we can't use "magic pointer" to encode these common
1395 * timeouts. Searching through a list to see if every timeout is common could
1396 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1397 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1398 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1399 * of index into the event_base's array of common timeouts.
1400 */
1401
1402 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1403 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1404 #define COMMON_TIMEOUT_IDX_SHIFT 20
1405 #define COMMON_TIMEOUT_MASK 0xf0000000
1406 #define COMMON_TIMEOUT_MAGIC 0x50000000
1407
1408 #define COMMON_TIMEOUT_IDX(tv) \
1409 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
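/*
 * Worked example (illustrative): a common timeout registered at index 3
 * whose real microsecond part is 250000 is stored with
 *
 *     tv_usec = COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 250000
 *             = 0x50000000 | 0x00300000 | 0x0003d090 = 0x5033d090
 *
 * Masking with MICROSECONDS_MASK recovers 250000, and COMMON_TIMEOUT_IDX()
 * recovers the index 3.
 */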
1410
1411 /** Return true iff 'tv' is a common timeout in 'base' */
1412 static inline int
1413 is_common_timeout(const struct timeval *tv,
1414 const struct event_base *base)
1415 {
1416 int idx;
1417 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1418 return 0;
1419 idx = COMMON_TIMEOUT_IDX(tv);
1420 return idx < base->n_common_timeouts;
1421 }
1422
1423 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1424 * one is a common timeout. */
1425 static inline int
1426 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1427 {
1428 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1429 (tv2->tv_usec & ~MICROSECONDS_MASK);
1430 }
1431
1432 /** Requires that 'tv' is a common timeout. Return the corresponding
1433 * common_timeout_list. */
1434 static inline struct common_timeout_list *
1435 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1436 {
1437 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1438 }
1439
1440 #if 0
1441 static inline int
1442 common_timeout_ok(const struct timeval *tv,
1443 struct event_base *base)
1444 {
1445 const struct timeval *expect =
1446 &get_common_timeout_list(base, tv)->duration;
1447 return tv->tv_sec == expect->tv_sec &&
1448 tv->tv_usec == expect->tv_usec;
1449 }
1450 #endif
1451
1452 /* Add the timeout for the first event in the given common timeout list to the
1453 * event_base's minheap. */
1454 static void
1455 common_timeout_schedule(struct common_timeout_list *ctl,
1456 const struct timeval *now, struct event *head)
1457 {
1458 struct timeval timeout = head->ev_timeout;
1459 timeout.tv_usec &= MICROSECONDS_MASK;
1460 event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1461 }
1462
1463 /* Callback: invoked when the timeout for a common timeout queue triggers.
1464 * This means that (at least) the first event in that queue should be run,
1465 * and the timeout should be rescheduled if there are more events. */
1466 static void
1467 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1468 {
1469 struct timeval now;
1470 struct common_timeout_list *ctl = arg;
1471 struct event_base *base = ctl->base;
1472 struct event *ev = NULL;
1473 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1474 gettime(base, &now);
1475 while (1) {
1476 ev = TAILQ_FIRST(&ctl->events);
1477 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1478 (ev->ev_timeout.tv_sec == now.tv_sec &&
1479 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1480 break;
1481 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1482 event_active_nolock_(ev, EV_TIMEOUT, 1);
1483 }
1484 if (ev)
1485 common_timeout_schedule(ctl, &now, ev);
1486 EVBASE_RELEASE_LOCK(base, th_base_lock);
1487 }
1488
1489 #define MAX_COMMON_TIMEOUTS 256
1490
1491 const struct timeval *
1492 event_base_init_common_timeout(struct event_base *base,
1493 const struct timeval *duration)
1494 {
1495 int i;
1496 struct timeval tv;
1497 const struct timeval *result=NULL;
1498 struct common_timeout_list *new_ctl;
1499
1500 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1501 if (duration->tv_usec > 1000000) {
1502 memcpy(&tv, duration, sizeof(struct timeval));
1503 if (is_common_timeout(duration, base))
1504 tv.tv_usec &= MICROSECONDS_MASK;
1505 tv.tv_sec += tv.tv_usec / 1000000;
1506 tv.tv_usec %= 1000000;
1507 duration = &tv;
1508 }
1509 for (i = 0; i < base->n_common_timeouts; ++i) {
1510 const struct common_timeout_list *ctl =
1511 base->common_timeout_queues[i];
1512 if (duration->tv_sec == ctl->duration.tv_sec &&
1513 duration->tv_usec ==
1514 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1515 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1516 result = &ctl->duration;
1517 goto done;
1518 }
1519 }
1520 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1521 event_warnx("%s: Too many common timeouts already in use; "
1522 "we only support %d per event_base", __func__,
1523 MAX_COMMON_TIMEOUTS);
1524 goto done;
1525 }
1526 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1527 int n = base->n_common_timeouts < 16 ? 16 :
1528 base->n_common_timeouts*2;
1529 struct common_timeout_list **newqueues =
1530 mm_realloc(base->common_timeout_queues,
1531 n*sizeof(struct common_timeout_list *));
1532 if (!newqueues) {
1533 event_warn("%s: realloc",__func__);
1534 goto done;
1535 }
1536 base->n_common_timeouts_allocated = n;
1537 base->common_timeout_queues = newqueues;
1538 }
1539 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1540 if (!new_ctl) {
1541 event_warn("%s: calloc",__func__);
1542 goto done;
1543 }
1544 TAILQ_INIT(&new_ctl->events);
1545 new_ctl->duration.tv_sec = duration->tv_sec;
1546 new_ctl->duration.tv_usec =
1547 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1548 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1549 evtimer_assign(&new_ctl->timeout_event, base,
1550 common_timeout_callback, new_ctl);
1551 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1552 event_priority_set(&new_ctl->timeout_event, 0);
1553 new_ctl->base = base;
1554 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1555 result = &new_ctl->duration;
1556
1557 done:
1558 if (result)
1559 EVUTIL_ASSERT(is_common_timeout(result, base));
1560
1561 EVBASE_RELEASE_LOCK(base, th_base_lock);
1562 return result;
1563 }
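/*
 * Caller-side sketch (illustration only): to schedule many timers that all
 * share, say, a 10-second timeout through the queue mechanism above:
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &ten_sec);
 *     if (common)
 *         evtimer_add(timeout_ev, common);
 *
 * 'base' and 'timeout_ev' are placeholders for an existing event_base and an
 * event created with evtimer_new(); the returned timeval must be passed to
 * evtimer_add()/event_add() unmodified so that the magic bits survive.
 */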
1564
1565 /* Closure function invoked when we're activating a persistent event. */
1566 static inline void
1567 event_persist_closure(struct event_base *base, struct event *ev)
1568 {
1569 void (*evcb_callback)(evutil_socket_t, short, void *);
1570
1571 // Other fields of *ev that must be stored before executing
1572 evutil_socket_t evcb_fd;
1573 short evcb_res;
1574 void *evcb_arg;
1575
1576 /* reschedule the persistent event if we have a timeout. */
1577 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1578 /* If there was a timeout, we want it to run at an interval of
1579 * ev_io_timeout after the last time it was _scheduled_ for,
1580 * not ev_io_timeout after _now_. If it fired for another
1581 * reason, though, the timeout ought to start ticking _now_. */
1582 struct timeval run_at, relative_to, delay, now;
1583 ev_uint32_t usec_mask = 0;
1584 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1585 &ev->ev_io_timeout));
1586 gettime(base, &now);
1587 if (is_common_timeout(&ev->ev_timeout, base)) {
1588 delay = ev->ev_io_timeout;
1589 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1590 delay.tv_usec &= MICROSECONDS_MASK;
1591 if (ev->ev_res & EV_TIMEOUT) {
1592 relative_to = ev->ev_timeout;
1593 relative_to.tv_usec &= MICROSECONDS_MASK;
1594 } else {
1595 relative_to = now;
1596 }
1597 } else {
1598 delay = ev->ev_io_timeout;
1599 if (ev->ev_res & EV_TIMEOUT) {
1600 relative_to = ev->ev_timeout;
1601 } else {
1602 relative_to = now;
1603 }
1604 }
1605 evutil_timeradd(&relative_to, &delay, &run_at);
1606 if (evutil_timercmp(&run_at, &now, <)) {
1607 /* Looks like we missed at least one invocation due to
1608 * a clock jump, not running the event loop for a
1609 * while, really slow callbacks, or
1610 * something. Reschedule relative to now.
1611 */
1612 evutil_timeradd(&now, &delay, &run_at);
1613 }
1614 run_at.tv_usec |= usec_mask;
1615 event_add_nolock_(ev, &run_at, 1);
1616 }
1617
1618 // Save our callback before we release the lock
1619 evcb_callback = ev->ev_callback;
1620 evcb_fd = ev->ev_fd;
1621 evcb_res = ev->ev_res;
1622 evcb_arg = ev->ev_arg;
1623
1624 // Release the lock
1625 EVBASE_RELEASE_LOCK(base, th_base_lock);
1626
1627 // Execute the callback
1628 (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1629 }
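/*
 * Worked example (illustrative): a persistent event with an ev_io_timeout of
 * 5 seconds that was scheduled for t=100 and fires late at t=100.4 is
 * rescheduled for t=105 (relative to its last scheduled time), not t=105.4,
 * so the interval does not drift.  Had it fired for I/O rather than for the
 * timeout, the next deadline would instead be 5 seconds from "now".
 */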
1630
1631 /*
1632 Helper for event_process_active to process all the events in a single queue,
1633 releasing the lock as we go. This function requires that the lock be held
1634 when it's invoked. Returns -1 if we get a signal or an event_break that
1635 means we should stop processing any active events now. Otherwise returns
1636 the number of non-internal event_callbacks that we processed.
1637 */
1638 static int
1639 event_process_active_single_queue(struct event_base *base,
1640 struct evcallback_list *activeq,
1641 int max_to_process, const struct timeval *endtime)
1642 {
1643 struct event_callback *evcb;
1644 int count = 0;
1645
1646 EVUTIL_ASSERT(activeq != NULL);
1647
1648 for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1649 struct event *ev=NULL;
1650 if (evcb->evcb_flags & EVLIST_INIT) {
1651 ev = event_callback_to_event(evcb);
1652
1653 if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1654 event_queue_remove_active(base, evcb);
1655 else
1656 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1657 event_debug((
1658 "event_process_active: event: %p, %s%s%scall %p",
1659 ev,
1660 ev->ev_res & EV_READ ? "EV_READ " : " ",
1661 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1662 ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1663 ev->ev_callback));
1664 } else {
1665 event_queue_remove_active(base, evcb);
1666 event_debug(("event_process_active: event_callback %p, "
1667 "closure %d, call %p",
1668 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1669 }
1670
1671 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1672 ++count;
1673
1674
1675 base->current_event = evcb;
1676 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1677 base->current_event_waiters = 0;
1678 #endif
1679
1680 switch (evcb->evcb_closure) {
1681 case EV_CLOSURE_EVENT_SIGNAL:
1682 EVUTIL_ASSERT(ev != NULL);
1683 event_signal_closure(base, ev);
1684 break;
1685 case EV_CLOSURE_EVENT_PERSIST:
1686 EVUTIL_ASSERT(ev != NULL);
1687 event_persist_closure(base, ev);
1688 break;
1689 case EV_CLOSURE_EVENT: {
1690 void (*evcb_callback)(evutil_socket_t, short, void *);
1691 short res;
1692 EVUTIL_ASSERT(ev != NULL);
1693 evcb_callback = *ev->ev_callback;
1694 res = ev->ev_res;
1695 EVBASE_RELEASE_LOCK(base, th_base_lock);
1696 evcb_callback(ev->ev_fd, res, ev->ev_arg);
1697 }
1698 break;
1699 case EV_CLOSURE_CB_SELF: {
1700 void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1701 EVBASE_RELEASE_LOCK(base, th_base_lock);
1702 evcb_selfcb(evcb, evcb->evcb_arg);
1703 }
1704 break;
1705 case EV_CLOSURE_EVENT_FINALIZE:
1706 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1707 void (*evcb_evfinalize)(struct event *, void *);
1708 int evcb_closure = evcb->evcb_closure;
1709 EVUTIL_ASSERT(ev != NULL);
1710 base->current_event = NULL;
1711 evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1712 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1713 EVBASE_RELEASE_LOCK(base, th_base_lock);
1714 event_debug_note_teardown_(ev);
1715 evcb_evfinalize(ev, ev->ev_arg);
1716 if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1717 mm_free(ev);
1718 }
1719 break;
1720 case EV_CLOSURE_CB_FINALIZE: {
1721 void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1722 base->current_event = NULL;
1723 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1724 EVBASE_RELEASE_LOCK(base, th_base_lock);
1725 evcb_cbfinalize(evcb, evcb->evcb_arg);
1726 }
1727 break;
1728 default:
1729 EVUTIL_ASSERT(0);
1730 }
1731
1732 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1733 base->current_event = NULL;
1734 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1735 if (base->current_event_waiters) {
1736 base->current_event_waiters = 0;
1737 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1738 }
1739 #endif
1740
1741 if (base->event_break)
1742 return -1;
1743 if (count >= max_to_process)
1744 return count;
1745 if (count && endtime) {
1746 struct timeval now;
1747 update_time_cache(base);
1748 gettime(base, &now);
1749 if (evutil_timercmp(&now, endtime, >=))
1750 return count;
1751 }
1752 if (base->event_continue)
1753 break;
1754 }
1755 return count;
1756 }
1757
1758 /*
1759  * Active events are stored in priority queues.  Lower priority numbers are
1760  * always processed before higher ones, so events at a low priority number
1761  * can starve those at a higher number.
1762 */
1763
1764 static int
1765 event_process_active(struct event_base *base)
1766 {
1767 /* Caller must hold th_base_lock */
1768 struct evcallback_list *activeq = NULL;
1769 int i, c = 0;
1770 const struct timeval *endtime;
1771 struct timeval tv;
1772 const int maxcb = base->max_dispatch_callbacks;
1773 const int limit_after_prio = base->limit_callbacks_after_prio;
1774 if (base->max_dispatch_time.tv_sec >= 0) {
1775 update_time_cache(base);
1776 gettime(base, &tv);
1777 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1778 endtime = &tv;
1779 } else {
1780 endtime = NULL;
1781 }
1782
1783 for (i = 0; i < base->nactivequeues; ++i) {
1784 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1785 base->event_running_priority = i;
1786 activeq = &base->activequeues[i];
1787 if (i < limit_after_prio)
1788 c = event_process_active_single_queue(base, activeq,
1789 INT_MAX, NULL);
1790 else
1791 c = event_process_active_single_queue(base, activeq,
1792 maxcb, endtime);
1793 if (c < 0) {
1794 goto done;
1795 } else if (c > 0)
1796 break; /* Processed a real event; do not
1797 * consider lower-priority events */
1798 /* If we get here, all of the events we processed
1799 * were internal. Continue. */
1800 }
1801 }
1802
1803 done:
1804 base->event_running_priority = -1;
1805
1806 return c;
1807 }
1808
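/*
 * Usage sketch (illustrative only, compiled out): the dispatch limits that
 * event_process_active() consults above (max_dispatch_callbacks,
 * max_dispatch_time, limit_callbacks_after_prio) come from the public
 * configuration API.  The numbers below are arbitrary examples.
 */
#if 0
#include <event2/event.h>

static struct event_base *
example_base_with_dispatch_limits(void)
{
	struct timeval max_interval = { 0, 20000 };	/* 20 msec */
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	if (cfg == NULL)
		return NULL;
	/* Re-check for higher-priority work every 16 callbacks or 20 msec,
	 * but never interrupt callbacks running at priority 0. */
	event_config_set_max_dispatch_interval(cfg, &max_interval, 16, 1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	if (base != NULL)
		event_base_priority_init(base, 2);	/* priorities 0 and 1 */
	return base;
}
#endif
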
1809 /*
1810 * Wait continuously for events. We exit only if no events are left.
1811 */
1812
1813 int
1814 event_dispatch(void)
1815 {
1816 return (event_loop(0));
1817 }
1818
1819 int
1820 event_base_dispatch(struct event_base *event_base)
1821 {
1822 return (event_base_loop(event_base, 0));
1823 }
1824
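/*
 * Usage sketch (illustrative only, compiled out): a minimal caller of
 * event_base_dispatch().  The callback and helper names are hypothetical.
 */
#if 0
#include <event2/event.h>

static void
example_on_read(evutil_socket_t fd, short what, void *arg)
{
	int *n_readable = arg;
	(void)fd;
	(void)what;
	++*n_readable;
}

static int
example_run(struct event_base *base, evutil_socket_t fd)
{
	int n_readable = 0;
	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
	    example_on_read, &n_readable);

	if (ev == NULL)
		return -1;
	event_add(ev, NULL);
	/* Blocks until no events remain or loopexit/loopbreak is requested. */
	event_base_dispatch(base);
	event_free(ev);
	return n_readable;
}
#endif
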
1825 const char *
1826 event_base_get_method(const struct event_base *base)
1827 {
1828 EVUTIL_ASSERT(base);
1829 return (base->evsel->name);
1830 }
1831
1832 /** Callback: used to implement event_base_loopexit by telling the event_base
1833 * that it's time to exit its loop. */
1834 static void
1835 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1836 {
1837 struct event_base *base = arg;
1838 base->event_gotterm = 1;
1839 }
1840
1841 int
1842 event_loopexit(const struct timeval *tv)
1843 {
1844 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1845 current_base, tv));
1846 }
1847
1848 int
1849 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1850 {
1851 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1852 event_base, tv));
1853 }
1854
1855 int
1856 event_loopbreak(void)
1857 {
1858 return (event_base_loopbreak(current_base));
1859 }
1860
1861 int
1862 event_base_loopbreak(struct event_base *event_base)
1863 {
1864 int r = 0;
1865 if (event_base == NULL)
1866 return (-1);
1867
1868 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1869 event_base->event_break = 1;
1870
1871 if (EVBASE_NEED_NOTIFY(event_base)) {
1872 r = evthread_notify_base(event_base);
1873 } else {
1874 r = (0);
1875 }
1876 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1877 return r;
1878 }
1879
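/*
 * Sketch (illustrative only, compiled out) of the two ways to leave the
 * loop: event_base_loopexit() lets already-active callbacks finish and may
 * be deferred by a timeout, while event_base_loopbreak() returns as soon as
 * the currently running callback is done.
 */
#if 0
#include <event2/event.h>

static void
example_stop_soon(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };

	/* Finish pending callbacks, then exit within half a second. */
	event_base_loopexit(base, &half_sec);
}

static void
example_stop_now(struct event_base *base)
{
	/* Exit right after whatever callback is running at the moment. */
	event_base_loopbreak(base);
}
#endif
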
1880 int
1881 event_base_loopcontinue(struct event_base *event_base)
1882 {
1883 int r = 0;
1884 if (event_base == NULL)
1885 return (-1);
1886
1887 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1888 event_base->event_continue = 1;
1889
1890 if (EVBASE_NEED_NOTIFY(event_base)) {
1891 r = evthread_notify_base(event_base);
1892 } else {
1893 r = (0);
1894 }
1895 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1896 return r;
1897 }
1898
1899 int
1900 event_base_got_break(struct event_base *event_base)
1901 {
1902 int res;
1903 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1904 res = event_base->event_break;
1905 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1906 return res;
1907 }
1908
1909 int
1910 event_base_got_exit(struct event_base *event_base)
1911 {
1912 int res;
1913 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1914 res = event_base->event_gotterm;
1915 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1916 return res;
1917 }
1918
1919 /* not thread safe */
1920
1921 int
1922 event_loop(int flags)
1923 {
1924 return event_base_loop(current_base, flags);
1925 }
1926
1927 int
1928 event_base_loop(struct event_base *base, int flags)
1929 {
1930 const struct eventop *evsel = base->evsel;
1931 struct timeval tv;
1932 struct timeval *tv_p;
1933 int res, done, retval = 0;
1934
1935 /* Grab the lock. We will release it inside evsel.dispatch, and again
1936 * as we invoke user callbacks. */
1937 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1938
1939 if (base->running_loop) {
1940 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1941 " can run on each event_base at once.", __func__);
1942 EVBASE_RELEASE_LOCK(base, th_base_lock);
1943 return -1;
1944 }
1945
1946 base->running_loop = 1;
1947
1948 clear_time_cache(base);
1949
1950 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1951 evsig_set_base_(base);
1952
1953 done = 0;
1954
1955 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1956 base->th_owner_id = EVTHREAD_GET_ID();
1957 #endif
1958
1959 base->event_gotterm = base->event_break = 0;
1960
1961 while (!done) {
1962 base->event_continue = 0;
1963 base->n_deferreds_queued = 0;
1964
1965 /* Terminate the loop if we have been asked to */
1966 if (base->event_gotterm) {
1967 break;
1968 }
1969
1970 if (base->event_break) {
1971 break;
1972 }
1973
1974 tv_p = &tv;
1975 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1976 timeout_next(base, &tv_p);
1977 } else {
1978 /*
1979 * if we have active events, we just poll new events
1980 * without waiting.
1981 */
1982 evutil_timerclear(&tv);
1983 }
1984
1985 /* If we have no events, we just exit */
1986 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1987 !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1988 event_debug(("%s: no events registered.", __func__));
1989 retval = 1;
1990 goto done;
1991 }
1992
1993 event_queue_make_later_events_active(base);
1994
1995 clear_time_cache(base);
1996
1997 res = evsel->dispatch(base, tv_p);
1998
1999 if (res == -1) {
2000 event_debug(("%s: dispatch returned unsuccessfully.",
2001 __func__));
2002 retval = -1;
2003 goto done;
2004 }
2005
2006 update_time_cache(base);
2007
2008 timeout_process(base);
2009
2010 if (N_ACTIVE_CALLBACKS(base)) {
2011 int n = event_process_active(base);
2012 if ((flags & EVLOOP_ONCE)
2013 && N_ACTIVE_CALLBACKS(base) == 0
2014 && n != 0)
2015 done = 1;
2016 } else if (flags & EVLOOP_NONBLOCK)
2017 done = 1;
2018 }
2019 event_debug(("%s: asked to terminate loop.", __func__));
2020
2021 done:
2022 clear_time_cache(base);
2023 base->running_loop = 0;
2024
2025 EVBASE_RELEASE_LOCK(base, th_base_lock);
2026
2027 return (retval);
2028 }
2029
2030 /* One-time callback to implement event_base_once: invokes the user callback,
2031 * then deletes the allocated storage */
2032 static void
2033 event_once_cb(evutil_socket_t fd, short events, void *arg)
2034 {
2035 struct event_once *eonce = arg;
2036
2037 (*eonce->cb)(fd, events, eonce->arg);
2038 EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2039 LIST_REMOVE(eonce, next_once);
2040 EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2041 event_debug_unassign(&eonce->ev);
2042 mm_free(eonce);
2043 }
2044
2045 /* Not threadsafe; schedules a one-shot event on the global current_base. */
2046 int
2047 event_once(evutil_socket_t fd, short events,
2048 void (*callback)(evutil_socket_t, short, void *),
2049 void *arg, const struct timeval *tv)
2050 {
2051 return event_base_once(current_base, fd, events, callback, arg, tv);
2052 }
2053
2054 /* Schedules an event once */
2055 int
2056 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2057 void (*callback)(evutil_socket_t, short, void *),
2058 void *arg, const struct timeval *tv)
2059 {
2060 struct event_once *eonce;
2061 int res = 0;
2062 int activate = 0;
2063
2064 if (!base)
2065 return (-1);
2066
2067 /* We cannot support signals that just fire once, or persistent
2068 * events. */
2069 if (events & (EV_SIGNAL|EV_PERSIST))
2070 return (-1);
2071
2072 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2073 return (-1);
2074
2075 eonce->cb = callback;
2076 eonce->arg = arg;
2077
2078 if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2079 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2080
2081 if (tv == NULL || ! evutil_timerisset(tv)) {
2082 /* If the event is going to become active immediately,
2083 * don't put it on the timeout queue. This is one
2084 * idiom for scheduling a callback, so let's make
2085 * it fast (and order-preserving). */
2086 activate = 1;
2087 }
2088 } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2089 events &= EV_READ|EV_WRITE|EV_CLOSED;
2090
2091 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2092 } else {
2093 /* Bad event combination */
2094 mm_free(eonce);
2095 return (-1);
2096 }
2097
2098 if (res == 0) {
2099 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2100 if (activate)
2101 event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2102 else
2103 res = event_add_nolock_(&eonce->ev, tv, 0);
2104
2105 if (res != 0) {
2106 mm_free(eonce);
2107 return (res);
2108 } else {
2109 LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2110 }
2111 EVBASE_RELEASE_LOCK(base, th_base_lock);
2112 }
2113
2114 return (0);
2115 }
2116
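/*
 * Usage sketch (illustrative only, compiled out): a one-shot timer via
 * event_base_once().  The storage is allocated and freed by libevent
 * itself (see event_once_cb() above), so the caller has nothing to free.
 */
#if 0
#include <event2/event.h>

static void
example_fire_once(evutil_socket_t fd, short what, void *arg)
{
	(void)fd;
	(void)what;
	event_base_loopexit(arg, NULL);
}

static void
example_schedule_once(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };

	event_base_once(base, -1, EV_TIMEOUT, example_fire_once, base,
	    &one_sec);
}
#endif
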
2117 int
2118 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2119 {
2120 if (!base)
2121 base = current_base;
2122 if (arg == &event_self_cbarg_ptr_)
2123 arg = ev;
2124
2125 if (!(events & EV_SIGNAL))
2126 event_debug_assert_socket_nonblocking_(fd);
2127 event_debug_assert_not_added_(ev);
2128
2129 ev->ev_base = base;
2130
2131 ev->ev_callback = callback;
2132 ev->ev_arg = arg;
2133 ev->ev_fd = fd;
2134 ev->ev_events = events;
2135 ev->ev_res = 0;
2136 ev->ev_flags = EVLIST_INIT;
2137 ev->ev_ncalls = 0;
2138 ev->ev_pncalls = NULL;
2139
2140 if (events & EV_SIGNAL) {
2141 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2142 event_warnx("%s: EV_SIGNAL is not compatible with "
2143 "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2144 return -1;
2145 }
2146 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2147 } else {
2148 if (events & EV_PERSIST) {
2149 evutil_timerclear(&ev->ev_io_timeout);
2150 ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2151 } else {
2152 ev->ev_closure = EV_CLOSURE_EVENT;
2153 }
2154 }
2155
2156 min_heap_elem_init_(ev);
2157
2158 if (base != NULL) {
2159 /* by default, we put new events into the middle priority */
2160 ev->ev_pri = base->nactivequeues / 2;
2161 }
2162
2163 event_debug_note_setup_(ev);
2164
2165 return 0;
2166 }
2167
2168 int
2169 event_base_set(struct event_base *base, struct event *ev)
2170 {
2171 /* Only innocent events may be assigned to a different base */
2172 if (ev->ev_flags != EVLIST_INIT)
2173 return (-1);
2174
2175 event_debug_assert_is_setup_(ev);
2176
2177 ev->ev_base = base;
2178 ev->ev_pri = base->nactivequeues/2;
2179
2180 return (0);
2181 }
2182
2183 void
2184 event_set(struct event *ev, evutil_socket_t fd, short events,
2185 void (*callback)(evutil_socket_t, short, void *), void *arg)
2186 {
2187 int r;
2188 r = event_assign(ev, current_base, fd, events, callback, arg);
2189 EVUTIL_ASSERT(r == 0);
2190 }
2191
2192 void *
2193 event_self_cbarg(void)
2194 {
2195 return &event_self_cbarg_ptr_;
2196 }
2197
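/*
 * Usage sketch (illustrative only, compiled out): event_self_cbarg() makes
 * the callback receive its own struct event pointer, which is convenient
 * for an event that deletes or frees itself.
 */
#if 0
#include <event2/event.h>

static void
example_self_freeing_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;	/* filled in via event_self_cbarg() */
	(void)fd;
	(void)what;
	event_free(self);
}

static void
example_arm_once(struct event_base *base, evutil_socket_t fd)
{
	struct event *ev = event_new(base, fd, EV_READ,
	    example_self_freeing_cb, event_self_cbarg());

	if (ev != NULL)
		event_add(ev, NULL);
}
#endif
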
2198 struct event *
2199 event_base_get_running_event(struct event_base *base)
2200 {
2201 struct event *ev = NULL;
2202 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2203 if (EVBASE_IN_THREAD(base)) {
2204 struct event_callback *evcb = base->current_event;
2205 if (evcb->evcb_flags & EVLIST_INIT)
2206 ev = event_callback_to_event(evcb);
2207 }
2208 EVBASE_RELEASE_LOCK(base, th_base_lock);
2209 return ev;
2210 }
2211
2212 struct event *
2213 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2214 {
2215 struct event *ev;
2216 ev = mm_malloc(sizeof(struct event));
2217 if (ev == NULL)
2218 return (NULL);
2219 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2220 mm_free(ev);
2221 return (NULL);
2222 }
2223
2224 return (ev);
2225 }
2226
2227 void
2228 event_free(struct event *ev)
2229 {
2230 	/* This is disabled so that events which have been finalized remain a
2231 	 * valid target for event_free(). */
2232 // event_debug_assert_is_setup_(ev);
2233
2234 /* make sure that this event won't be coming back to haunt us. */
2235 event_del(ev);
2236 event_debug_note_teardown_(ev);
2237 mm_free(ev);
2238
2239 }
2240
2241 void
2242 event_debug_unassign(struct event *ev)
2243 {
2244 event_debug_assert_not_added_(ev);
2245 event_debug_note_teardown_(ev);
2246
2247 ev->ev_flags &= ~EVLIST_INIT;
2248 }
2249
2250 #define EVENT_FINALIZE_FREE_ 0x10000
2251 static int
2252 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2253 {
2254 ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2255 EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2256
2257 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2258 ev->ev_closure = closure;
2259 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2260 event_active_nolock_(ev, EV_FINALIZE, 1);
2261 ev->ev_flags |= EVLIST_FINALIZING;
2262 return 0;
2263 }
2264
2265 static int
2266 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2267 {
2268 int r;
2269 struct event_base *base = ev->ev_base;
2270 if (EVUTIL_FAILURE_CHECK(!base)) {
2271 event_warnx("%s: event has no event_base set.", __func__);
2272 return -1;
2273 }
2274
2275 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2276 r = event_finalize_nolock_(base, flags, ev, cb);
2277 EVBASE_RELEASE_LOCK(base, th_base_lock);
2278 return r;
2279 }
2280
2281 int
2282 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2283 {
2284 return event_finalize_impl_(flags, ev, cb);
2285 }
2286
2287 int
2288 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2289 {
2290 return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2291 }
2292
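/*
 * Sketch (illustrative only, compiled out): event_free_finalize() is the
 * safe teardown when another thread may still be running the event's
 * callback.  The finalizer runs from the event loop once the event can no
 * longer fire, and libevent frees the struct event afterwards; only the
 * caller's own state needs releasing here.
 */
#if 0
#include <event2/event.h>
#include <stdlib.h>

struct example_conn {
	struct event *ev;	/* created with arg == the example_conn */
	/* ... per-connection state ... */
};

static void
example_conn_finalize(struct event *ev, void *arg)
{
	struct example_conn *conn = arg;
	(void)ev;
	free(conn);	/* safe now: no callback can still be running */
}

static void
example_conn_close(struct example_conn *conn)
{
	event_free_finalize(0, conn->ev, example_conn_finalize);
}
#endif
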
2293 void
2294 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2295 {
2296 struct event *ev = NULL;
2297 if (evcb->evcb_flags & EVLIST_INIT) {
2298 ev = event_callback_to_event(evcb);
2299 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2300 } else {
2301 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2302 }
2303
2304 evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2305 evcb->evcb_cb_union.evcb_cbfinalize = cb;
2306 event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2307 evcb->evcb_flags |= EVLIST_FINALIZING;
2308 }
2309
2310 void
2311 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2312 {
2313 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2314 event_callback_finalize_nolock_(base, flags, evcb, cb);
2315 EVBASE_RELEASE_LOCK(base, th_base_lock);
2316 }
2317
2318 /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2319 * callback will be invoked on *one of them*, after they have *all* been
2320 * finalized. */
2321 int
2322 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2323 {
2324 int n_pending = 0, i;
2325
2326 if (base == NULL)
2327 base = current_base;
2328
2329 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2330
2331 event_debug(("%s: %d events finalizing", __func__, n_cbs));
2332
2333 /* At most one can be currently executing; the rest we just
2334 * cancel... But we always make sure that the finalize callback
2335 * runs. */
2336 for (i = 0; i < n_cbs; ++i) {
2337 struct event_callback *evcb = evcbs[i];
2338 if (evcb == base->current_event) {
2339 event_callback_finalize_nolock_(base, 0, evcb, cb);
2340 ++n_pending;
2341 } else {
2342 event_callback_cancel_nolock_(base, evcb, 0);
2343 }
2344 }
2345
2346 if (n_pending == 0) {
2347 /* Just do the first one. */
2348 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2349 }
2350
2351 EVBASE_RELEASE_LOCK(base, th_base_lock);
2352 return 0;
2353 }
2354
2355 /*
2356  * Sets the priority of an event.  If the event is already active,
2357  * changing the priority will fail.
2358 */
2359
2360 int
2361 event_priority_set(struct event *ev, int pri)
2362 {
2363 event_debug_assert_is_setup_(ev);
2364
2365 if (ev->ev_flags & EVLIST_ACTIVE)
2366 return (-1);
2367 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2368 return (-1);
2369
2370 ev->ev_pri = pri;
2371
2372 return (0);
2373 }
2374
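/*
 * Usage sketch (illustrative only, compiled out): priorities are assigned
 * while the event is not active, typically right after event_new() or
 * event_assign() and before event_add().  Smaller numbers run first.
 */
#if 0
#include <event2/event.h>

static void
example_make_urgent(struct event *ev)
{
	/* 0 is the most urgent priority; see event_base_priority_init(). */
	event_priority_set(ev, 0);
	event_add(ev, NULL);
}
#endif
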
2375 /*
2376 * Checks if a specific event is pending or scheduled.
2377 */
2378
2379 int
2380 event_pending(const struct event *ev, short event, struct timeval *tv)
2381 {
2382 int flags = 0;
2383
2384 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2385 event_warnx("%s: event has no event_base set.", __func__);
2386 return 0;
2387 }
2388
2389 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2390 event_debug_assert_is_setup_(ev);
2391
2392 if (ev->ev_flags & EVLIST_INSERTED)
2393 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2394 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2395 flags |= ev->ev_res;
2396 if (ev->ev_flags & EVLIST_TIMEOUT)
2397 flags |= EV_TIMEOUT;
2398
2399 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2400
2401 /* See if there is a timeout that we should report */
2402 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2403 struct timeval tmp = ev->ev_timeout;
2404 tmp.tv_usec &= MICROSECONDS_MASK;
2405 		/* correctly remap to real time */
2406 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2407 }
2408
2409 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2410
2411 return (flags & event);
2412 }
2413
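/*
 * Usage sketch (illustrative only, compiled out): event_pending() reports
 * which of the requested flags the event is currently added or active for,
 * and can return the absolute expiration time of a pending timeout.
 */
#if 0
#include <event2/event.h>

static int
example_is_armed(const struct event *ev)
{
	struct timeval expires;

	if (event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expires)) {
		/* 'expires' is meaningful only if EV_TIMEOUT was pending. */
		return 1;
	}
	return 0;
}
#endif
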
2414 int
2415 event_initialized(const struct event *ev)
2416 {
2417 if (!(ev->ev_flags & EVLIST_INIT))
2418 return 0;
2419
2420 return 1;
2421 }
2422
2423 void
2424 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2425 {
2426 event_debug_assert_is_setup_(event);
2427
2428 if (base_out)
2429 *base_out = event->ev_base;
2430 if (fd_out)
2431 *fd_out = event->ev_fd;
2432 if (events_out)
2433 *events_out = event->ev_events;
2434 if (callback_out)
2435 *callback_out = event->ev_callback;
2436 if (arg_out)
2437 *arg_out = event->ev_arg;
2438 }
2439
2440 size_t
2441 event_get_struct_event_size(void)
2442 {
2443 return sizeof(struct event);
2444 }
2445
2446 evutil_socket_t
2447 event_get_fd(const struct event *ev)
2448 {
2449 event_debug_assert_is_setup_(ev);
2450 return ev->ev_fd;
2451 }
2452
2453 struct event_base *
2454 event_get_base(const struct event *ev)
2455 {
2456 event_debug_assert_is_setup_(ev);
2457 return ev->ev_base;
2458 }
2459
2460 short
2461 event_get_events(const struct event *ev)
2462 {
2463 event_debug_assert_is_setup_(ev);
2464 return ev->ev_events;
2465 }
2466
2467 event_callback_fn
2468 event_get_callback(const struct event *ev)
2469 {
2470 event_debug_assert_is_setup_(ev);
2471 return ev->ev_callback;
2472 }
2473
2474 void *
2475 event_get_callback_arg(const struct event *ev)
2476 {
2477 event_debug_assert_is_setup_(ev);
2478 return ev->ev_arg;
2479 }
2480
2481 int
2482 event_get_priority(const struct event *ev)
2483 {
2484 event_debug_assert_is_setup_(ev);
2485 return ev->ev_pri;
2486 }
2487
2488 int
2489 event_add(struct event *ev, const struct timeval *tv)
2490 {
2491 int res;
2492
2493 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2494 event_warnx("%s: event has no event_base set.", __func__);
2495 return -1;
2496 }
2497
2498 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2499
2500 res = event_add_nolock_(ev, tv, 0);
2501
2502 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2503
2504 return (res);
2505 }
2506
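/*
 * Usage sketch (illustrative only, compiled out): a repeating timer.  With
 * EV_PERSIST the interval given to event_add() is remembered and the event
 * is re-armed after each callback (see the persist closure handling above).
 */
#if 0
#include <event2/event.h>

static void
example_tick(evutil_socket_t fd, short what, void *arg)
{
	(void)fd;
	(void)what;
	(void)arg;
	/* Runs roughly once per second until event_del() is called. */
}

static struct event *
example_start_ticker(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };
	struct event *timer = event_new(base, -1, EV_PERSIST, example_tick,
	    NULL);

	if (timer != NULL)
		event_add(timer, &one_sec);
	return timer;
}
#endif
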
2507 /* Helper callback: wake an event_base from another thread. This version
2508 * works by writing a byte to one end of a socketpair, so that the event_base
2509 * listening on the other end will wake up as the corresponding event
2510 * triggers */
2511 static int
2512 evthread_notify_base_default(struct event_base *base)
2513 {
2514 char buf[1];
2515 int r;
2516 buf[0] = (char) 0;
2517 #ifdef _WIN32
2518 r = send(base->th_notify_fd[1], buf, 1, 0);
2519 #else
2520 r = write(base->th_notify_fd[1], buf, 1);
2521 #endif
2522 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2523 }
2524
2525 #ifdef EVENT__HAVE_EVENTFD
2526 /* Helper callback: wake an event_base from another thread. This version
2527 * assumes that you have a working eventfd() implementation. */
2528 static int
2529 evthread_notify_base_eventfd(struct event_base *base)
2530 {
2531 ev_uint64_t msg = 1;
2532 int r;
2533 do {
2534 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2535 } while (r < 0 && errno == EAGAIN);
2536
2537 return (r < 0) ? -1 : 0;
2538 }
2539 #endif
2540
2541
2542 /** Tell the thread currently running the event_loop for base (if any) that it
2543 * needs to stop waiting in its dispatch function (if it is) and process all
2544 * active callbacks. */
2545 static int
2546 evthread_notify_base(struct event_base *base)
2547 {
2548 EVENT_BASE_ASSERT_LOCKED(base);
2549 if (!base->th_notify_fn)
2550 return -1;
2551 if (base->is_notify_pending)
2552 return 0;
2553 base->is_notify_pending = 1;
2554 return base->th_notify_fn(base);
2555 }
2556
2557 /* Implementation function to remove a timeout on a currently pending event.
2558 */
2559 int
2560 event_remove_timer_nolock_(struct event *ev)
2561 {
2562 struct event_base *base = ev->ev_base;
2563
2564 EVENT_BASE_ASSERT_LOCKED(base);
2565 event_debug_assert_is_setup_(ev);
2566
2567 event_debug(("event_remove_timer_nolock: event: %p", ev));
2568
2569 /* If it's not pending on a timeout, we don't need to do anything. */
2570 if (ev->ev_flags & EVLIST_TIMEOUT) {
2571 event_queue_remove_timeout(base, ev);
2572 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2573 }
2574
2575 return (0);
2576 }
2577
2578 int
2579 event_remove_timer(struct event *ev)
2580 {
2581 int res;
2582
2583 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2584 event_warnx("%s: event has no event_base set.", __func__);
2585 return -1;
2586 }
2587
2588 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2589
2590 res = event_remove_timer_nolock_(ev);
2591
2592 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2593
2594 return (res);
2595 }
2596
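/*
 * Usage sketch (illustrative only, compiled out): event_remove_timer()
 * drops only the timeout, leaving any EV_READ/EV_WRITE registration in
 * place, so the event stops timing out but still fires on I/O.
 */
#if 0
#include <event2/event.h>

static void
example_disarm_timeout(struct event *read_ev)
{
	/* read_ev was previously added with a timeout via event_add(). */
	event_remove_timer(read_ev);
}
#endif
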
2597 /* Implementation function to add an event. Works just like event_add,
2598 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2599 * we treat tv as an absolute time, not as an interval to add to the current
2600 * time */
2601 int
2602 event_add_nolock_(struct event *ev, const struct timeval *tv,
2603 int tv_is_absolute)
2604 {
2605 struct event_base *base = ev->ev_base;
2606 int res = 0;
2607 int notify = 0;
2608
2609 EVENT_BASE_ASSERT_LOCKED(base);
2610 event_debug_assert_is_setup_(ev);
2611
2612 event_debug((
2613 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2614 ev,
2615 EV_SOCK_ARG(ev->ev_fd),
2616 ev->ev_events & EV_READ ? "EV_READ " : " ",
2617 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2618 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2619 tv ? "EV_TIMEOUT " : " ",
2620 ev->ev_callback));
2621
2622 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2623
2624 if (ev->ev_flags & EVLIST_FINALIZING) {
2625 /* XXXX debug */
2626 return (-1);
2627 }
2628
2629 /*
2630 * prepare for timeout insertion further below, if we get a
2631 * failure on any step, we should not change any state.
2632 */
2633 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2634 if (min_heap_reserve_(&base->timeheap,
2635 1 + min_heap_size_(&base->timeheap)) == -1)
2636 return (-1); /* ENOMEM == errno */
2637 }
2638
2639 /* If the main thread is currently executing a signal event's
2640 * callback, and we are not the main thread, then we want to wait
2641 * until the callback is done before we mess with the event, or else
2642 * we can race on ev_ncalls and ev_pncalls below. */
2643 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2644 if (base->current_event == event_to_event_callback(ev) &&
2645 (ev->ev_events & EV_SIGNAL)
2646 && !EVBASE_IN_THREAD(base)) {
2647 ++base->current_event_waiters;
2648 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2649 }
2650 #endif
2651
2652 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2653 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2654 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2655 res = evmap_io_add_(base, ev->ev_fd, ev);
2656 else if (ev->ev_events & EV_SIGNAL)
2657 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2658 if (res != -1)
2659 event_queue_insert_inserted(base, ev);
2660 if (res == 1) {
2661 /* evmap says we need to notify the main thread. */
2662 notify = 1;
2663 res = 0;
2664 }
2665 }
2666
2667 /*
2668 * we should change the timeout state only if the previous event
2669 * addition succeeded.
2670 */
2671 if (res != -1 && tv != NULL) {
2672 struct timeval now;
2673 int common_timeout;
2674 #ifdef USE_REINSERT_TIMEOUT
2675 int was_common;
2676 int old_timeout_idx;
2677 #endif
2678
2679 /*
2680 * for persistent timeout events, we remember the
2681 * timeout value and re-add the event.
2682 *
2683 * If tv_is_absolute, this was already set.
2684 */
2685 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2686 ev->ev_io_timeout = *tv;
2687
2688 #ifndef USE_REINSERT_TIMEOUT
2689 if (ev->ev_flags & EVLIST_TIMEOUT) {
2690 event_queue_remove_timeout(base, ev);
2691 }
2692 #endif
2693
2694 /* Check if it is active due to a timeout. Rescheduling
2695 * this timeout before the callback can be executed
2696 * removes it from the active list. */
2697 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2698 (ev->ev_res & EV_TIMEOUT)) {
2699 if (ev->ev_events & EV_SIGNAL) {
2700 /* See if we are just active executing
2701 * this event in a loop
2702 */
2703 if (ev->ev_ncalls && ev->ev_pncalls) {
2704 /* Abort loop */
2705 *ev->ev_pncalls = 0;
2706 }
2707 }
2708
2709 event_queue_remove_active(base, event_to_event_callback(ev));
2710 }
2711
2712 gettime(base, &now);
2713
2714 common_timeout = is_common_timeout(tv, base);
2715 #ifdef USE_REINSERT_TIMEOUT
2716 was_common = is_common_timeout(&ev->ev_timeout, base);
2717 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2718 #endif
2719
2720 if (tv_is_absolute) {
2721 ev->ev_timeout = *tv;
2722 } else if (common_timeout) {
2723 struct timeval tmp = *tv;
2724 tmp.tv_usec &= MICROSECONDS_MASK;
2725 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2726 ev->ev_timeout.tv_usec |=
2727 (tv->tv_usec & ~MICROSECONDS_MASK);
2728 } else {
2729 evutil_timeradd(&now, tv, &ev->ev_timeout);
2730 }
2731
2732 event_debug((
2733 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2734 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2735
2736 #ifdef USE_REINSERT_TIMEOUT
2737 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2738 #else
2739 event_queue_insert_timeout(base, ev);
2740 #endif
2741
2742 if (common_timeout) {
2743 struct common_timeout_list *ctl =
2744 get_common_timeout_list(base, &ev->ev_timeout);
2745 if (ev == TAILQ_FIRST(&ctl->events)) {
2746 common_timeout_schedule(ctl, &now, ev);
2747 }
2748 } else {
2749 struct event* top = NULL;
2750 /* See if the earliest timeout is now earlier than it
2751 * was before: if so, we will need to tell the main
2752 * thread to wake up earlier than it would otherwise.
2753 * We double check the timeout of the top element to
2754 * handle time distortions due to system suspension.
2755 */
2756 if (min_heap_elt_is_top_(ev))
2757 notify = 1;
2758 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2759 evutil_timercmp(&top->ev_timeout, &now, <))
2760 notify = 1;
2761 }
2762 }
2763
2764 /* if we are not in the right thread, we need to wake up the loop */
2765 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2766 evthread_notify_base(base);
2767
2768 event_debug_note_add_(ev);
2769
2770 return (res);
2771 }
2772
2773 static int
2774 event_del_(struct event *ev, int blocking)
2775 {
2776 int res;
2777 struct event_base *base = ev->ev_base;
2778
2779 if (EVUTIL_FAILURE_CHECK(!base)) {
2780 event_warnx("%s: event has no event_base set.", __func__);
2781 return -1;
2782 }
2783
2784 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2785 res = event_del_nolock_(ev, blocking);
2786 EVBASE_RELEASE_LOCK(base, th_base_lock);
2787
2788 return (res);
2789 }
2790
2791 int
2792 event_del(struct event *ev)
2793 {
2794 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2795 }
2796
2797 int
2798 event_del_block(struct event *ev)
2799 {
2800 return event_del_(ev, EVENT_DEL_BLOCK);
2801 }
2802
2803 int
2804 event_del_noblock(struct event *ev)
2805 {
2806 return event_del_(ev, EVENT_DEL_NOBLOCK);
2807 }
2808
2809 /** Helper for event_del: always called with th_base_lock held.
2810 *
2811 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2812 * EVEN_IF_FINALIZING} values. See those for more information.
2813 */
2814 int
2815 event_del_nolock_(struct event *ev, int blocking)
2816 {
2817 struct event_base *base;
2818 int res = 0, notify = 0;
2819
2820 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2821 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2822
2823 /* An event without a base has not been added */
2824 if (ev->ev_base == NULL)
2825 return (-1);
2826
2827 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2828
2829 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2830 if (ev->ev_flags & EVLIST_FINALIZING) {
2831 /* XXXX Debug */
2832 return 0;
2833 }
2834 }
2835
2836 base = ev->ev_base;
2837
2838 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2839
2840 /* See if we are just active executing this event in a loop */
2841 if (ev->ev_events & EV_SIGNAL) {
2842 if (ev->ev_ncalls && ev->ev_pncalls) {
2843 /* Abort loop */
2844 *ev->ev_pncalls = 0;
2845 }
2846 }
2847
2848 if (ev->ev_flags & EVLIST_TIMEOUT) {
2849 /* NOTE: We never need to notify the main thread because of a
2850 * deleted timeout event: all that could happen if we don't is
2851 * that the dispatch loop might wake up too early. But the
2852 * point of notifying the main thread _is_ to wake up the
2853 * dispatch loop early anyway, so we wouldn't gain anything by
2854 * doing it.
2855 */
2856 event_queue_remove_timeout(base, ev);
2857 }
2858
2859 if (ev->ev_flags & EVLIST_ACTIVE)
2860 event_queue_remove_active(base, event_to_event_callback(ev));
2861 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2862 event_queue_remove_active_later(base, event_to_event_callback(ev));
2863
2864 if (ev->ev_flags & EVLIST_INSERTED) {
2865 event_queue_remove_inserted(base, ev);
2866 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2867 res = evmap_io_del_(base, ev->ev_fd, ev);
2868 else
2869 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2870 if (res == 1) {
2871 /* evmap says we need to notify the main thread. */
2872 notify = 1;
2873 res = 0;
2874 }
2875 /* If we do not have events, let's notify event base so it can
2876 * exit without waiting */
2877 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2878 notify = 1;
2879 }
2880
2881 /* if we are not in the right thread, we need to wake up the loop */
2882 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2883 evthread_notify_base(base);
2884
2885 event_debug_note_del_(ev);
2886
2887 /* If the main thread is currently executing this event's callback,
2888 * and we are not the main thread, then we want to wait until the
2889 * callback is done before returning. That way, when this function
2890 * returns, it will be safe to free the user-supplied argument.
2891 */
2892 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2893 if (blocking != EVENT_DEL_NOBLOCK &&
2894 base->current_event == event_to_event_callback(ev) &&
2895 !EVBASE_IN_THREAD(base) &&
2896 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2897 ++base->current_event_waiters;
2898 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2899 }
2900 #endif
2901
2902 return (res);
2903 }
2904
2905 void
2906 event_active(struct event *ev, int res, short ncalls)
2907 {
2908 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2909 event_warnx("%s: event has no event_base set.", __func__);
2910 return;
2911 }
2912
2913 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2914
2915 event_debug_assert_is_setup_(ev);
2916
2917 event_active_nolock_(ev, res, ncalls);
2918
2919 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2920 }
2921
2922
2923 void
2924 event_active_nolock_(struct event *ev, int res, short ncalls)
2925 {
2926 struct event_base *base;
2927
2928 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2929 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2930
2931 base = ev->ev_base;
2932 EVENT_BASE_ASSERT_LOCKED(base);
2933
2934 if (ev->ev_flags & EVLIST_FINALIZING) {
2935 /* XXXX debug */
2936 return;
2937 }
2938
2939 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2940 default:
2941 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2942 EVUTIL_ASSERT(0);
2943 break;
2944 case EVLIST_ACTIVE:
2945 /* We get different kinds of events, add them together */
2946 ev->ev_res |= res;
2947 return;
2948 case EVLIST_ACTIVE_LATER:
2949 ev->ev_res |= res;
2950 break;
2951 case 0:
2952 ev->ev_res = res;
2953 break;
2954 }
2955
2956 if (ev->ev_pri < base->event_running_priority)
2957 base->event_continue = 1;
2958
2959 if (ev->ev_events & EV_SIGNAL) {
2960 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2961 if (base->current_event == event_to_event_callback(ev) &&
2962 !EVBASE_IN_THREAD(base)) {
2963 ++base->current_event_waiters;
2964 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2965 }
2966 #endif
2967 ev->ev_ncalls = ncalls;
2968 ev->ev_pncalls = NULL;
2969 }
2970
2971 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2972 }
2973
2974 void
2975 event_active_later_(struct event *ev, int res)
2976 {
2977 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2978 event_active_later_nolock_(ev, res);
2979 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2980 }
2981
2982 void
2983 event_active_later_nolock_(struct event *ev, int res)
2984 {
2985 struct event_base *base = ev->ev_base;
2986 EVENT_BASE_ASSERT_LOCKED(base);
2987
2988 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2989 /* We get different kinds of events, add them together */
2990 ev->ev_res |= res;
2991 return;
2992 }
2993
2994 ev->ev_res = res;
2995
2996 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2997 }
2998
2999 int
3000 event_callback_activate_(struct event_base *base,
3001 struct event_callback *evcb)
3002 {
3003 int r;
3004 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3005 r = event_callback_activate_nolock_(base, evcb);
3006 EVBASE_RELEASE_LOCK(base, th_base_lock);
3007 return r;
3008 }
3009
3010 int
3011 event_callback_activate_nolock_(struct event_base *base,
3012 struct event_callback *evcb)
3013 {
3014 int r = 1;
3015
3016 if (evcb->evcb_flags & EVLIST_FINALIZING)
3017 return 0;
3018
3019 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3020 default:
3021 EVUTIL_ASSERT(0);
3022 EVUTIL_FALLTHROUGH;
3023 case EVLIST_ACTIVE_LATER:
3024 event_queue_remove_active_later(base, evcb);
3025 r = 0;
3026 break;
3027 case EVLIST_ACTIVE:
3028 return 0;
3029 case 0:
3030 break;
3031 }
3032
3033 event_queue_insert_active(base, evcb);
3034
3035 if (EVBASE_NEED_NOTIFY(base))
3036 evthread_notify_base(base);
3037
3038 return r;
3039 }
3040
3041 int
3042 event_callback_activate_later_nolock_(struct event_base *base,
3043 struct event_callback *evcb)
3044 {
3045 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3046 return 0;
3047
3048 event_queue_insert_active_later(base, evcb);
3049 if (EVBASE_NEED_NOTIFY(base))
3050 evthread_notify_base(base);
3051 return 1;
3052 }
3053
3054 void
3055 event_callback_init_(struct event_base *base,
3056 struct event_callback *cb)
3057 {
3058 memset(cb, 0, sizeof(*cb));
3059 cb->evcb_pri = base->nactivequeues - 1;
3060 }
3061
3062 int
3063 event_callback_cancel_(struct event_base *base,
3064 struct event_callback *evcb)
3065 {
3066 int r;
3067 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3068 r = event_callback_cancel_nolock_(base, evcb, 0);
3069 EVBASE_RELEASE_LOCK(base, th_base_lock);
3070 return r;
3071 }
3072
3073 int
3074 event_callback_cancel_nolock_(struct event_base *base,
3075 struct event_callback *evcb, int even_if_finalizing)
3076 {
3077 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3078 return 0;
3079
3080 if (evcb->evcb_flags & EVLIST_INIT)
3081 return event_del_nolock_(event_callback_to_event(evcb),
3082 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3083
3084 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3085 default:
3086 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3087 EVUTIL_ASSERT(0);
3088 break;
3089 case EVLIST_ACTIVE:
3090 /* We get different kinds of events, add them together */
3091 event_queue_remove_active(base, evcb);
3092 return 0;
3093 case EVLIST_ACTIVE_LATER:
3094 event_queue_remove_active_later(base, evcb);
3095 break;
3096 case 0:
3097 break;
3098 }
3099
3100 return 0;
3101 }
3102
3103 void
3104 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3105 {
3106 memset(cb, 0, sizeof(*cb));
3107 cb->evcb_cb_union.evcb_selfcb = fn;
3108 cb->evcb_arg = arg;
3109 cb->evcb_pri = priority;
3110 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3111 }
3112
3113 void
3114 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3115 {
3116 cb->evcb_pri = priority;
3117 }
3118
3119 void
3120 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3121 {
3122 if (!base)
3123 base = current_base;
3124 event_callback_cancel_(base, cb);
3125 }
3126
3127 #define MAX_DEFERREDS_QUEUED 32
3128 int
3129 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3130 {
3131 int r = 1;
3132 if (!base)
3133 base = current_base;
3134 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3135 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3136 r = event_callback_activate_later_nolock_(base, cb);
3137 } else {
3138 r = event_callback_activate_nolock_(base, cb);
3139 if (r) {
3140 ++base->n_deferreds_queued;
3141 }
3142 }
3143 EVBASE_RELEASE_LOCK(base, th_base_lock);
3144 return r;
3145 }
3146
3147 static int
3148 timeout_next(struct event_base *base, struct timeval **tv_p)
3149 {
3150 /* Caller must hold th_base_lock */
3151 struct timeval now;
3152 struct event *ev;
3153 struct timeval *tv = *tv_p;
3154 int res = 0;
3155
3156 ev = min_heap_top_(&base->timeheap);
3157
3158 if (ev == NULL) {
3159 /* if no time-based events are active wait for I/O */
3160 *tv_p = NULL;
3161 goto out;
3162 }
3163
3164 if (gettime(base, &now) == -1) {
3165 res = -1;
3166 goto out;
3167 }
3168
3169 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3170 evutil_timerclear(tv);
3171 goto out;
3172 }
3173
3174 evutil_timersub(&ev->ev_timeout, &now, tv);
3175
3176 EVUTIL_ASSERT(tv->tv_sec >= 0);
3177 EVUTIL_ASSERT(tv->tv_usec >= 0);
3178 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3179
3180 out:
3181 return (res);
3182 }
3183
3184 /* Activate every event whose timeout has elapsed. */
3185 static void
3186 timeout_process(struct event_base *base)
3187 {
3188 /* Caller must hold lock. */
3189 struct timeval now;
3190 struct event *ev;
3191
3192 if (min_heap_empty_(&base->timeheap)) {
3193 return;
3194 }
3195
3196 gettime(base, &now);
3197
3198 while ((ev = min_heap_top_(&base->timeheap))) {
3199 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3200 break;
3201
3202 /* delete this event from the I/O queues */
3203 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3204
3205 event_debug(("timeout_process: event: %p, call %p",
3206 ev, ev->ev_callback));
3207 event_active_nolock_(ev, EV_TIMEOUT, 1);
3208 }
3209 }
3210
3211 #ifndef MAX
3212 #define MAX(a,b) (((a)>(b))?(a):(b))
3213 #endif
3214
3215 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3216
3217 /* These are a fancy way to spell
3218 if (~flags & EVLIST_INTERNAL)
3219 base->event_count--/++;
3220 */
3221 #define DECR_EVENT_COUNT(base,flags) \
3222 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3223 #define INCR_EVENT_COUNT(base,flags) do { \
3224 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3225 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3226 } while (0)
3227
3228 static void
3229 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3230 {
3231 EVENT_BASE_ASSERT_LOCKED(base);
3232 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3233 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3234 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3235 return;
3236 }
3237 DECR_EVENT_COUNT(base, ev->ev_flags);
3238 ev->ev_flags &= ~EVLIST_INSERTED;
3239 }
3240 static void
3241 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3242 {
3243 EVENT_BASE_ASSERT_LOCKED(base);
3244 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3245 event_errx(1, "%s: %p not on queue %x", __func__,
3246 evcb, EVLIST_ACTIVE);
3247 return;
3248 }
3249 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3250 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3251 base->event_count_active--;
3252
3253 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3254 evcb, evcb_active_next);
3255 }
3256 static void
3257 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3258 {
3259 EVENT_BASE_ASSERT_LOCKED(base);
3260 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3261 event_errx(1, "%s: %p not on queue %x", __func__,
3262 evcb, EVLIST_ACTIVE_LATER);
3263 return;
3264 }
3265 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3266 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3267 base->event_count_active--;
3268
3269 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3270 }
3271 static void
3272 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3273 {
3274 EVENT_BASE_ASSERT_LOCKED(base);
3275 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3276 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3277 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3278 return;
3279 }
3280 DECR_EVENT_COUNT(base, ev->ev_flags);
3281 ev->ev_flags &= ~EVLIST_TIMEOUT;
3282
3283 if (is_common_timeout(&ev->ev_timeout, base)) {
3284 struct common_timeout_list *ctl =
3285 get_common_timeout_list(base, &ev->ev_timeout);
3286 TAILQ_REMOVE(&ctl->events, ev,
3287 ev_timeout_pos.ev_next_with_common_timeout);
3288 } else {
3289 min_heap_erase_(&base->timeheap, ev);
3290 }
3291 }
3292
3293 #ifdef USE_REINSERT_TIMEOUT
3294 /* Remove and reinsert 'ev' into the timeout queue. */
3295 static void
3296 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3297 int was_common, int is_common, int old_timeout_idx)
3298 {
3299 struct common_timeout_list *ctl;
3300 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3301 event_queue_insert_timeout(base, ev);
3302 return;
3303 }
3304
3305 switch ((was_common<<1) | is_common) {
3306 case 3: /* Changing from one common timeout to another */
3307 ctl = base->common_timeout_queues[old_timeout_idx];
3308 TAILQ_REMOVE(&ctl->events, ev,
3309 ev_timeout_pos.ev_next_with_common_timeout);
3310 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3311 insert_common_timeout_inorder(ctl, ev);
3312 break;
3313 case 2: /* Was common; is no longer common */
3314 ctl = base->common_timeout_queues[old_timeout_idx];
3315 TAILQ_REMOVE(&ctl->events, ev,
3316 ev_timeout_pos.ev_next_with_common_timeout);
3317 min_heap_push_(&base->timeheap, ev);
3318 break;
3319 case 1: /* Wasn't common; has become common. */
3320 min_heap_erase_(&base->timeheap, ev);
3321 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3322 insert_common_timeout_inorder(ctl, ev);
3323 break;
3324 case 0: /* was in heap; is still on heap. */
3325 min_heap_adjust_(&base->timeheap, ev);
3326 break;
3327 default:
3328 EVUTIL_ASSERT(0); /* unreachable */
3329 break;
3330 }
3331 }
3332 #endif
3333
3334 /* Add 'ev' to the common timeout list in 'ev'. */
3335 static void
3336 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3337 struct event *ev)
3338 {
3339 struct event *e;
3340 /* By all logic, we should just be able to append 'ev' to the end of
3341 * ctl->events, since the timeout on each 'ev' is set to {the common
3342 * timeout} + {the time when we add the event}, and so the events
3343 	 * should arrive in order of their timeouts. But just in case
3344 * there's some wacky threading issue going on, we do a search from
3345 	 * the end of 'ctl->events' to find the right insertion point.
3346 */
3347 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3348 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3349 /* This timercmp is a little sneaky, since both ev and e have
3350 * magic values in tv_usec. Fortunately, they ought to have
3351 * the _same_ magic values in tv_usec. Let's assert for that.
3352 */
3353 EVUTIL_ASSERT(
3354 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3355 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3356 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3357 ev_timeout_pos.ev_next_with_common_timeout);
3358 return;
3359 }
3360 }
3361 TAILQ_INSERT_HEAD(&ctl->events, ev,
3362 ev_timeout_pos.ev_next_with_common_timeout);
3363 }
3364
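/*
 * Sketch (illustrative only, compiled out): these common-timeout lists back
 * the public event_base_init_common_timeout() API.  When very many events
 * share one duration, adding them with the returned timeval keeps them on a
 * per-duration queue instead of in the general timeout min-heap.
 */
#if 0
#include <event2/event.h>

static void
example_add_with_common_timeout(struct event_base *base, struct event *ev)
{
	static const struct timeval ten_sec = { 10, 0 };
	const struct timeval *common;

	common = event_base_init_common_timeout(base, &ten_sec);
	event_add(ev, common != NULL ? common : &ten_sec);
}
#endif
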
3365 static void
3366 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3367 {
3368 EVENT_BASE_ASSERT_LOCKED(base);
3369
3370 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3371 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3372 ev, EV_SOCK_ARG(ev->ev_fd));
3373 return;
3374 }
3375
3376 INCR_EVENT_COUNT(base, ev->ev_flags);
3377
3378 ev->ev_flags |= EVLIST_INSERTED;
3379 }
3380
3381 static void
3382 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3383 {
3384 EVENT_BASE_ASSERT_LOCKED(base);
3385
3386 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3387 /* Double insertion is possible for active events */
3388 return;
3389 }
3390
3391 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3392
3393 evcb->evcb_flags |= EVLIST_ACTIVE;
3394
3395 base->event_count_active++;
3396 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3397 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3398 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3399 evcb, evcb_active_next);
3400 }
3401
3402 static void
3403 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3404 {
3405 EVENT_BASE_ASSERT_LOCKED(base);
3406 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3407 /* Double insertion is possible */
3408 return;
3409 }
3410
3411 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3412 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3413 base->event_count_active++;
3414 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3415 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3416 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3417 }
3418
3419 static void
3420 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3421 {
3422 EVENT_BASE_ASSERT_LOCKED(base);
3423
3424 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3425 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3426 ev, EV_SOCK_ARG(ev->ev_fd));
3427 return;
3428 }
3429
3430 INCR_EVENT_COUNT(base, ev->ev_flags);
3431
3432 ev->ev_flags |= EVLIST_TIMEOUT;
3433
3434 if (is_common_timeout(&ev->ev_timeout, base)) {
3435 struct common_timeout_list *ctl =
3436 get_common_timeout_list(base, &ev->ev_timeout);
3437 insert_common_timeout_inorder(ctl, ev);
3438 } else {
3439 min_heap_push_(&base->timeheap, ev);
3440 }
3441 }
3442
3443 static void
3444 event_queue_make_later_events_active(struct event_base *base)
3445 {
3446 struct event_callback *evcb;
3447 EVENT_BASE_ASSERT_LOCKED(base);
3448
3449 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3450 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3451 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3452 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3453 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3454 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3455 }
3456 }
3457
3458 /* Functions for debugging */
3459
3460 const char *
3461 event_get_version(void)
3462 {
3463 return (EVENT__VERSION);
3464 }
3465
3466 ev_uint32_t
3467 event_get_version_number(void)
3468 {
3469 return (EVENT__NUMERIC_VERSION);
3470 }
3471
3472 /*
3473 * No thread-safe interface needed - the information should be the same
3474 * for all threads.
3475 */
3476
3477 const char *
3478 event_get_method(void)
3479 {
3480 return (current_base->evsel->name);
3481 }
3482
3483 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3484 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3485 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3486 static void (*mm_free_fn_)(void *p) = NULL;
3487
3488 void *
3489 event_mm_malloc_(size_t sz)
3490 {
3491 if (sz == 0)
3492 return NULL;
3493
3494 if (mm_malloc_fn_)
3495 return mm_malloc_fn_(sz);
3496 else
3497 return malloc(sz);
3498 }
3499
3500 void *
3501 event_mm_calloc_(size_t count, size_t size)
3502 {
3503 if (count == 0 || size == 0)
3504 return NULL;
3505
3506 if (mm_malloc_fn_) {
3507 size_t sz = count * size;
3508 void *p = NULL;
3509 if (count > EV_SIZE_MAX / size)
3510 goto error;
3511 p = mm_malloc_fn_(sz);
3512 if (p)
3513 return memset(p, 0, sz);
3514 } else {
3515 void *p = calloc(count, size);
3516 #ifdef _WIN32
3517 /* Windows calloc doesn't reliably set ENOMEM */
3518 if (p == NULL)
3519 goto error;
3520 #endif
3521 return p;
3522 }
3523
3524 error:
3525 errno = ENOMEM;
3526 return NULL;
3527 }
3528
3529 char *
3530 event_mm_strdup_(const char *str)
3531 {
3532 if (!str) {
3533 errno = EINVAL;
3534 return NULL;
3535 }
3536
3537 if (mm_malloc_fn_) {
3538 size_t ln = strlen(str);
3539 void *p = NULL;
3540 if (ln == EV_SIZE_MAX)
3541 goto error;
3542 p = mm_malloc_fn_(ln+1);
3543 if (p)
3544 return memcpy(p, str, ln+1);
3545 } else
3546 #ifdef _WIN32
3547 return _strdup(str);
3548 #else
3549 return strdup(str);
3550 #endif
3551
3552 error:
3553 errno = ENOMEM;
3554 return NULL;
3555 }
3556
3557 void *
3558 event_mm_realloc_(void *ptr, size_t sz)
3559 {
3560 if (mm_realloc_fn_)
3561 return mm_realloc_fn_(ptr, sz);
3562 else
3563 return realloc(ptr, sz);
3564 }
3565
3566 void
3567 event_mm_free_(void *ptr)
3568 {
3569 if (mm_free_fn_)
3570 mm_free_fn_(ptr);
3571 else
3572 free(ptr);
3573 }
3574
3575 void
3576 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3577 void *(*realloc_fn)(void *ptr, size_t sz),
3578 void (*free_fn)(void *ptr))
3579 {
3580 mm_malloc_fn_ = malloc_fn;
3581 mm_realloc_fn_ = realloc_fn;
3582 mm_free_fn_ = free_fn;
3583 }
3584 #endif
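/*
 * Example (editor's sketch, not part of the original file): installing
 * custom allocators with event_set_mem_functions().  The counting_*
 * wrappers are hypothetical; replacement functions are typically installed
 * once, before any other libevent call, and left in place for the life of
 * the process.  This only applies when the library was built without
 * EVENT__DISABLE_MM_REPLACEMENT.
 *
 *	#include <stdlib.h>
 *	#include <event2/event.h>
 *
 *	static size_t live_allocations;
 *
 *	static void *counting_malloc(size_t sz)
 *	{
 *		++live_allocations;
 *		return malloc(sz);
 *	}
 *	static void *counting_realloc(void *p, size_t sz)
 *	{
 *		if (!p)
 *			++live_allocations;
 *		return realloc(p, sz);
 *	}
 *	static void counting_free(void *p)
 *	{
 *		if (p)
 *			--live_allocations;
 *		free(p);
 *	}
 *
 *	int main(void)
 *	{
 *		event_set_mem_functions(counting_malloc,
 *		    counting_realloc, counting_free);
 *		struct event_base *base = event_base_new();
 *		event_base_free(base);
 *		return live_allocations == 0 ? 0 : 1;
 *	}
 */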
3585
3586 #ifdef EVENT__HAVE_EVENTFD
3587 static void
3588 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3589 {
3590 ev_uint64_t msg;
3591 ev_ssize_t r;
3592 struct event_base *base = arg;
3593
3594 r = read(fd, (void*) &msg, sizeof(msg));
3595 if (r<0 && errno != EAGAIN) {
3596 event_sock_warn(fd, "Error reading from eventfd");
3597 }
3598 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3599 base->is_notify_pending = 0;
3600 EVBASE_RELEASE_LOCK(base, th_base_lock);
3601 }
3602 #endif
3603
3604 static void
3605 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3606 {
3607 unsigned char buf[1024];
3608 struct event_base *base = arg;
3609 #ifdef _WIN32
3610 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3611 ;
3612 #else
3613 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3614 ;
3615 #endif
3616
3617 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3618 base->is_notify_pending = 0;
3619 EVBASE_RELEASE_LOCK(base, th_base_lock);
3620 }
3621
3622 int
3623 evthread_make_base_notifiable(struct event_base *base)
3624 {
3625 int r;
3626 if (!base)
3627 return -1;
3628
3629 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3630 r = evthread_make_base_notifiable_nolock_(base);
3631 EVBASE_RELEASE_LOCK(base, th_base_lock);
3632 return r;
3633 }
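/*
 * Example (editor's sketch): enabling threading support before creating a
 * base normally makes it notifiable automatically; the explicit call below
 * is mainly useful when the base was created before threads were set up.
 * Sketch, assuming pthreads and the event_pthreads helper library:
 *
 *	#include <event2/event.h>
 *	#include <event2/thread.h>
 *
 *	static struct event_base *make_threaded_base(void)
 *	{
 *		struct event_base *base;
 *		if (evthread_use_pthreads() < 0)
 *			return NULL;
 *		base = event_base_new();
 *		if (base && evthread_make_base_notifiable(base) < 0) {
 *			event_base_free(base);
 *			return NULL;
 *		}
 *		return base;
 *	}
 */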
3634
3635 static int
3636 evthread_make_base_notifiable_nolock_(struct event_base *base)
3637 {
3638 void (*cb)(evutil_socket_t, short, void *);
3639 int (*notify)(struct event_base *);
3640
3641 if (base->th_notify_fn != NULL) {
3642 /* The base is already notifiable: we're doing fine. */
3643 return 0;
3644 }
3645
3646 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3647 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3648 base->th_notify_fn = event_kq_notify_base_;
3649 /* No need to add an event here; the backend can wake
3650 * itself up just fine. */
3651 return 0;
3652 }
3653 #endif
3654
3655 #ifdef EVENT__HAVE_EVENTFD
3656 base->th_notify_fd[0] = evutil_eventfd_(0,
3657 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3658 if (base->th_notify_fd[0] >= 0) {
3659 base->th_notify_fd[1] = -1;
3660 notify = evthread_notify_base_eventfd;
3661 cb = evthread_notify_drain_eventfd;
3662 } else
3663 #endif
3664 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3665 notify = evthread_notify_base_default;
3666 cb = evthread_notify_drain_default;
3667 } else {
3668 return -1;
3669 }
3670
3671 base->th_notify_fn = notify;
3672
3673 /* prepare an event that we can use for wakeup */
3674 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3675 EV_READ|EV_PERSIST, cb, base);
3676
3677 /* we need to mark this as an internal event */
3678 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3679 event_priority_set(&base->th_notify, 0);
3680
3681 return event_add_nolock_(&base->th_notify, NULL, 0);
3682 }
3683
3684 int
3685 event_base_foreach_event_nolock_(struct event_base *base,
3686 event_base_foreach_event_cb fn, void *arg)
3687 {
3688 int r, i;
3689 unsigned u;
3690 struct event *ev;
3691
3692 /* Start out with all the EVLIST_INSERTED events. */
3693 if ((r = evmap_foreach_event_(base, fn, arg)))
3694 return r;
3695
3696 /* Okay, now we deal with those events that have timeouts and are in
3697 * the min-heap. */
3698 for (u = 0; u < base->timeheap.n; ++u) {
3699 ev = base->timeheap.p[u];
3700 if (ev->ev_flags & EVLIST_INSERTED) {
3701 /* we already processed this one */
3702 continue;
3703 }
3704 if ((r = fn(base, ev, arg)))
3705 return r;
3706 }
3707
3708 /* Now for the events in one of the common-timeout queues,
3709 * rather than the min-heap. */
3710 for (i = 0; i < base->n_common_timeouts; ++i) {
3711 struct common_timeout_list *ctl =
3712 base->common_timeout_queues[i];
3713 TAILQ_FOREACH(ev, &ctl->events,
3714 ev_timeout_pos.ev_next_with_common_timeout) {
3715 if (ev->ev_flags & EVLIST_INSERTED) {
3716 /* we already processed this one */
3717 continue;
3718 }
3719 if ((r = fn(base, ev, arg)))
3720 return r;
3721 }
3722 }
3723
3724 /* Finally, we deal with all the active events that we haven't touched
3725 * yet. */
3726 for (i = 0; i < base->nactivequeues; ++i) {
3727 struct event_callback *evcb;
3728 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3729 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3730 /* This isn't an event (EVLIST_INIT clear), or
3731 * we already processed it (EVLIST_INSERTED or
3732 * EVLIST_TIMEOUT set). */
3733 continue;
3734 }
3735 ev = event_callback_to_event(evcb);
3736 if ((r = fn(base, ev, arg)))
3737 return r;
3738 }
3739 }
3740
3741 return 0;
3742 }
3743
3744 /* Helper for event_base_dump_events: called on each event in the event base;
3745 * dumps only the inserted events. */
3746 static int
3747 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3748 {
3749 FILE *output = arg;
3750 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3751 "sig" : "fd ";
3752
3753 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3754 return 0;
3755
3756 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3757 e, gloss, EV_SOCK_ARG(e->ev_fd),
3758 (e->ev_events&EV_READ)?" Read":"",
3759 (e->ev_events&EV_WRITE)?" Write":"",
3760 (e->ev_events&EV_CLOSED)?" EOF":"",
3761 (e->ev_events&EV_SIGNAL)?" Signal":"",
3762 (e->ev_events&EV_PERSIST)?" Persist":"",
3763 (e->ev_events&EV_ET)?" ET":"",
3764 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3765 if (e->ev_flags & EVLIST_TIMEOUT) {
3766 struct timeval tv;
3767 tv.tv_sec = e->ev_timeout.tv_sec;
3768 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3769 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3770 fprintf(output, " Timeout=%ld.%06d",
3771 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3772 }
3773 fputc('\n', output);
3774
3775 return 0;
3776 }
3777
3778 /* Helper for event_base_dump_events: called on each event in the event base;
3779 * dumps only the active events. */
3780 static int
3781 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3782 {
3783 FILE *output = arg;
3784 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3785 "sig" : "fd ";
3786
3787 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3788 return 0;
3789
3790 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3791 e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3792 (e->ev_res&EV_READ)?" Read":"",
3793 (e->ev_res&EV_WRITE)?" Write":"",
3794 (e->ev_res&EV_CLOSED)?" EOF":"",
3795 (e->ev_res&EV_SIGNAL)?" Signal":"",
3796 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3797 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3798 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3799
3800 return 0;
3801 }
3802
3803 int
3804 event_base_foreach_event(struct event_base *base,
3805 event_base_foreach_event_cb fn, void *arg)
3806 {
3807 int r;
3808 if ((!fn) || (!base)) {
3809 return -1;
3810 }
3811 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3812 r = event_base_foreach_event_nolock_(base, fn, arg);
3813 EVBASE_RELEASE_LOCK(base, th_base_lock);
3814 return r;
3815 }
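/*
 * Example (editor's sketch): counting pending read events with
 * event_base_foreach_event().  Returning 0 from the callback continues the
 * iteration; a non-zero return stops it and is passed back to the caller.
 *
 *	#include <event2/event.h>
 *
 *	static int count_read_cb(const struct event_base *base,
 *	    const struct event *ev, void *arg)
 *	{
 *		int *count = arg;
 *		(void)base;
 *		if (event_get_events(ev) & EV_READ)
 *			++*count;
 *		return 0;
 *	}
 *
 *	static int count_read_events(struct event_base *base)
 *	{
 *		int count = 0;
 *		event_base_foreach_event(base, count_read_cb, &count);
 *		return count;
 *	}
 */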
3816
3817
3818 void
3819 event_base_dump_events(struct event_base *base, FILE *output)
3820 {
3821 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3822 fprintf(output, "Inserted events:\n");
3823 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3824
3825 fprintf(output, "Active events:\n");
3826 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3827 EVBASE_RELEASE_LOCK(base, th_base_lock);
3828 }
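/*
 * Example (editor's sketch): dumping the state of a base to stderr, e.g.
 * from a diagnostic signal handler or a debugger breakpoint:
 *
 *	event_base_dump_events(base, stderr);
 */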
3829
3830 void
3831 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3832 {
3833 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3834
3835 /* Activate any non-timer events */
3836 if (!(events & EV_TIMEOUT)) {
3837 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3838 } else {
3839 /* If we want to activate timer events, loop and activate each event with
3840 * the same fd in both the timeheap and common timeouts list */
3841 int i;
3842 unsigned u;
3843 struct event *ev;
3844
3845 for (u = 0; u < base->timeheap.n; ++u) {
3846 ev = base->timeheap.p[u];
3847 if (ev->ev_fd == fd) {
3848 event_active_nolock_(ev, EV_TIMEOUT, 1);
3849 }
3850 }
3851
3852 for (i = 0; i < base->n_common_timeouts; ++i) {
3853 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3854 TAILQ_FOREACH(ev, &ctl->events,
3855 ev_timeout_pos.ev_next_with_common_timeout) {
3856 if (ev->ev_fd == fd) {
3857 event_active_nolock_(ev, EV_TIMEOUT, 1);
3858 }
3859 }
3860 }
3861 }
3862
3863 EVBASE_RELEASE_LOCK(base, th_base_lock);
3864 }
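/*
 * Example (editor's sketch): simulating readiness on a descriptor, which can
 * be handy in tests.  This activates every event watching fd for reading as
 * if the backend had reported it:
 *
 *	event_base_active_by_fd(base, fd, EV_READ);
 *
 * Passing EV_TIMEOUT instead activates the timer events associated with fd,
 * as the branch above shows.
 */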
3865
3866 void
3867 event_base_active_by_signal(struct event_base *base, int sig)
3868 {
3869 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3870 evmap_signal_active_(base, sig, 1);
3871 EVBASE_RELEASE_LOCK(base, th_base_lock);
3872 }
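/*
 * Example (editor's sketch): triggering signal callbacks without delivering
 * a real signal, e.g. forcing a SIGHUP-style reload from test code:
 *
 *	event_base_active_by_signal(base, SIGHUP);
 */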
3873
3874
3875 void
3876 event_base_add_virtual_(struct event_base *base)
3877 {
3878 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3879 base->virtual_event_count++;
3880 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3881 EVBASE_RELEASE_LOCK(base, th_base_lock);
3882 }
3883
3884 void
3885 event_base_del_virtual_(struct event_base *base)
3886 {
3887 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3888 EVUTIL_ASSERT(base->virtual_event_count > 0);
3889 base->virtual_event_count--;
3890 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3891 evthread_notify_base(base);
3892 EVBASE_RELEASE_LOCK(base, th_base_lock);
3893 }
3894
3895 static void
3896 event_free_debug_globals_locks(void)
3897 {
3898 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3899 #ifndef EVENT__DISABLE_DEBUG_MODE
3900 if (event_debug_map_lock_ != NULL) {
3901 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3902 event_debug_map_lock_ = NULL;
3903 evthreadimpl_disable_lock_debugging_();
3904 }
3905 #endif /* EVENT__DISABLE_DEBUG_MODE */
3906 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3907 return;
3908 }
3909
3910 static void
3911 event_free_debug_globals(void)
3912 {
3913 event_free_debug_globals_locks();
3914 }
3915
3916 static void
3917 event_free_evsig_globals(void)
3918 {
3919 evsig_free_globals_();
3920 }
3921
3922 static void
3923 event_free_evutil_globals(void)
3924 {
3925 evutil_free_globals_();
3926 }
3927
3928 static void
3929 event_free_globals(void)
3930 {
3931 event_free_debug_globals();
3932 event_free_evsig_globals();
3933 event_free_evutil_globals();
3934 }
3935
3936 void
3937 libevent_global_shutdown(void)
3938 {
3939 event_disable_debug_mode();
3940 event_free_globals();
3941 }
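/*
 * Example (editor's sketch): libevent_global_shutdown() only releases state
 * kept in library globals; it does not free event bases or events, so those
 * must be released first.  A typical teardown at process exit:
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */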
3942
3943 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3944 int
3945 event_global_setup_locks_(const int enable_locks)
3946 {
3947 #ifndef EVENT__DISABLE_DEBUG_MODE
3948 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3949 #endif
3950 if (evsig_global_setup_locks_(enable_locks) < 0)
3951 return -1;
3952 if (evutil_global_setup_locks_(enable_locks) < 0)
3953 return -1;
3954 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3955 return -1;
3956 return 0;
3957 }
3958 #endif
3959
3960 void
3961 event_base_assert_ok_(struct event_base *base)
3962 {
3963 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3964 event_base_assert_ok_nolock_(base);
3965 EVBASE_RELEASE_LOCK(base, th_base_lock);
3966 }
3967
3968 void
3969 event_base_assert_ok_nolock_(struct event_base *base)
3970 {
3971 int i;
3972 int count;
3973
3974 /* First do checks on the per-fd and per-signal lists */
3975 evmap_check_integrity_(base);
3976
3977 /* Check the heap property */
3978 for (i = 1; i < (int)base->timeheap.n; ++i) {
3979 int parent = (i - 1) / 2;
3980 struct event *ev, *p_ev;
3981 ev = base->timeheap.p[i];
3982 p_ev = base->timeheap.p[parent];
3983 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3984 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3985 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3986 }
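	/* Editor's note: the loop above relies on the usual 0-indexed binary
	 * heap layout, where the parent of slot i is slot (i - 1) / 2; for
	 * example, slots 1 and 2 are the children of slot 0, and slot 5's
	 * parent is slot 2. */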
3987
3988 /* Check that the common timeouts are fine */
3989 for (i = 0; i < base->n_common_timeouts; ++i) {
3990 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3991 struct event *last=NULL, *ev;
3992
3993 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3994
3995 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3996 if (last)
3997 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3998 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3999 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4000 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4001 last = ev;
4002 }
4003 }
4004
4005 /* Check the active queues. */
4006 count = 0;
4007 for (i = 0; i < base->nactivequeues; ++i) {
4008 struct event_callback *evcb;
4009 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4010 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4011 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4012 EVUTIL_ASSERT(evcb->evcb_pri == i);
4013 ++count;
4014 }
4015 }
4016
4017 {
4018 struct event_callback *evcb;
4019 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4020 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4021 ++count;
4022 }
4023 }
4024 EVUTIL_ASSERT(count == base->event_count_active);
4025 }
4026