1*67ecd4f3SMax Laier /*
2*67ecd4f3SMax Laier * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
3*67ecd4f3SMax Laier * All rights reserved.
4*67ecd4f3SMax Laier *
5*67ecd4f3SMax Laier * Redistribution and use in source and binary forms, with or without
6*67ecd4f3SMax Laier * modification, are permitted provided that the following conditions
7*67ecd4f3SMax Laier * are met:
8*67ecd4f3SMax Laier * 1. Redistributions of source code must retain the above copyright
9*67ecd4f3SMax Laier * notice, this list of conditions and the following disclaimer.
10*67ecd4f3SMax Laier * 2. Redistributions in binary form must reproduce the above copyright
11*67ecd4f3SMax Laier * notice, this list of conditions and the following disclaimer in the
12*67ecd4f3SMax Laier * documentation and/or other materials provided with the distribution.
13*67ecd4f3SMax Laier * 3. The name of the author may not be used to endorse or promote products
14*67ecd4f3SMax Laier * derived from this software without specific prior written permission.
15*67ecd4f3SMax Laier *
16*67ecd4f3SMax Laier * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17*67ecd4f3SMax Laier * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18*67ecd4f3SMax Laier * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19*67ecd4f3SMax Laier * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20*67ecd4f3SMax Laier * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21*67ecd4f3SMax Laier * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22*67ecd4f3SMax Laier * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23*67ecd4f3SMax Laier * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24*67ecd4f3SMax Laier * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25*67ecd4f3SMax Laier * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26*67ecd4f3SMax Laier */
27*67ecd4f3SMax Laier #ifdef HAVE_CONFIG_H
28*67ecd4f3SMax Laier #include "config.h"
29*67ecd4f3SMax Laier #endif
30*67ecd4f3SMax Laier
31*67ecd4f3SMax Laier #ifdef WIN32
32*67ecd4f3SMax Laier #define WIN32_LEAN_AND_MEAN
33*67ecd4f3SMax Laier #include <windows.h>
34*67ecd4f3SMax Laier #undef WIN32_LEAN_AND_MEAN
35*67ecd4f3SMax Laier #include "misc.h"
36*67ecd4f3SMax Laier #endif
37*67ecd4f3SMax Laier #include <sys/types.h>
38*67ecd4f3SMax Laier #include <sys/tree.h>
39*67ecd4f3SMax Laier #ifdef HAVE_SYS_TIME_H
40*67ecd4f3SMax Laier #include <sys/time.h>
41*67ecd4f3SMax Laier #else
42*67ecd4f3SMax Laier #include <sys/_time.h>
43*67ecd4f3SMax Laier #endif
44*67ecd4f3SMax Laier #include <sys/queue.h>
45*67ecd4f3SMax Laier #include <stdio.h>
46*67ecd4f3SMax Laier #include <stdlib.h>
47*67ecd4f3SMax Laier #ifndef WIN32
48*67ecd4f3SMax Laier #include <unistd.h>
49*67ecd4f3SMax Laier #endif
50*67ecd4f3SMax Laier #include <errno.h>
51*67ecd4f3SMax Laier #include <signal.h>
52*67ecd4f3SMax Laier #include <string.h>
53*67ecd4f3SMax Laier #include <assert.h>
54*67ecd4f3SMax Laier
55*67ecd4f3SMax Laier #include "event.h"
56*67ecd4f3SMax Laier #include "event-internal.h"
57*67ecd4f3SMax Laier #include "log.h"
58*67ecd4f3SMax Laier
59*67ecd4f3SMax Laier #ifdef HAVE_EVENT_PORTS
60*67ecd4f3SMax Laier extern const struct eventop evportops;
61*67ecd4f3SMax Laier #endif
62*67ecd4f3SMax Laier #ifdef HAVE_SELECT
63*67ecd4f3SMax Laier extern const struct eventop selectops;
64*67ecd4f3SMax Laier #endif
65*67ecd4f3SMax Laier #ifdef HAVE_POLL
66*67ecd4f3SMax Laier extern const struct eventop pollops;
67*67ecd4f3SMax Laier #endif
68*67ecd4f3SMax Laier #ifdef HAVE_RTSIG
69*67ecd4f3SMax Laier extern const struct eventop rtsigops;
70*67ecd4f3SMax Laier #endif
71*67ecd4f3SMax Laier #ifdef HAVE_EPOLL
72*67ecd4f3SMax Laier extern const struct eventop epollops;
73*67ecd4f3SMax Laier #endif
74*67ecd4f3SMax Laier #ifdef HAVE_WORKING_KQUEUE
75*67ecd4f3SMax Laier extern const struct eventop kqops;
76*67ecd4f3SMax Laier #endif
77*67ecd4f3SMax Laier #ifdef HAVE_DEVPOLL
78*67ecd4f3SMax Laier extern const struct eventop devpollops;
79*67ecd4f3SMax Laier #endif
80*67ecd4f3SMax Laier #ifdef WIN32
81*67ecd4f3SMax Laier extern const struct eventop win32ops;
82*67ecd4f3SMax Laier #endif
83*67ecd4f3SMax Laier
/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_RTSIG
	&rtsigops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */

/* Queue of installed signal events; shared by all bases (not thread safe). */
struct event_list signalqueue;

/* The base used by the legacy non-_base API; set by event_init(). */
struct event_base *current_base = NULL;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval *);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);
132*67ecd4f3SMax Laier
133*67ecd4f3SMax Laier static int
compare(struct event * a,struct event * b)134*67ecd4f3SMax Laier compare(struct event *a, struct event *b)
135*67ecd4f3SMax Laier {
136*67ecd4f3SMax Laier if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
137*67ecd4f3SMax Laier return (-1);
138*67ecd4f3SMax Laier else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
139*67ecd4f3SMax Laier return (1);
140*67ecd4f3SMax Laier if (a < b)
141*67ecd4f3SMax Laier return (-1);
142*67ecd4f3SMax Laier else if (a > b)
143*67ecd4f3SMax Laier return (1);
144*67ecd4f3SMax Laier return (0);
145*67ecd4f3SMax Laier }
146*67ecd4f3SMax Laier
/*
 * Fetch the current time into *tp, preferring a monotonic clock when
 * available so wall-clock adjustments do not disturb pending timeouts.
 * Returns 0 on success, -1 on failure.
 */
static int
gettime(struct timeval *tp)
{
#ifdef HAVE_CLOCK_GETTIME
	struct timespec ts;

#ifdef HAVE_CLOCK_MONOTONIC
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
#else
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
#endif
		return (-1);
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;
#else
	/* Fix: the return value of gettimeofday() was silently ignored. */
	if (gettimeofday(tp, NULL) == -1)
		return (-1);
#endif

	return (0);
}
167*67ecd4f3SMax Laier
168*67ecd4f3SMax Laier RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);
169*67ecd4f3SMax Laier
170*67ecd4f3SMax Laier RB_GENERATE(event_tree, event, ev_timeout_node, compare);
171*67ecd4f3SMax Laier
172*67ecd4f3SMax Laier
173*67ecd4f3SMax Laier void *
event_init(void)174*67ecd4f3SMax Laier event_init(void)
175*67ecd4f3SMax Laier {
176*67ecd4f3SMax Laier int i;
177*67ecd4f3SMax Laier
178*67ecd4f3SMax Laier if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
179*67ecd4f3SMax Laier event_err(1, "%s: calloc");
180*67ecd4f3SMax Laier
181*67ecd4f3SMax Laier event_sigcb = NULL;
182*67ecd4f3SMax Laier event_gotsig = 0;
183*67ecd4f3SMax Laier gettime(¤t_base->event_tv);
184*67ecd4f3SMax Laier
185*67ecd4f3SMax Laier RB_INIT(¤t_base->timetree);
186*67ecd4f3SMax Laier TAILQ_INIT(¤t_base->eventqueue);
187*67ecd4f3SMax Laier TAILQ_INIT(&signalqueue);
188*67ecd4f3SMax Laier
189*67ecd4f3SMax Laier current_base->evbase = NULL;
190*67ecd4f3SMax Laier for (i = 0; eventops[i] && !current_base->evbase; i++) {
191*67ecd4f3SMax Laier current_base->evsel = eventops[i];
192*67ecd4f3SMax Laier
193*67ecd4f3SMax Laier current_base->evbase = current_base->evsel->init();
194*67ecd4f3SMax Laier }
195*67ecd4f3SMax Laier
196*67ecd4f3SMax Laier if (current_base->evbase == NULL)
197*67ecd4f3SMax Laier event_errx(1, "%s: no event mechanism available", __func__);
198*67ecd4f3SMax Laier
199*67ecd4f3SMax Laier if (getenv("EVENT_SHOW_METHOD"))
200*67ecd4f3SMax Laier event_msgx("libevent using: %s\n",
201*67ecd4f3SMax Laier current_base->evsel->name);
202*67ecd4f3SMax Laier
203*67ecd4f3SMax Laier /* allocate a single active event queue */
204*67ecd4f3SMax Laier event_base_priority_init(current_base, 1);
205*67ecd4f3SMax Laier
206*67ecd4f3SMax Laier return (current_base);
207*67ecd4f3SMax Laier }
208*67ecd4f3SMax Laier
/*
 * Tear down an event base and release all memory it owns.  A NULL
 * argument means "the global current_base".  The base must already be
 * empty -- every event has to have been deleted first; the asserts
 * below enforce that.
 */
void
event_base_free(struct event_base *base)
{
	int i;

	/* NULL selects the deprecated global base. */
	if (base == NULL && current_base)
		base = current_base;
	/* Drop the global reference when freeing it. */
	if (base == current_base)
		current_base = NULL;

	assert(base);
	assert(TAILQ_EMPTY(&base->eventqueue));
	for (i=0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	/* Let the backend (kqueue, epoll, ...) release its own state. */
	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base->evbase);

	free(base);
}
235*67ecd4f3SMax Laier
/*
 * Deprecated wrapper: configure the number of priority levels on the
 * global current_base.  See event_base_priority_init().
 */
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
241*67ecd4f3SMax Laier
242*67ecd4f3SMax Laier int
event_base_priority_init(struct event_base * base,int npriorities)243*67ecd4f3SMax Laier event_base_priority_init(struct event_base *base, int npriorities)
244*67ecd4f3SMax Laier {
245*67ecd4f3SMax Laier int i;
246*67ecd4f3SMax Laier
247*67ecd4f3SMax Laier if (base->event_count_active)
248*67ecd4f3SMax Laier return (-1);
249*67ecd4f3SMax Laier
250*67ecd4f3SMax Laier if (base->nactivequeues && npriorities != base->nactivequeues) {
251*67ecd4f3SMax Laier for (i = 0; i < base->nactivequeues; ++i) {
252*67ecd4f3SMax Laier free(base->activequeues[i]);
253*67ecd4f3SMax Laier }
254*67ecd4f3SMax Laier free(base->activequeues);
255*67ecd4f3SMax Laier }
256*67ecd4f3SMax Laier
257*67ecd4f3SMax Laier /* Allocate our priority queues */
258*67ecd4f3SMax Laier base->nactivequeues = npriorities;
259*67ecd4f3SMax Laier base->activequeues = (struct event_list **)calloc(base->nactivequeues,
260*67ecd4f3SMax Laier npriorities * sizeof(struct event_list *));
261*67ecd4f3SMax Laier if (base->activequeues == NULL)
262*67ecd4f3SMax Laier event_err(1, "%s: calloc", __func__);
263*67ecd4f3SMax Laier
264*67ecd4f3SMax Laier for (i = 0; i < base->nactivequeues; ++i) {
265*67ecd4f3SMax Laier base->activequeues[i] = malloc(sizeof(struct event_list));
266*67ecd4f3SMax Laier if (base->activequeues[i] == NULL)
267*67ecd4f3SMax Laier event_err(1, "%s: malloc", __func__);
268*67ecd4f3SMax Laier TAILQ_INIT(base->activequeues[i]);
269*67ecd4f3SMax Laier }
270*67ecd4f3SMax Laier
271*67ecd4f3SMax Laier return (0);
272*67ecd4f3SMax Laier }
273*67ecd4f3SMax Laier
274*67ecd4f3SMax Laier int
event_haveevents(struct event_base * base)275*67ecd4f3SMax Laier event_haveevents(struct event_base *base)
276*67ecd4f3SMax Laier {
277*67ecd4f3SMax Laier return (base->event_count > 0);
278*67ecd4f3SMax Laier }
279*67ecd4f3SMax Laier
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	/* Pick the first non-empty queue (numerically lowest priority
	 * index = most urgent); only that one queue is drained per call. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			/* Bail out so event_base_loop() can run the
			 * deprecated signal callback promptly. */
			if (event_gotsig)
				return;
		}
	}
}
321*67ecd4f3SMax Laier
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}
331*67ecd4f3SMax Laier
/* Run the loop for a specific base until no events remain (flags = 0). */
int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}
337*67ecd4f3SMax Laier
/* Timer callback for event_loopexit(): flags the base so its loop exits. */
static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}
344*67ecd4f3SMax Laier
/* not thread safe */

/*
 * Ask the current_base loop to terminate after the given timeout has
 * elapsed (immediately on the next iteration when tv is NULL).
 */
int
event_loopexit(struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}
353*67ecd4f3SMax Laier
/* Like event_loopexit(), but for a caller-specified base. */
int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}
360*67ecd4f3SMax Laier
/* not thread safe */

/* Run the loop on the global current_base with the given EVLOOP_* flags. */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}
368*67ecd4f3SMax Laier
/*
 * Core event loop for a base: repeatedly ask the backend to dispatch
 * ready events, then run expired timeouts and active callbacks.
 *
 * flags may contain EVLOOP_ONCE (return after one batch of callbacks)
 * and/or EVLOOP_NONBLOCK (poll without sleeping).
 *
 * Returns -1 on backend error, 1 when no events are registered, and 0
 * when the loop terminated via event_loopexit().
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	int res, done;

	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		/* Check if time is running backwards */
		gettime(&tv);
		if (timercmp(&tv, &base->event_tv, <)) {
			struct timeval off;
			event_debug(("%s: time is running backwards, corrected",
				    __func__));
			/* Shift every pending timeout by the observed jump
			 * so relative waits stay accurate. */
			timersub(&base->event_tv, &tv, &off);
			timeout_correct(base, &off);
		}
		base->event_tv = tv;

		/* Sleep until the next timeout unless callbacks are already
		 * pending or the caller asked for a non-blocking pass. */
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
			timeout_next(base, &tv);
		else
			timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, &tv);

		if (res == -1)
			return (-1);

		/* Move expired timers onto the active queues, then run one
		 * priority level of callbacks. */
		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
441*67ecd4f3SMax Laier
/* Sets up an event for processing once */

struct event_once {
	struct event ev;		/* the underlying one-shot event */

	void (*cb)(int, short, void *);	/* user callback */
	void *arg;			/* user callback argument */
};
450*67ecd4f3SMax Laier
/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	/* Run the user callback, then free the container allocated by
	 * event_once(). */
	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}
461*67ecd4f3SMax Laier
/* Schedules an event once */

/*
 * Fire callback(fd, events, arg) exactly once: after timeout tv when
 * events == EV_TIMEOUT, or when fd becomes readable/writable.  The
 * helper event is heap-allocated and frees itself in event_once_cb().
 * Returns 0 on success, -1 on a bad event combination (EV_SIGNAL is
 * not supported) or allocation failure.
 */
int
event_once(int fd, short events,
	void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		/* NULL timeout means "fire on the next loop iteration". */
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
507*67ecd4f3SMax Laier
/*
 * Initialize an event structure for the given fd, event mask, and
 * callback.  The event is bound to the global current_base; call
 * event_base_set() afterwards to move it to another base.
 * NOTE(review): current_base is dereferenced unconditionally below, so
 * event_init() must have run first -- confirm at call sites.
 */
void
event_set(struct event *ev, int fd, short events,
	void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* by default, we put new events into the middle priority */
	ev->ev_pri = current_base->nactivequeues/2;
}
526*67ecd4f3SMax Laier
527*67ecd4f3SMax Laier int
event_base_set(struct event_base * base,struct event * ev)528*67ecd4f3SMax Laier event_base_set(struct event_base *base, struct event *ev)
529*67ecd4f3SMax Laier {
530*67ecd4f3SMax Laier /* Only innocent events may be assigned to a different base */
531*67ecd4f3SMax Laier if (ev->ev_flags != EVLIST_INIT)
532*67ecd4f3SMax Laier return (-1);
533*67ecd4f3SMax Laier
534*67ecd4f3SMax Laier ev->ev_base = base;
535*67ecd4f3SMax Laier ev->ev_pri = base->nactivequeues/2;
536*67ecd4f3SMax Laier
537*67ecd4f3SMax Laier return (0);
538*67ecd4f3SMax Laier }
539*67ecd4f3SMax Laier
540*67ecd4f3SMax Laier /*
541*67ecd4f3SMax Laier * Set's the priority of an event - if an event is already scheduled
542*67ecd4f3SMax Laier * changing the priority is going to fail.
543*67ecd4f3SMax Laier */
544*67ecd4f3SMax Laier
545*67ecd4f3SMax Laier int
event_priority_set(struct event * ev,int pri)546*67ecd4f3SMax Laier event_priority_set(struct event *ev, int pri)
547*67ecd4f3SMax Laier {
548*67ecd4f3SMax Laier if (ev->ev_flags & EVLIST_ACTIVE)
549*67ecd4f3SMax Laier return (-1);
550*67ecd4f3SMax Laier if (pri < 0 || pri >= ev->ev_base->nactivequeues)
551*67ecd4f3SMax Laier return (-1);
552*67ecd4f3SMax Laier
553*67ecd4f3SMax Laier ev->ev_pri = pri;
554*67ecd4f3SMax Laier
555*67ecd4f3SMax Laier return (0);
556*67ecd4f3SMax Laier }
557*67ecd4f3SMax Laier
/*
 * Checks if a specific event is pending or scheduled.
 *
 * Returns the subset of `event' (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL)
 * for which ev is currently pending.  If tv is non-NULL and a timeout
 * is both pending and requested, *tv receives the expiry time remapped
 * onto the wall clock.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		/* ev_timeout is on gettime()'s (possibly monotonic) clock;
		 * compute the remaining delta first ... */
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
590*67ecd4f3SMax Laier
/*
 * Register an event with its base.  A non-NULL tv (re)schedules a
 * timeout relative to now; read/write and signal events are handed to
 * the backend the first time they are inserted.
 * Returns 0 on success or the backend's add() result.
 */
int
event_add(struct event *ev, struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	if (tv != NULL) {
		struct timeval now;

		/* Re-adding supersedes any previously scheduled timeout. */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		/* Convert the relative timeout to an absolute expiry. */
		gettime(&now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %d seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		event_queue_insert(base, ev, EVLIST_INSERTED);

		return (evsel->add(evbase, ev));
	} else if ((ev->ev_events & EV_SIGNAL) &&
	    !(ev->ev_flags & EVLIST_SIGNAL)) {
		event_queue_insert(base, ev, EVLIST_SIGNAL);

		return (evsel->add(evbase, ev));
	}

	return (0);
}
654*67ecd4f3SMax Laier
/*
 * Remove an event from every internal queue it sits on (timeout tree,
 * active queue, inserted/signal lists) and, when the backend was told
 * about it, ask the backend to drop it too.
 * Returns 0 on success, -1 if the event was never added to a base,
 * or the backend's del() result.
 */
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	/* Only INSERTED (read/write) and SIGNAL events are known to the
	 * backend and need an explicit del() there. */
	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
697*67ecd4f3SMax Laier
698*67ecd4f3SMax Laier void
event_active(struct event * ev,int res,short ncalls)699*67ecd4f3SMax Laier event_active(struct event *ev, int res, short ncalls)
700*67ecd4f3SMax Laier {
701*67ecd4f3SMax Laier /* We get different kinds of events, add them together */
702*67ecd4f3SMax Laier if (ev->ev_flags & EVLIST_ACTIVE) {
703*67ecd4f3SMax Laier ev->ev_res |= res;
704*67ecd4f3SMax Laier return;
705*67ecd4f3SMax Laier }
706*67ecd4f3SMax Laier
707*67ecd4f3SMax Laier ev->ev_res = res;
708*67ecd4f3SMax Laier ev->ev_ncalls = ncalls;
709*67ecd4f3SMax Laier ev->ev_pncalls = NULL;
710*67ecd4f3SMax Laier event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
711*67ecd4f3SMax Laier }
712*67ecd4f3SMax Laier
713*67ecd4f3SMax Laier int
timeout_next(struct event_base * base,struct timeval * tv)714*67ecd4f3SMax Laier timeout_next(struct event_base *base, struct timeval *tv)
715*67ecd4f3SMax Laier {
716*67ecd4f3SMax Laier struct timeval dflt = TIMEOUT_DEFAULT;
717*67ecd4f3SMax Laier
718*67ecd4f3SMax Laier struct timeval now;
719*67ecd4f3SMax Laier struct event *ev;
720*67ecd4f3SMax Laier
721*67ecd4f3SMax Laier if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
722*67ecd4f3SMax Laier *tv = dflt;
723*67ecd4f3SMax Laier return (0);
724*67ecd4f3SMax Laier }
725*67ecd4f3SMax Laier
726*67ecd4f3SMax Laier if (gettime(&now) == -1)
727*67ecd4f3SMax Laier return (-1);
728*67ecd4f3SMax Laier
729*67ecd4f3SMax Laier if (timercmp(&ev->ev_timeout, &now, <=)) {
730*67ecd4f3SMax Laier timerclear(tv);
731*67ecd4f3SMax Laier return (0);
732*67ecd4f3SMax Laier }
733*67ecd4f3SMax Laier
734*67ecd4f3SMax Laier timersub(&ev->ev_timeout, &now, tv);
735*67ecd4f3SMax Laier
736*67ecd4f3SMax Laier assert(tv->tv_sec >= 0);
737*67ecd4f3SMax Laier assert(tv->tv_usec >= 0);
738*67ecd4f3SMax Laier
739*67ecd4f3SMax Laier event_debug(("timeout_next: in %d seconds", tv->tv_sec));
740*67ecd4f3SMax Laier return (0);
741*67ecd4f3SMax Laier }
742*67ecd4f3SMax Laier
743*67ecd4f3SMax Laier static void
timeout_correct(struct event_base * base,struct timeval * off)744*67ecd4f3SMax Laier timeout_correct(struct event_base *base, struct timeval *off)
745*67ecd4f3SMax Laier {
746*67ecd4f3SMax Laier struct event *ev;
747*67ecd4f3SMax Laier
748*67ecd4f3SMax Laier /*
749*67ecd4f3SMax Laier * We can modify the key element of the node without destroying
750*67ecd4f3SMax Laier * the key, beause we apply it to all in the right order.
751*67ecd4f3SMax Laier */
752*67ecd4f3SMax Laier RB_FOREACH(ev, event_tree, &base->timetree)
753*67ecd4f3SMax Laier timersub(&ev->ev_timeout, off, &ev->ev_timeout);
754*67ecd4f3SMax Laier }
755*67ecd4f3SMax Laier
756*67ecd4f3SMax Laier void
timeout_process(struct event_base * base)757*67ecd4f3SMax Laier timeout_process(struct event_base *base)
758*67ecd4f3SMax Laier {
759*67ecd4f3SMax Laier struct timeval now;
760*67ecd4f3SMax Laier struct event *ev, *next;
761*67ecd4f3SMax Laier
762*67ecd4f3SMax Laier gettime(&now);
763*67ecd4f3SMax Laier
764*67ecd4f3SMax Laier for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
765*67ecd4f3SMax Laier if (timercmp(&ev->ev_timeout, &now, >))
766*67ecd4f3SMax Laier break;
767*67ecd4f3SMax Laier next = RB_NEXT(event_tree, &base->timetree, ev);
768*67ecd4f3SMax Laier
769*67ecd4f3SMax Laier event_queue_remove(base, ev, EVLIST_TIMEOUT);
770*67ecd4f3SMax Laier
771*67ecd4f3SMax Laier /* delete this event from the I/O queues */
772*67ecd4f3SMax Laier event_del(ev);
773*67ecd4f3SMax Laier
774*67ecd4f3SMax Laier event_debug(("timeout_process: call %p",
775*67ecd4f3SMax Laier ev->ev_callback));
776*67ecd4f3SMax Laier event_active(ev, EV_TIMEOUT, 1);
777*67ecd4f3SMax Laier }
778*67ecd4f3SMax Laier }
779*67ecd4f3SMax Laier
780*67ecd4f3SMax Laier void
event_queue_remove(struct event_base * base,struct event * ev,int queue)781*67ecd4f3SMax Laier event_queue_remove(struct event_base *base, struct event *ev, int queue)
782*67ecd4f3SMax Laier {
783*67ecd4f3SMax Laier int docount = 1;
784*67ecd4f3SMax Laier
785*67ecd4f3SMax Laier if (!(ev->ev_flags & queue))
786*67ecd4f3SMax Laier event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
787*67ecd4f3SMax Laier ev, ev->ev_fd, queue);
788*67ecd4f3SMax Laier
789*67ecd4f3SMax Laier if (ev->ev_flags & EVLIST_INTERNAL)
790*67ecd4f3SMax Laier docount = 0;
791*67ecd4f3SMax Laier
792*67ecd4f3SMax Laier if (docount)
793*67ecd4f3SMax Laier base->event_count--;
794*67ecd4f3SMax Laier
795*67ecd4f3SMax Laier ev->ev_flags &= ~queue;
796*67ecd4f3SMax Laier switch (queue) {
797*67ecd4f3SMax Laier case EVLIST_ACTIVE:
798*67ecd4f3SMax Laier if (docount)
799*67ecd4f3SMax Laier base->event_count_active--;
800*67ecd4f3SMax Laier TAILQ_REMOVE(base->activequeues[ev->ev_pri],
801*67ecd4f3SMax Laier ev, ev_active_next);
802*67ecd4f3SMax Laier break;
803*67ecd4f3SMax Laier case EVLIST_SIGNAL:
804*67ecd4f3SMax Laier TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
805*67ecd4f3SMax Laier break;
806*67ecd4f3SMax Laier case EVLIST_TIMEOUT:
807*67ecd4f3SMax Laier RB_REMOVE(event_tree, &base->timetree, ev);
808*67ecd4f3SMax Laier break;
809*67ecd4f3SMax Laier case EVLIST_INSERTED:
810*67ecd4f3SMax Laier TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
811*67ecd4f3SMax Laier break;
812*67ecd4f3SMax Laier default:
813*67ecd4f3SMax Laier event_errx(1, "%s: unknown queue %x", __func__, queue);
814*67ecd4f3SMax Laier }
815*67ecd4f3SMax Laier }
816*67ecd4f3SMax Laier
817*67ecd4f3SMax Laier void
event_queue_insert(struct event_base * base,struct event * ev,int queue)818*67ecd4f3SMax Laier event_queue_insert(struct event_base *base, struct event *ev, int queue)
819*67ecd4f3SMax Laier {
820*67ecd4f3SMax Laier int docount = 1;
821*67ecd4f3SMax Laier
822*67ecd4f3SMax Laier if (ev->ev_flags & queue) {
823*67ecd4f3SMax Laier /* Double insertion is possible for active events */
824*67ecd4f3SMax Laier if (queue & EVLIST_ACTIVE)
825*67ecd4f3SMax Laier return;
826*67ecd4f3SMax Laier
827*67ecd4f3SMax Laier event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
828*67ecd4f3SMax Laier ev, ev->ev_fd, queue);
829*67ecd4f3SMax Laier }
830*67ecd4f3SMax Laier
831*67ecd4f3SMax Laier if (ev->ev_flags & EVLIST_INTERNAL)
832*67ecd4f3SMax Laier docount = 0;
833*67ecd4f3SMax Laier
834*67ecd4f3SMax Laier if (docount)
835*67ecd4f3SMax Laier base->event_count++;
836*67ecd4f3SMax Laier
837*67ecd4f3SMax Laier ev->ev_flags |= queue;
838*67ecd4f3SMax Laier switch (queue) {
839*67ecd4f3SMax Laier case EVLIST_ACTIVE:
840*67ecd4f3SMax Laier if (docount)
841*67ecd4f3SMax Laier base->event_count_active++;
842*67ecd4f3SMax Laier TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
843*67ecd4f3SMax Laier ev,ev_active_next);
844*67ecd4f3SMax Laier break;
845*67ecd4f3SMax Laier case EVLIST_SIGNAL:
846*67ecd4f3SMax Laier TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
847*67ecd4f3SMax Laier break;
848*67ecd4f3SMax Laier case EVLIST_TIMEOUT: {
849*67ecd4f3SMax Laier struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
850*67ecd4f3SMax Laier assert(tmp == NULL);
851*67ecd4f3SMax Laier break;
852*67ecd4f3SMax Laier }
853*67ecd4f3SMax Laier case EVLIST_INSERTED:
854*67ecd4f3SMax Laier TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
855*67ecd4f3SMax Laier break;
856*67ecd4f3SMax Laier default:
857*67ecd4f3SMax Laier event_errx(1, "%s: unknown queue %x", __func__, queue);
858*67ecd4f3SMax Laier }
859*67ecd4f3SMax Laier }
860*67ecd4f3SMax Laier
861*67ecd4f3SMax Laier /* Functions for debugging */
862*67ecd4f3SMax Laier
863*67ecd4f3SMax Laier const char *
event_get_version(void)864*67ecd4f3SMax Laier event_get_version(void)
865*67ecd4f3SMax Laier {
866*67ecd4f3SMax Laier return (VERSION);
867*67ecd4f3SMax Laier }
868*67ecd4f3SMax Laier
869*67ecd4f3SMax Laier /*
870*67ecd4f3SMax Laier * No thread-safe interface needed - the information should be the same
871*67ecd4f3SMax Laier * for all threads.
872*67ecd4f3SMax Laier */
873*67ecd4f3SMax Laier
874*67ecd4f3SMax Laier const char *
event_get_method(void)875*67ecd4f3SMax Laier event_get_method(void)
876*67ecd4f3SMax Laier {
877*67ecd4f3SMax Laier return (current_base->evsel->name);
878*67ecd4f3SMax Laier }
879