/*	$OpenBSD: event.c,v 1.38 2015/01/06 23:11:23 bluhm Exp $	*/

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <netdb.h>
#include <asr.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop kqops;

/* In order of preference */
static const struct eventop *eventops[] = {
	&kqops,
	&pollops,
	&selectops,
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

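/*
 * Probe once at startup whether CLOCK_MONOTONIC works; if it does,
 * gettime() prefers it over gettimeofday() so timers survive
 * wall-clock jumps.
 */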
static void
detect_monotonic(void)
{
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
}

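/*
 * Return the time cached for the current dispatch iteration if set;
 * otherwise read the monotonic clock when available, falling back to
 * gettimeofday().
 */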
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}

	return (gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

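	/* Try each backend in order of preference until one initializes. */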
	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (!issetugid() && getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}

void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	*/
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

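	/* Re-add every inserted event to the freshly initialized backend. */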
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
	return (event_base_priority_init(current_base, npriorities));
}

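/*
 * Allocate one active-event queue per priority.  Fails if any events
 * are currently active, since they would be stranded on the old queues.
 */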
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allow deletes from the callback to work: event_del()
		 * can zero our local call counter through ev_pncalls. */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}

/* not thread safe */
int
event_loop(int flags)
{
	return (event_base_loop(current_base, flags));
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* remember the current time as the last known time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback; it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; schedules an event once */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return (event_base_once(current_base, fd, events, callback, arg, tv));
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
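
/*
 * Usage sketch (illustrative, not part of the library): run a callback
 * once, five seconds from now.  my_cb and my_arg are placeholder names;
 * event_once() returns -1 on failure.
 *
 *	void my_cb(int fd, short events, void *arg);
 *
 *	struct timeval tv = { 5, 0 };
 *	event_once(-1, EV_TIMEOUT, my_cb, my_arg, &tv);
 */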

void
event_set(struct event *ev, int fd, short events,
	  void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}

/*
 * Sets the priority of an event.  Changing the priority fails if the
 * event is already active.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below; if any step
	 * fails, we must not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just actively executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %lld seconds, call %p",
			 (long long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just actively executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

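/*
 * Make an event active with the result flags in res;
 * event_process_active() will run its callback ncalls times.
 */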
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

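/*
 * Compute how long the dispatch call may block: the interval until the
 * nearest timeout, or NULL (wait for I/O indefinitely) if no timers
 * are pending.
 */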
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active, wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %lld seconds", (long long)tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using
 * CLOCK_MONOTONIC.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of each node without destroying
	 * the heap order, because we apply the same offset to all of them.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

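/*
 * Activate every event whose timeout has expired, removing it from
 * the I/O queues first.
 */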
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

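	/* internal events do not count toward the public event count */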
	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_push(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (_EVENT_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}

/*
 * Libevent glue for ASR.
 */
struct event_asr {
	struct event	 ev;
	struct asr_query *async;
	void		(*cb)(struct asr_result *, void *);
	void		*arg;
};

static void
event_asr_dispatch(int fd __attribute__((__unused__)),
    short ev __attribute__((__unused__)), void *arg)
{
	struct event_asr	*eva = arg;
	struct asr_result	 ar;
	struct timeval		 tv;

	event_del(&eva->ev);

	if (asr_run(eva->async, &ar)) {
		eva->cb(&ar, eva->arg);
		free(eva);
	} else {
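		/* asr needs more I/O: wait for the requested condition
		 * with the suggested timeout (milliseconds), then run
		 * the query again. */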
		event_set(&eva->ev, ar.ar_fd,
		    ar.ar_cond == ASR_WANT_READ ? EV_READ : EV_WRITE,
		    event_asr_dispatch, eva);
		tv.tv_sec = ar.ar_timeout / 1000;
		tv.tv_usec = (ar.ar_timeout % 1000) * 1000;
		event_add(&eva->ev, &tv);
	}
}

struct event_asr *
event_asr_run(struct asr_query *async, void (*cb)(struct asr_result *, void *),
    void *arg)
{
	struct event_asr *eva;
	struct timeval tv;

	eva = calloc(1, sizeof *eva);
	if (eva == NULL)
		return (NULL);
	eva->async = async;
	eva->cb = cb;
	eva->arg = arg;
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&eva->ev, event_asr_dispatch, eva);
	evtimer_add(&eva->ev, &tv);
	return (eva);
}
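
/*
 * Usage sketch (illustrative, not part of the library), assuming the
 * asr getaddrinfo_async() interface; resolve_done is a placeholder:
 *
 *	static void
 *	resolve_done(struct asr_result *ar, void *arg)
 *	{
 *		if (ar->ar_gai_errno == 0)
 *			freeaddrinfo(ar->ar_addrinfo);
 *	}
 *
 *	struct asr_query *aq;
 *
 *	aq = getaddrinfo_async("www.example.org", "www", NULL, NULL);
 *	if (aq != NULL)
 *		event_asr_run(aq, resolve_done, NULL);
 */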

void
event_asr_abort(struct event_asr *eva)
{
	asr_abort(eva->async);
	event_del(&eva->ev);
	free(eva);
}
1046