xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/evmap.c (revision 413d532bcc3f62d122e56d92e13ac64825a40baf)
1 /*	$NetBSD: evmap.c,v 1.1.1.1 2013/12/27 23:31:15 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 #include "event2/event-config.h"
29 #include "evconfig-private.h"
30 
31 #ifdef _WIN32
32 #include <winsock2.h>
33 #define WIN32_LEAN_AND_MEAN
34 #include <windows.h>
35 #undef WIN32_LEAN_AND_MEAN
36 #endif
37 #include <sys/types.h>
38 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
39 #include <sys/time.h>
40 #endif
41 #include <sys/queue.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #ifndef _WIN32
45 #include <unistd.h>
46 #endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
51 
52 #include "event-internal.h"
53 #include "evmap-internal.h"
54 #include "mm-internal.h"
55 #include "changelist-internal.h"
56 
/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;	/* every event registered on this fd */
	ev_uint16_t nread;		/* how many of them want EV_READ */
	ev_uint16_t nwrite;		/* how many of them want EV_WRITE */
};
65 
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;	/* every event registered on this signal */
};
71 
/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;	/* hashtable linkage */
	evutil_socket_t fd;			/* hash key: the socket */
	union { /* This is a union in case we need to make more things that can
			   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};
88 
89 /* Helper used by the event_io_map hashtable code; tries to return a good hash
90  * of the fd in e->fd. */
91 static inline unsigned
92 hashsocket(struct event_map_entry *e)
93 {
94 	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
95 	 * matter.  Our hashtable implementation really likes low-order bits,
96 	 * though, so let's do the rotate-and-add trick. */
97 	unsigned h = (unsigned) e->fd;
98 	h += (h >> 2) | (h << 30);
99 	return h;
100 }
101 
102 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
103  * have the same e->fd. */
104 static inline int
105 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
106 {
107 	return e1->fd == e2->fd;
108 }
109 
/* Instantiate the hashtable operations (find/insert/iterate) for
 * event_io_map, keyed on fd, using libevent's allocators and a 0.5 load
 * factor. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
			0.5, mm_malloc, mm_realloc, mm_free)
113 
/* Look up the evmap_io stored for 'slot' in the hashtable 'map'; set 'x' to
 * it, or to NULL if there is no entry.  (Hashtable version.)
 * Note: the trailing semicolon after "while (0)" has been removed; it
 * defeated the do/while(0) idiom and was inconsistent with
 * GET_IO_SLOT_AND_CTOR below.  All call sites supply their own ';'. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0)
121 
/* As GET_IO_SLOT, but construct (and initialize with 'ctor') the entry for
 * 'slot' if it is absent, allocating fdinfo_len extra bytes after the
 * struct for the backend's per-fd data.  Makes the enclosing function
 * return -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
				});					\
		(x) = &ent_->ent.type;					\
	} while (0)
141 
/* Initialize an io map (hashtable version): start with an empty table. */
void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}
146 
/* Free every entry in the io map, then the table's own storage (hashtable
 * version).  The events on the lists are not freed here. */
void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		/* Advance before freeing: HT_NEXT_RMV unlinks 'this'. */
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
157 #endif
158 
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
/* Offset of the backend's per-fd data within an io-map entry's allocation. */
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Initialize an io map (array version): identical representation to a
 * signal map. */
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
/* Free an io map (array version). */
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
198 #endif
199 
200 
201 /** Expand 'map' with new entries of width 'msize' until it is big enough
202 	to store a value in 'slot'.
203  */
204 static int
205 evmap_make_space(struct event_signal_map *map, int slot, int msize)
206 {
207 	if (map->nentries <= slot) {
208 		int nentries = map->nentries ? map->nentries : 32;
209 		void **tmp;
210 
211 		while (nentries <= slot)
212 			nentries <<= 1;
213 
214 		tmp = (void **)mm_realloc(map->entries, nentries * msize);
215 		if (tmp == NULL)
216 			return (-1);
217 
218 		memset(&tmp[map->nentries], 0,
219 		    (nentries - map->nentries) * msize);
220 
221 		map->nentries = nentries;
222 		map->entries = tmp;
223 	}
224 
225 	return (0);
226 }
227 
228 void
229 evmap_signal_initmap_(struct event_signal_map *ctx)
230 {
231 	ctx->nentries = 0;
232 	ctx->entries = NULL;
233 }
234 
235 void
236 evmap_signal_clear_(struct event_signal_map *ctx)
237 {
238 	if (ctx->entries != NULL) {
239 		int i;
240 		for (i = 0; i < ctx->nentries; ++i) {
241 			if (ctx->entries[i] != NULL)
242 				mm_free(ctx->entries[i]);
243 		}
244 		mm_free(ctx->entries);
245 		ctx->entries = NULL;
246 	}
247 	ctx->nentries = 0;
248 }
249 
250 
251 /* code specific to file descriptors */
252 
253 /** Constructor for struct evmap_io */
254 static void
255 evmap_io_init(struct evmap_io *entry)
256 {
257 	LIST_INIT(&entry->events);
258 	entry->nread = 0;
259 	entry->nwrite = 0;
260 }
261 
262 
263 /* return -1 on error, 0 on success if nothing changed in the event backend,
264  * and 1 on success if something did. */
265 int
266 evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
267 {
268 	const struct eventop *evsel = base->evsel;
269 	struct event_io_map *io = &base->io;
270 	struct evmap_io *ctx = NULL;
271 	int nread, nwrite, retval = 0;
272 	short res = 0, old = 0;
273 	struct event *old_ev;
274 
275 	EVUTIL_ASSERT(fd == ev->ev_fd);
276 
277 	if (fd < 0)
278 		return 0;
279 
280 #ifndef EVMAP_USE_HT
281 	if (fd >= io->nentries) {
282 		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
283 			return (-1);
284 	}
285 #endif
286 	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
287 						 evsel->fdinfo_len);
288 
289 	nread = ctx->nread;
290 	nwrite = ctx->nwrite;
291 
292 	if (nread)
293 		old |= EV_READ;
294 	if (nwrite)
295 		old |= EV_WRITE;
296 
297 	if (ev->ev_events & EV_READ) {
298 		if (++nread == 1)
299 			res |= EV_READ;
300 	}
301 	if (ev->ev_events & EV_WRITE) {
302 		if (++nwrite == 1)
303 			res |= EV_WRITE;
304 	}
305 	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
306 		event_warnx("Too many events reading or writing on fd %d",
307 		    (int)fd);
308 		return -1;
309 	}
310 	if (EVENT_DEBUG_MODE_IS_ON() &&
311 	    (old_ev = LIST_FIRST(&ctx->events)) &&
312 	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
313 		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
314 		    " events on fd %d", (int)fd);
315 		return -1;
316 	}
317 
318 	if (res) {
319 		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
320 		/* XXX(niels): we cannot mix edge-triggered and
321 		 * level-triggered, we should probably assert on
322 		 * this. */
323 		if (evsel->add(base, ev->ev_fd,
324 			old, (ev->ev_events & EV_ET) | res, extra) == -1)
325 			return (-1);
326 		retval = 1;
327 	}
328 
329 	ctx->nread = (ev_uint16_t) nread;
330 	ctx->nwrite = (ev_uint16_t) nwrite;
331 	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);
332 
333 	return (retval);
334 }
335 
336 /* return -1 on error, 0 on success if nothing changed in the event backend,
337  * and 1 on success if something did. */
338 int
339 evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
340 {
341 	const struct eventop *evsel = base->evsel;
342 	struct event_io_map *io = &base->io;
343 	struct evmap_io *ctx;
344 	int nread, nwrite, retval = 0;
345 	short res = 0, old = 0;
346 
347 	if (fd < 0)
348 		return 0;
349 
350 	EVUTIL_ASSERT(fd == ev->ev_fd);
351 
352 #ifndef EVMAP_USE_HT
353 	if (fd >= io->nentries)
354 		return (-1);
355 #endif
356 
357 	GET_IO_SLOT(ctx, io, fd, evmap_io);
358 
359 	nread = ctx->nread;
360 	nwrite = ctx->nwrite;
361 
362 	if (nread)
363 		old |= EV_READ;
364 	if (nwrite)
365 		old |= EV_WRITE;
366 
367 	if (ev->ev_events & EV_READ) {
368 		if (--nread == 0)
369 			res |= EV_READ;
370 		EVUTIL_ASSERT(nread >= 0);
371 	}
372 	if (ev->ev_events & EV_WRITE) {
373 		if (--nwrite == 0)
374 			res |= EV_WRITE;
375 		EVUTIL_ASSERT(nwrite >= 0);
376 	}
377 
378 	if (res) {
379 		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
380 		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
381 			return (-1);
382 		retval = 1;
383 	}
384 
385 	ctx->nread = nread;
386 	ctx->nwrite = nwrite;
387 	LIST_REMOVE(ev, ev_io_next);
388 
389 	return (retval);
390 }
391 
/* Activate every event registered on 'fd' whose interest set intersects
 * 'events', with exactly the intersecting flags. */
void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	/* The backend should only report fds we registered. */
	EVUTIL_ASSERT(ctx);
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}
410 
/* code specific to signals */

/** Constructor for struct evmap_signal: an empty event list. */
static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}
418 
419 
420 int
421 evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
422 {
423 	const struct eventop *evsel = base->evsigsel;
424 	struct event_signal_map *map = &base->sigmap;
425 	struct evmap_signal *ctx = NULL;
426 
427 	if (sig >= map->nentries) {
428 		if (evmap_make_space(
429 			map, sig, sizeof(struct evmap_signal *)) == -1)
430 			return (-1);
431 	}
432 	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
433 	    base->evsigsel->fdinfo_len);
434 
435 	if (LIST_EMPTY(&ctx->events)) {
436 		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
437 		    == -1)
438 			return (-1);
439 	}
440 
441 	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
442 
443 	return (1);
444 }
445 
446 int
447 evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
448 {
449 	const struct eventop *evsel = base->evsigsel;
450 	struct event_signal_map *map = &base->sigmap;
451 	struct evmap_signal *ctx;
452 
453 	if (sig >= map->nentries)
454 		return (-1);
455 
456 	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
457 
458 	LIST_REMOVE(ev, ev_signal_next);
459 
460 	if (LIST_FIRST(&ctx->events) == NULL) {
461 		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
462 			return (-1);
463 	}
464 
465 	return (1);
466 }
467 
/* Activate every event registered on signal 'sig' with EV_SIGNAL, scheduling
 * each callback to run 'ncalls' times. */
void
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock_(ev, EV_SIGNAL, ncalls);
}
481 
482 void *
483 evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
484 {
485 	struct evmap_io *ctx;
486 	GET_IO_SLOT(ctx, map, fd, evmap_io);
487 	if (ctx)
488 		return ((char*)ctx) + sizeof(struct evmap_io);
489 	else
490 		return NULL;
491 }
492 
/* Callback type for evmap_io_foreach_fd: invoked with the base, the fd, the
 * fd's evmap_io entry, and the user-supplied argument. */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);
496 
/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	/* Hashtable version: visit every stored entry. */
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	/* Array version: scan every slot, skipping unpopulated ones. */
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		/* A nonzero result from fn stops the iteration. */
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}
534 
/* Callback type for evmap_signal_foreach_signal: invoked with the base, the
 * signal number, the signal's evmap_signal entry, and the user argument. */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);
538 
539 /* Multipurpose helper function: Iterate over every signal number in the
540  * event_base for which we could have signal events.  For each such signal,
541  * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
542  * function, base is the event_base, signum is the signal number, evmap_signal
543  * is an evmap_signal structure containing a list of events pending on the
544  * signal, and arg is the user-supplied argument.
545  *
546  * If fn returns 0, continue on to the next signal. Otherwise, return the same
547  * value that fn returned.
548  */
549 static int
550 evmap_signal_foreach_signal(struct event_base *base,
551     evmap_signal_foreach_signal_cb fn,
552     void *arg)
553 {
554 	struct event_signal_map *sigmap = &base->sigmap;
555 	int r = 0;
556 	int signum;
557 
558 	for (signum = 0; signum < sigmap->nentries; ++signum) {
559 		struct evmap_signal *ctx = sigmap->entries[signum];
560 		if (!ctx)
561 			continue;
562 		if ((r = fn(base, signum, ctx, arg)))
563 			break;
564 	}
565 	return r;
566 }
567 
568 /* Helper for evmap_reinit_: tell the backend to add every fd for which we have
569  * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
570  * EV_ET. */
571 static int
572 evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
573     struct evmap_io *ctx, void *arg)
574 {
575 	const struct eventop *evsel = base->evsel;
576 	void *extra;
577 	int *result = arg;
578 	short events = 0;
579 	struct event *ev;
580 	EVUTIL_ASSERT(ctx);
581 
582 	extra = ((char*)ctx) + sizeof(struct evmap_io);
583 	if (ctx->nread)
584 		events |= EV_READ;
585 	if (ctx->nread)
586 		events |= EV_WRITE;
587 	if (evsel->fdinfo_len)
588 		memset(extra, 0, evsel->fdinfo_len);
589 	if (events &&
590 	    (ev = LIST_FIRST(&ctx->events)) &&
591 	    (ev->ev_events & EV_ET))
592 		events |= EV_ET;
593 	if (evsel->add(base, fd, 0, events, extra) == -1)
594 		*result = -1;
595 
596 	return 0;
597 }
598 
/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events.  */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	/* Only re-add signals that still have at least one event; failures
	 * are reported through *result rather than aborting the walk. */
	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}
614 
615 int
616 evmap_reinit_(struct event_base *base)
617 {
618 	int result = 0;
619 
620 	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
621 	if (result < 0)
622 		return -1;
623 	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
624 	if (result < 0)
625 		return -1;
626 	return 0;
627 }
628 
/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	/* event_del unlinks ev from dlist, so keep taking the new head
	 * until the list is empty. */
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}
638 
/* Helper for evmap_delete_all_: delete every event pending on an fd.
 * 'base', 'fd' and 'arg' are unused; the signature matches
 * evmap_io_foreach_fd_cb. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}
646 
/* Helper for evmap_delete_all_: delete every event pending on a signal.
 * 'base', 'signum' and 'arg' are unused; the signature matches
 * evmap_signal_foreach_signal_cb. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}
654 
/* Delete (via event_del) every io and signal event in 'base'. */
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}
661 
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 * NOTE(review): this appears to live in the backend's per-fd "extra" space
 * (fdinfo_len bytes after each evmap_io/evmap_signal) — confirm against the
 * changelist backend's fdinfo_len setting.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
669 
670 void
671 event_changelist_init_(struct event_changelist *changelist)
672 {
673 	changelist->changes = NULL;
674 	changelist->changes_size = 0;
675 	changelist->n_changes = 0;
676 }
677 
/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		/* Signal change: the fdinfo follows the evmap_signal. */
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		/* IO change: the fdinfo follows the evmap_io. */
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
695 
/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	/* The fdinfo is stored immediately after the evmap_io. */
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	/* If this fd claims a changelist slot, that slot must refer back to
	 * the same fd. */
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}
712 
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward check: every pending change's fdinfo must point back at
	 * its own slot (idxplus1 is stored as index + 1). */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse check: every fd that claims a changelist slot must match
	 * the change recorded there. */
	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}
734 
/* event_changelist_check: run the consistency check only when
 * DEBUG_CHANGELIST is defined; otherwise compile to nothing. */
#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif
740 
/* Discard every pending change without applying it, clearing each fd's
 * back-pointer into the changelist. */
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0; /* 0 means "no changelist entry". */
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
761 
/* Free the changelist's array (the changes are stored inline in it, so
 * nothing else needs freeing) and reset the changelist to empty. */
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}
769 
770 /** Increase the size of 'changelist' to hold more changes. */
771 static int
772 event_changelist_grow(struct event_changelist *changelist)
773 {
774 	int new_size;
775 	struct event_change *new_changes;
776 	if (changelist->changes_size < 64)
777 		new_size = 64;
778 	else
779 		new_size = changelist->changes_size * 2;
780 
781 	new_changes = mm_realloc(changelist->changes,
782 	    new_size * sizeof(struct event_change));
783 
784 	if (EVUTIL_UNLIKELY(new_changes == NULL))
785 		return (-1);
786 
787 	changelist->changes = new_changes;
788 	changelist->changes_size = new_size;
789 
790 	return (0);
791 }
792 
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL on allocation failure.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No entry yet for this fd: append a zeroed one. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1; /* stored +1 so 0 means "none" */

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the entry already recorded for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
827 
/* Record an "add events" change for 'fd' in the changelist backend.  'p' is
 * the fd's event_changelist_fdinfo.  Returns 0 on success, -1 on error. */
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance. */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
858 
/* Record a "delete events" change for 'fd' in the changelist backend.  'p' is
 * the fd's event_changelist_fdinfo.  Returns 0 on success, -1 on error. */
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
904 
/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * given fd are set up correctly, and that the nread and nwrite counts on that
 * fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		/* An io event must be io-only, never a signal event. */
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
	}

	/* The cached counters must agree with a fresh tally of the list. */
	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);

	return 0;
}
935 
936 /* Helper for evmap_check_integrity_: verify that all of the events pending
937  * on given signal are set up correctly. */
938 static int
939 evmap_signal_check_integrity_fn(struct event_base *base,
940     int signum, struct evmap_signal *sig_info, void *arg)
941 {
942 	struct event *ev;
943 	/* First, make sure the list itself isn't corrupt. */
944 	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
945 
946 	LIST_FOREACH(ev, &sig_info->events, ev_io_next) {
947 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
948 		EVUTIL_ASSERT(ev->ev_fd == signum);
949 		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
950 		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE)));
951 	}
952 	return 0;
953 }
954 
/* Verify every evmap invariant we can check: per-fd lists and counters,
 * per-signal lists, and — when the changelist backend is in use — the
 * changelist itself. */
void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	/* Comparing the add callback identifies the changelist backend. */
	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}
964 
/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;	/* callback invoked per event */
	void *arg;			/* opaque user argument passed to fn */
};
971 
/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given fd.  */
static int
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct evmap_foreach_event_helper *h = arg;
	struct event *ev;
	int r;
	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		/* A nonzero result stops the whole iteration. */
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}
987 
/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given signal.  */
static int
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	struct evmap_foreach_event_helper *h = arg;
	int r;
	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		/* A nonzero result stops the whole iteration. */
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}
1003 
1004 int
1005 evmap_foreach_event_(struct event_base *base,
1006     event_base_foreach_event_cb fn, void *arg)
1007 {
1008 	struct evmap_foreach_event_helper h;
1009 	int r;
1010 	h.fn = fn;
1011 	h.arg = arg;
1012 	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
1013 		return r;
1014 	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
1015 }
1016 
1017