/*	$NetBSD: poll.c,v 1.3 2015/07/10 14:20:34 christos Exp $	*/

/*	$OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef EVENT__HAVE_POLL

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <poll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#include "event-internal.h"
#include "evsignal-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "time-internal.h"

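/*
 * Per-fd record attached by evmap.  idxplus1 is one more than the fd's
 * index in the pollop's event_set array; 0 means the fd currently has no
 * slot in the poll set.
 */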
struct pollidx {
	int idxplus1;
};

struct pollop {
	int event_count;		/* Number of pollfd slots allocated */
	int nfds;			/* Number of pollfd slots in use */
	int realloc_copy;		/* True iff we must realloc
					 * event_set_copy */
	struct pollfd *event_set;
	struct pollfd *event_set_copy;
};

static void *poll_init(struct event_base *);
static int poll_add(struct event_base *, int, short old, short events, void *idx);
static int poll_del(struct event_base *, int, short old, short events, void *idx);
static int poll_dispatch(struct event_base *, struct timeval *);
static void poll_dealloc(struct event_base *);

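/* The eventop table entry for the poll(2) backend: its name, the hooks
 * declared above, no reinit-after-fork requirement, support for arbitrary
 * fd types (EV_FEATURE_FDS), and the size of the per-fd record that evmap
 * allocates for each descriptor. */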
const struct eventop pollops = {
	"poll",
	poll_init,
	poll_add,
	poll_del,
	poll_dispatch,
	poll_dealloc,
	0, /* doesn't need_reinit */
	EV_FEATURE_FDS,
	sizeof(struct pollidx),
};

static void *
poll_init(struct event_base *base)
{
	struct pollop *pollop;

	if (!(pollop = mm_calloc(1, sizeof(struct pollop))))
		return (NULL);

	evsig_init_(base);

	evutil_weakrand_seed_(&base->weakrand_seed, 0);

	return (pollop);
}

#ifdef CHECK_INVARIANTS
static void
poll_check_ok(struct pollop *pop)
{
	int i, j;

	/* Every slot in use must fit inside the allocation and hold a
	 * valid descriptor, and no descriptor may occupy more than one
	 * slot. */
	EVUTIL_ASSERT(pop->nfds <= pop->event_count);
	for (i = 0; i < pop->nfds; ++i) {
		EVUTIL_ASSERT(pop->event_set[i].fd >= 0);
		for (j = i + 1; j < pop->nfds; ++j)
			EVUTIL_ASSERT(pop->event_set[i].fd !=
			    pop->event_set[j].fd);
	}
}
#else
#define poll_check_ok(pop)
#endif

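/*
 * Wait for events with poll(2), then deliver the results.  Revents are
 * scanned starting from a randomly chosen slot so that low-numbered fds
 * do not persistently starve the rest of the set.
 */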
static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
	int res, i, j, nfds;
	long msec = -1;
	struct pollop *pop = base->evbase;
	struct pollfd *event_set;

	poll_check_ok(pop);

	nfds = pop->nfds;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->th_base_lock) {
		/* If we're using this backend in a multithreaded setting,
		 * then we need to work on a copy of event_set, so that we can
		 * let other threads modify the main event_set while we're
		 * polling. If we're not multithreaded, then we'll skip the
		 * copy step here to save memory and time. */
		if (pop->realloc_copy) {
			struct pollfd *tmp = mm_realloc(pop->event_set_copy,
			    pop->event_count * sizeof(struct pollfd));
			if (tmp == NULL) {
				event_warn("realloc");
				return -1;
			}
			pop->event_set_copy = tmp;
			pop->realloc_copy = 0;
		}
		memcpy(pop->event_set_copy, pop->event_set,
		    sizeof(struct pollfd)*nfds);
		event_set = pop->event_set_copy;
	} else {
		event_set = pop->event_set;
	}
#else
	event_set = pop->event_set;
#endif

	if (tv != NULL) {
		msec = evutil_tv_to_msec_(tv);
		if (msec < 0 || msec > INT_MAX)
			msec = INT_MAX;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = poll(event_set, nfds, msec);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0 || nfds == 0)
		return (0);

	i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
	for (j = 0; j < nfds; j++) {
		int what;
		if (++i == nfds)
			i = 0;
		what = event_set[i].revents;
		if (!what)
			continue;

		res = 0;

		/* On error or hangup, report the fd as both readable and
		 * writable so any registered read/write events fire. */
		if (what & (POLLHUP|POLLERR))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN)
			res |= EV_READ;
		if (what & POLLOUT)
			res |= EV_WRITE;
		if (res == 0)
			continue;

		evmap_io_active_(base, event_set[i].fd, res);
	}

	return (0);
}

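/*
 * Register read/write interest for an fd.  The pollfd array grows
 * geometrically (doubling, starting at 32 slots) as descriptors are added,
 * and the fd's slot index is remembered in its per-fd pollidx record.
 */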
static int
poll_add(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
	if (pop->nfds + 1 >= pop->event_count) {
		struct pollfd *tmp_event_set;
		int tmp_event_count;

		if (pop->event_count < 32)
			tmp_event_count = 32;
		else
			tmp_event_count = pop->event_count * 2;

		/* We need more file descriptors */
		tmp_event_set = mm_realloc(pop->event_set,
				 tmp_event_count * sizeof(struct pollfd));
		if (tmp_event_set == NULL) {
			event_warn("realloc");
			return (-1);
		}
		pop->event_set = tmp_event_set;

		pop->event_count = tmp_event_count;
		pop->realloc_copy = 1;
	}

	i = idx->idxplus1 - 1;

	if (i >= 0) {
		pfd = &pop->event_set[i];
	} else {
		i = pop->nfds++;
		pfd = &pop->event_set[i];
		pfd->events = 0;
		pfd->fd = fd;
		idx->idxplus1 = i + 1;
	}

	pfd->revents = 0;
	if (events & EV_WRITE)
		pfd->events |= POLLOUT;
	if (events & EV_READ)
		pfd->events |= POLLIN;
	poll_check_ok(pop);

	return (0);
}

/*
 * Drop a read/write interest from an fd.  If no interest remains, the fd's
 * pollfd slot is released and the last slot is moved into its place.
 */

static int
poll_del(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
	i = idx->idxplus1 - 1;
	if (i < 0)
		return (-1);

	/* Do we still want to read or write? */
	pfd = &pop->event_set[i];
	if (events & EV_READ)
		pfd->events &= ~POLLIN;
	if (events & EV_WRITE)
		pfd->events &= ~POLLOUT;
	poll_check_ok(pop);
	if (pfd->events)
		/* Another event cares about that fd. */
		return (0);

	/* Okay, so we aren't interested in that fd anymore. */
	idx->idxplus1 = 0;

	--pop->nfds;
	if (i != pop->nfds) {
		/*
		 * Shift the last pollfd down into the now-unoccupied
		 * position.
		 */
		memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
		       sizeof(struct pollfd));
		idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd);
		EVUTIL_ASSERT(idx);
		EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1);
		idx->idxplus1 = i + 1;
	}

	poll_check_ok(pop);
	return (0);
}

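/* Tear down the backend: release signal-handling state and free the
 * pollfd arrays along with the pollop itself. */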
static void
poll_dealloc(struct event_base *base)
{
	struct pollop *pop = base->evbase;

	evsig_dealloc_(base);
	if (pop->event_set)
		mm_free(pop->event_set);
	if (pop->event_set_copy)
		mm_free(pop->event_set_copy);

	memset(pop, 0, sizeof(struct pollop));
	mm_free(pop);
}

#endif /* EVENT__HAVE_POLL */