/*	$NetBSD: poll.c,v 1.1.1.3 2017/01/31 21:14:52 christos Exp $	*/
/*	$OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: poll.c,v 1.1.1.3 2017/01/31 21:14:52 christos Exp $");
#include "evconfig-private.h"

#ifdef EVENT__HAVE_POLL

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <poll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#include "event-internal.h"
#include "evsignal-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "time-internal.h"

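/*
 * Per-fd data kept by this backend.  evmap allocates one struct pollidx
 * for every fd it tracks (see fdinfo_len in pollops below) and passes it
 * to poll_add/poll_del as the idx argument.  idxplus1 holds the fd's index
 * into event_set plus one, so a value of zero means "no pollfd slot yet".
 */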
struct pollidx {
	int idxplus1;
};

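/*
 * Backend state.  event_set is the pollfd array handed to poll(2).  When
 * the base is used from multiple threads, poll_dispatch() polls on
 * event_set_copy instead, so other threads may grow or edit event_set
 * while this one is blocked in poll(); realloc_copy records that the copy
 * must be resized to event_count entries before it is next used.
 */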
struct pollop {
	int event_count;		/* Number of pollfd entries allocated */
	int nfds;			/* Number of pollfd entries in use */
	int realloc_copy;		/* True iff we must realloc
					 * event_set_copy */
	struct pollfd *event_set;
	struct pollfd *event_set_copy;
};

static void *poll_init(struct event_base *);
static int poll_add(struct event_base *, int, short old, short events, void *idx);
static int poll_del(struct event_base *, int, short old, short events, void *idx);
static int poll_dispatch(struct event_base *, struct timeval *);
static void poll_dealloc(struct event_base *);

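/*
 * Backend descriptor picked up by the backend table in event.c when
 * EVENT__HAVE_POLL is defined.  EV_FEATURE_FDS advertises that poll() can
 * wait on arbitrary descriptor types, not just sockets, and the final
 * field tells evmap to allocate sizeof(struct pollidx) of per-fd state.
 */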
const struct eventop pollops = {
	"poll",
	poll_init,
	poll_add,
	poll_del,
	poll_dispatch,
	poll_dealloc,
	0, /* doesn't need_reinit */
	EV_FEATURE_FDS,
	sizeof(struct pollidx),
};

static void *
poll_init(struct event_base *base)
{
	struct pollop *pollop;

	if (!(pollop = mm_calloc(1, sizeof(struct pollop))))
		return (NULL);

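	/* poll(2) cannot wait on signals directly, so hook up libevent's
	 * shared signal machinery, which turns caught signals into reads
	 * on an internal notification socket that this backend can poll. */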
	evsig_init_(base);

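	/* Seed the weak RNG that poll_dispatch() uses to pick a random
	 * starting point for its scan of the pollfd array. */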
	evutil_weakrand_seed_(&base->weakrand_seed, 0);

	return (pollop);
}

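/*
 * Debug-only consistency check, compiled only when CHECK_INVARIANTS is
 * defined.  Note that it still refers to pop->fd_count and
 * pop->idxplus1_by_fd, fields that struct pollop above no longer declares;
 * the check appears to predate the move of per-fd bookkeeping into evmap's
 * struct pollidx.
 */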
#ifdef CHECK_INVARIANTS
static void
poll_check_ok(struct pollop *pop)
{
	int i, idx;
	struct event *ev;

	for (i = 0; i < pop->fd_count; ++i) {
		idx = pop->idxplus1_by_fd[i]-1;
		if (idx < 0)
			continue;
		EVUTIL_ASSERT(pop->event_set[idx].fd == i);
	}
	for (i = 0; i < pop->nfds; ++i) {
		struct pollfd *pfd = &pop->event_set[i];
		EVUTIL_ASSERT(pop->idxplus1_by_fd[pfd->fd] == i+1);
	}
}
#else
#define poll_check_ok(pop)
#endif

static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
	int res, i, j, nfds;
	long msec = -1;
	struct pollop *pop = base->evbase;
	struct pollfd *event_set;

	poll_check_ok(pop);

	nfds = pop->nfds;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->th_base_lock) {
		/* If we're using this backend in a multithreaded setting,
		 * then we need to work on a copy of event_set, so that we can
		 * let other threads modify the main event_set while we're
		 * polling. If we're not multithreaded, then we'll skip the
		 * copy step here to save memory and time. */
		if (pop->realloc_copy) {
			struct pollfd *tmp = mm_realloc(pop->event_set_copy,
			    pop->event_count * sizeof(struct pollfd));
			if (tmp == NULL) {
				event_warn("realloc");
				return -1;
			}
			pop->event_set_copy = tmp;
			pop->realloc_copy = 0;
		}
		memcpy(pop->event_set_copy, pop->event_set,
		    sizeof(struct pollfd)*nfds);
		event_set = pop->event_set_copy;
	} else {
		event_set = pop->event_set;
	}
#else
	event_set = pop->event_set;
#endif

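	/* Convert the timeout to milliseconds for poll().  A NULL tv leaves
	 * msec at -1, which makes poll() wait indefinitely; conversions that
	 * come back negative or larger than INT_MAX are clamped to INT_MAX. */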
	if (tv != NULL) {
		msec = evutil_tv_to_msec_(tv);
		if (msec < 0 || msec > INT_MAX)
			msec = INT_MAX;
	}

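	/* Drop the base lock while blocked in poll() so that other threads
	 * can add and delete events; they will touch pop->event_set, which
	 * is why the copy made above is the array actually polled. */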
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = poll(event_set, nfds, msec);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0 || nfds == 0)
		return (0);

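	/* Start the scan at a random offset so that, when more entries are
	 * ready than can be serviced promptly, the same low-numbered fds
	 * are not always handled first. */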
	i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
	for (j = 0; j < nfds; j++) {
		int what;
		if (++i == nfds)
			i = 0;
		what = event_set[i].revents;
		if (!what)
			continue;

		res = 0;

		/* On error, hangup, or an invalid fd, report the entry as
		 * both readable and writable so the callbacks run and can
		 * observe the condition. */
		if (what & (POLLHUP|POLLERR|POLLNVAL))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN)
			res |= EV_READ;
		if (what & POLLOUT)
			res |= EV_WRITE;
		if (res == 0)
			continue;

		evmap_io_active_(base, event_set[i].fd, res);
	}

	return (0);
}

static int
poll_add(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
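	/* Grow the pollfd array geometrically (at least 32 entries, then
	 * doubling) so that repeated adds stay cheap; the copy that
	 * poll_dispatch() polls on is resized lazily via realloc_copy. */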
	if (pop->nfds + 1 >= pop->event_count) {
		struct pollfd *tmp_event_set;
		int tmp_event_count;

		if (pop->event_count < 32)
			tmp_event_count = 32;
		else
			tmp_event_count = pop->event_count * 2;

		/* We need more file descriptors */
		tmp_event_set = mm_realloc(pop->event_set,
				 tmp_event_count * sizeof(struct pollfd));
		if (tmp_event_set == NULL) {
			event_warn("realloc");
			return (-1);
		}
		pop->event_set = tmp_event_set;

		pop->event_count = tmp_event_count;
		pop->realloc_copy = 1;
	}

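	/* idxplus1 is one-based: zero means this fd has no pollfd slot yet,
	 * so claim the next free entry; otherwise reuse the existing slot
	 * and just OR in the newly requested interest bits. */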
	i = idx->idxplus1 - 1;

	if (i >= 0) {
		pfd = &pop->event_set[i];
	} else {
		i = pop->nfds++;
		pfd = &pop->event_set[i];
		pfd->events = 0;
		pfd->fd = fd;
		idx->idxplus1 = i + 1;
	}

	pfd->revents = 0;
	if (events & EV_WRITE)
		pfd->events |= POLLOUT;
	if (events & EV_READ)
		pfd->events |= POLLIN;
	poll_check_ok(pop);

	return (0);
}

/*
 * Remove the given EV_READ/EV_WRITE interest for fd; once no interest
 * remains, the fd's slot in event_set is released.
 */

static int
poll_del(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
	i = idx->idxplus1 - 1;
	if (i < 0)
		return (-1);

	/* Do we still want to read or write? */
	pfd = &pop->event_set[i];
	if (events & EV_READ)
		pfd->events &= ~POLLIN;
	if (events & EV_WRITE)
		pfd->events &= ~POLLOUT;
	poll_check_ok(pop);
	if (pfd->events)
		/* Another event cares about that fd. */
		return (0);

	/* Okay, so we aren't interested in that fd anymore. */
	idx->idxplus1 = 0;

	--pop->nfds;
	if (i != pop->nfds) {
		/*
		 * Shift the last pollfd down into the now-unoccupied
		 * position.
		 */
		memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
		       sizeof(struct pollfd));
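		/* The entry we just moved now lives at slot i; update its
		 * per-fd index so later add/del calls for that fd find it. */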
		idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd);
		EVUTIL_ASSERT(idx);
		EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1);
		idx->idxplus1 = i + 1;
	}

	poll_check_ok(pop);
	return (0);
}

static void
poll_dealloc(struct event_base *base)
{
	struct pollop *pop = base->evbase;

	evsig_dealloc_(base);
	if (pop->event_set)
		mm_free(pop->event_set);
	if (pop->event_set_copy)
		mm_free(pop->event_set_copy);

	memset(pop, 0, sizeof(struct pollop));
	mm_free(pop);
}

#endif /* EVENT__HAVE_POLL */