1*c43e99fdSEd Maste /*
2*c43e99fdSEd Maste * Copyright 2000-2009 Niels Provos <provos@citi.umich.edu>
3*c43e99fdSEd Maste * Copyright 2009-2012 Niels Provos and Nick Mathewson
4*c43e99fdSEd Maste *
5*c43e99fdSEd Maste * Redistribution and use in source and binary forms, with or without
6*c43e99fdSEd Maste * modification, are permitted provided that the following conditions
7*c43e99fdSEd Maste * are met:
8*c43e99fdSEd Maste * 1. Redistributions of source code must retain the above copyright
9*c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer.
10*c43e99fdSEd Maste * 2. Redistributions in binary form must reproduce the above copyright
11*c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer in the
12*c43e99fdSEd Maste * documentation and/or other materials provided with the distribution.
13*c43e99fdSEd Maste * 3. The name of the author may not be used to endorse or promote products
14*c43e99fdSEd Maste * derived from this software without specific prior written permission.
15*c43e99fdSEd Maste *
16*c43e99fdSEd Maste * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17*c43e99fdSEd Maste * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18*c43e99fdSEd Maste * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19*c43e99fdSEd Maste * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20*c43e99fdSEd Maste * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21*c43e99fdSEd Maste * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22*c43e99fdSEd Maste * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23*c43e99fdSEd Maste * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24*c43e99fdSEd Maste * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25*c43e99fdSEd Maste * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26*c43e99fdSEd Maste */
27*c43e99fdSEd Maste #include "event2/event-config.h"
28*c43e99fdSEd Maste #include "evconfig-private.h"
29*c43e99fdSEd Maste
30*c43e99fdSEd Maste #ifdef EVENT__HAVE_DEVPOLL
31*c43e99fdSEd Maste
32*c43e99fdSEd Maste #include <sys/types.h>
33*c43e99fdSEd Maste #include <sys/resource.h>
34*c43e99fdSEd Maste #ifdef EVENT__HAVE_SYS_TIME_H
35*c43e99fdSEd Maste #include <sys/time.h>
36*c43e99fdSEd Maste #endif
37*c43e99fdSEd Maste #include <sys/queue.h>
38*c43e99fdSEd Maste #include <sys/devpoll.h>
39*c43e99fdSEd Maste #include <signal.h>
40*c43e99fdSEd Maste #include <stdio.h>
41*c43e99fdSEd Maste #include <stdlib.h>
42*c43e99fdSEd Maste #include <string.h>
43*c43e99fdSEd Maste #include <unistd.h>
44*c43e99fdSEd Maste #include <fcntl.h>
45*c43e99fdSEd Maste #include <errno.h>
46*c43e99fdSEd Maste
47*c43e99fdSEd Maste #include "event2/event.h"
48*c43e99fdSEd Maste #include "event2/event_struct.h"
49*c43e99fdSEd Maste #include "event2/thread.h"
50*c43e99fdSEd Maste #include "event-internal.h"
51*c43e99fdSEd Maste #include "evsignal-internal.h"
52*c43e99fdSEd Maste #include "log-internal.h"
53*c43e99fdSEd Maste #include "evmap-internal.h"
54*c43e99fdSEd Maste #include "evthread-internal.h"
55*c43e99fdSEd Maste
/* Per-event_base state for the Solaris /dev/poll backend. */
struct devpollop {
	struct pollfd *events;	/* buffer that DP_POLL fills with ready fds */
	int nevents;		/* capacity (entries) of 'events' and 'changes' */
	int dpfd;		/* fd of the open /dev/poll device */
	struct pollfd *changes;	/* queued interest-set updates, not yet written */
	int nchanges;		/* number of entries pending in 'changes' */
};
63*c43e99fdSEd Maste
static void *devpoll_init(struct event_base *);
static int devpoll_add(struct event_base *, int fd, short old, short events, void *);
static int devpoll_del(struct event_base *, int fd, short old, short events, void *);
static int devpoll_dispatch(struct event_base *, struct timeval *);
static void devpoll_dealloc(struct event_base *);

/* Backend method table; event.c selects this when /dev/poll is available. */
const struct eventop devpollops = {
	"devpoll",
	devpoll_init,
	devpoll_add,
	devpoll_del,
	devpoll_dispatch,
	devpoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_FDS|EV_FEATURE_O1,
	0
};

/* Fallback buffer size (entries) when RLIMIT_NOFILE is unknown or infinite. */
#define NEVENT 32000
83*c43e99fdSEd Maste
84*c43e99fdSEd Maste static int
devpoll_commit(struct devpollop * devpollop)85*c43e99fdSEd Maste devpoll_commit(struct devpollop *devpollop)
86*c43e99fdSEd Maste {
87*c43e99fdSEd Maste /*
88*c43e99fdSEd Maste * Due to a bug in Solaris, we have to use pwrite with an offset of 0.
89*c43e99fdSEd Maste * Write is limited to 2GB of data, until it will fail.
90*c43e99fdSEd Maste */
91*c43e99fdSEd Maste if (pwrite(devpollop->dpfd, devpollop->changes,
92*c43e99fdSEd Maste sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
93*c43e99fdSEd Maste return (-1);
94*c43e99fdSEd Maste
95*c43e99fdSEd Maste devpollop->nchanges = 0;
96*c43e99fdSEd Maste return (0);
97*c43e99fdSEd Maste }
98*c43e99fdSEd Maste
99*c43e99fdSEd Maste static int
devpoll_queue(struct devpollop * devpollop,int fd,int events)100*c43e99fdSEd Maste devpoll_queue(struct devpollop *devpollop, int fd, int events) {
101*c43e99fdSEd Maste struct pollfd *pfd;
102*c43e99fdSEd Maste
103*c43e99fdSEd Maste if (devpollop->nchanges >= devpollop->nevents) {
104*c43e99fdSEd Maste /*
105*c43e99fdSEd Maste * Change buffer is full, must commit it to /dev/poll before
106*c43e99fdSEd Maste * adding more
107*c43e99fdSEd Maste */
108*c43e99fdSEd Maste if (devpoll_commit(devpollop) != 0)
109*c43e99fdSEd Maste return (-1);
110*c43e99fdSEd Maste }
111*c43e99fdSEd Maste
112*c43e99fdSEd Maste pfd = &devpollop->changes[devpollop->nchanges++];
113*c43e99fdSEd Maste pfd->fd = fd;
114*c43e99fdSEd Maste pfd->events = events;
115*c43e99fdSEd Maste pfd->revents = 0;
116*c43e99fdSEd Maste
117*c43e99fdSEd Maste return (0);
118*c43e99fdSEd Maste }
119*c43e99fdSEd Maste
120*c43e99fdSEd Maste static void *
devpoll_init(struct event_base * base)121*c43e99fdSEd Maste devpoll_init(struct event_base *base)
122*c43e99fdSEd Maste {
123*c43e99fdSEd Maste int dpfd, nfiles = NEVENT;
124*c43e99fdSEd Maste struct rlimit rl;
125*c43e99fdSEd Maste struct devpollop *devpollop;
126*c43e99fdSEd Maste
127*c43e99fdSEd Maste if (!(devpollop = mm_calloc(1, sizeof(struct devpollop))))
128*c43e99fdSEd Maste return (NULL);
129*c43e99fdSEd Maste
130*c43e99fdSEd Maste if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
131*c43e99fdSEd Maste rl.rlim_cur != RLIM_INFINITY)
132*c43e99fdSEd Maste nfiles = rl.rlim_cur;
133*c43e99fdSEd Maste
134*c43e99fdSEd Maste /* Initialize the kernel queue */
135*c43e99fdSEd Maste if ((dpfd = evutil_open_closeonexec_("/dev/poll", O_RDWR, 0)) == -1) {
136*c43e99fdSEd Maste event_warn("open: /dev/poll");
137*c43e99fdSEd Maste mm_free(devpollop);
138*c43e99fdSEd Maste return (NULL);
139*c43e99fdSEd Maste }
140*c43e99fdSEd Maste
141*c43e99fdSEd Maste devpollop->dpfd = dpfd;
142*c43e99fdSEd Maste
143*c43e99fdSEd Maste /* Initialize fields */
144*c43e99fdSEd Maste /* FIXME: allocating 'nfiles' worth of space here can be
145*c43e99fdSEd Maste * expensive and unnecessary. See how epoll.c does it instead. */
146*c43e99fdSEd Maste devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd));
147*c43e99fdSEd Maste if (devpollop->events == NULL) {
148*c43e99fdSEd Maste mm_free(devpollop);
149*c43e99fdSEd Maste close(dpfd);
150*c43e99fdSEd Maste return (NULL);
151*c43e99fdSEd Maste }
152*c43e99fdSEd Maste devpollop->nevents = nfiles;
153*c43e99fdSEd Maste
154*c43e99fdSEd Maste devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
155*c43e99fdSEd Maste if (devpollop->changes == NULL) {
156*c43e99fdSEd Maste mm_free(devpollop->events);
157*c43e99fdSEd Maste mm_free(devpollop);
158*c43e99fdSEd Maste close(dpfd);
159*c43e99fdSEd Maste return (NULL);
160*c43e99fdSEd Maste }
161*c43e99fdSEd Maste
162*c43e99fdSEd Maste evsig_init_(base);
163*c43e99fdSEd Maste
164*c43e99fdSEd Maste return (devpollop);
165*c43e99fdSEd Maste }
166*c43e99fdSEd Maste
167*c43e99fdSEd Maste static int
devpoll_dispatch(struct event_base * base,struct timeval * tv)168*c43e99fdSEd Maste devpoll_dispatch(struct event_base *base, struct timeval *tv)
169*c43e99fdSEd Maste {
170*c43e99fdSEd Maste struct devpollop *devpollop = base->evbase;
171*c43e99fdSEd Maste struct pollfd *events = devpollop->events;
172*c43e99fdSEd Maste struct dvpoll dvp;
173*c43e99fdSEd Maste int i, res, timeout = -1;
174*c43e99fdSEd Maste
175*c43e99fdSEd Maste if (devpollop->nchanges)
176*c43e99fdSEd Maste devpoll_commit(devpollop);
177*c43e99fdSEd Maste
178*c43e99fdSEd Maste if (tv != NULL)
179*c43e99fdSEd Maste timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
180*c43e99fdSEd Maste
181*c43e99fdSEd Maste dvp.dp_fds = devpollop->events;
182*c43e99fdSEd Maste dvp.dp_nfds = devpollop->nevents;
183*c43e99fdSEd Maste dvp.dp_timeout = timeout;
184*c43e99fdSEd Maste
185*c43e99fdSEd Maste EVBASE_RELEASE_LOCK(base, th_base_lock);
186*c43e99fdSEd Maste
187*c43e99fdSEd Maste res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
188*c43e99fdSEd Maste
189*c43e99fdSEd Maste EVBASE_ACQUIRE_LOCK(base, th_base_lock);
190*c43e99fdSEd Maste
191*c43e99fdSEd Maste if (res == -1) {
192*c43e99fdSEd Maste if (errno != EINTR) {
193*c43e99fdSEd Maste event_warn("ioctl: DP_POLL");
194*c43e99fdSEd Maste return (-1);
195*c43e99fdSEd Maste }
196*c43e99fdSEd Maste
197*c43e99fdSEd Maste return (0);
198*c43e99fdSEd Maste }
199*c43e99fdSEd Maste
200*c43e99fdSEd Maste event_debug(("%s: devpoll_wait reports %d", __func__, res));
201*c43e99fdSEd Maste
202*c43e99fdSEd Maste for (i = 0; i < res; i++) {
203*c43e99fdSEd Maste int which = 0;
204*c43e99fdSEd Maste int what = events[i].revents;
205*c43e99fdSEd Maste
206*c43e99fdSEd Maste if (what & POLLHUP)
207*c43e99fdSEd Maste what |= POLLIN | POLLOUT;
208*c43e99fdSEd Maste else if (what & POLLERR)
209*c43e99fdSEd Maste what |= POLLIN | POLLOUT;
210*c43e99fdSEd Maste
211*c43e99fdSEd Maste if (what & POLLIN)
212*c43e99fdSEd Maste which |= EV_READ;
213*c43e99fdSEd Maste if (what & POLLOUT)
214*c43e99fdSEd Maste which |= EV_WRITE;
215*c43e99fdSEd Maste
216*c43e99fdSEd Maste if (!which)
217*c43e99fdSEd Maste continue;
218*c43e99fdSEd Maste
219*c43e99fdSEd Maste /* XXX(niels): not sure if this works for devpoll */
220*c43e99fdSEd Maste evmap_io_active_(base, events[i].fd, which);
221*c43e99fdSEd Maste }
222*c43e99fdSEd Maste
223*c43e99fdSEd Maste return (0);
224*c43e99fdSEd Maste }
225*c43e99fdSEd Maste
226*c43e99fdSEd Maste
227*c43e99fdSEd Maste static int
devpoll_add(struct event_base * base,int fd,short old,short events,void * p)228*c43e99fdSEd Maste devpoll_add(struct event_base *base, int fd, short old, short events, void *p)
229*c43e99fdSEd Maste {
230*c43e99fdSEd Maste struct devpollop *devpollop = base->evbase;
231*c43e99fdSEd Maste int res;
232*c43e99fdSEd Maste (void)p;
233*c43e99fdSEd Maste
234*c43e99fdSEd Maste /*
235*c43e99fdSEd Maste * It's not necessary to OR the existing read/write events that we
236*c43e99fdSEd Maste * are currently interested in with the new event we are adding.
237*c43e99fdSEd Maste * The /dev/poll driver ORs any new events with the existing events
238*c43e99fdSEd Maste * that it has cached for the fd.
239*c43e99fdSEd Maste */
240*c43e99fdSEd Maste
241*c43e99fdSEd Maste res = 0;
242*c43e99fdSEd Maste if (events & EV_READ)
243*c43e99fdSEd Maste res |= POLLIN;
244*c43e99fdSEd Maste if (events & EV_WRITE)
245*c43e99fdSEd Maste res |= POLLOUT;
246*c43e99fdSEd Maste
247*c43e99fdSEd Maste if (devpoll_queue(devpollop, fd, res) != 0)
248*c43e99fdSEd Maste return (-1);
249*c43e99fdSEd Maste
250*c43e99fdSEd Maste return (0);
251*c43e99fdSEd Maste }
252*c43e99fdSEd Maste
253*c43e99fdSEd Maste static int
devpoll_del(struct event_base * base,int fd,short old,short events,void * p)254*c43e99fdSEd Maste devpoll_del(struct event_base *base, int fd, short old, short events, void *p)
255*c43e99fdSEd Maste {
256*c43e99fdSEd Maste struct devpollop *devpollop = base->evbase;
257*c43e99fdSEd Maste int res;
258*c43e99fdSEd Maste (void)p;
259*c43e99fdSEd Maste
260*c43e99fdSEd Maste res = 0;
261*c43e99fdSEd Maste if (events & EV_READ)
262*c43e99fdSEd Maste res |= POLLIN;
263*c43e99fdSEd Maste if (events & EV_WRITE)
264*c43e99fdSEd Maste res |= POLLOUT;
265*c43e99fdSEd Maste
266*c43e99fdSEd Maste /*
267*c43e99fdSEd Maste * The only way to remove an fd from the /dev/poll monitored set is
268*c43e99fdSEd Maste * to use POLLREMOVE by itself. This removes ALL events for the fd
269*c43e99fdSEd Maste * provided so if we care about two events and are only removing one
270*c43e99fdSEd Maste * we must re-add the other event after POLLREMOVE.
271*c43e99fdSEd Maste */
272*c43e99fdSEd Maste
273*c43e99fdSEd Maste if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
274*c43e99fdSEd Maste return (-1);
275*c43e99fdSEd Maste
276*c43e99fdSEd Maste if ((res & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
277*c43e99fdSEd Maste /*
278*c43e99fdSEd Maste * We're not deleting all events, so we must resubmit the
279*c43e99fdSEd Maste * event that we are still interested in if one exists.
280*c43e99fdSEd Maste */
281*c43e99fdSEd Maste
282*c43e99fdSEd Maste if ((res & POLLIN) && (old & EV_WRITE)) {
283*c43e99fdSEd Maste /* Deleting read, still care about write */
284*c43e99fdSEd Maste devpoll_queue(devpollop, fd, POLLOUT);
285*c43e99fdSEd Maste } else if ((res & POLLOUT) && (old & EV_READ)) {
286*c43e99fdSEd Maste /* Deleting write, still care about read */
287*c43e99fdSEd Maste devpoll_queue(devpollop, fd, POLLIN);
288*c43e99fdSEd Maste }
289*c43e99fdSEd Maste }
290*c43e99fdSEd Maste
291*c43e99fdSEd Maste return (0);
292*c43e99fdSEd Maste }
293*c43e99fdSEd Maste
294*c43e99fdSEd Maste static void
devpoll_dealloc(struct event_base * base)295*c43e99fdSEd Maste devpoll_dealloc(struct event_base *base)
296*c43e99fdSEd Maste {
297*c43e99fdSEd Maste struct devpollop *devpollop = base->evbase;
298*c43e99fdSEd Maste
299*c43e99fdSEd Maste evsig_dealloc_(base);
300*c43e99fdSEd Maste if (devpollop->events)
301*c43e99fdSEd Maste mm_free(devpollop->events);
302*c43e99fdSEd Maste if (devpollop->changes)
303*c43e99fdSEd Maste mm_free(devpollop->changes);
304*c43e99fdSEd Maste if (devpollop->dpfd >= 0)
305*c43e99fdSEd Maste close(devpollop->dpfd);
306*c43e99fdSEd Maste
307*c43e99fdSEd Maste memset(devpollop, 0, sizeof(struct devpollop));
308*c43e99fdSEd Maste mm_free(devpollop);
309*c43e99fdSEd Maste }
310*c43e99fdSEd Maste
311*c43e99fdSEd Maste #endif /* EVENT__HAVE_DEVPOLL */
312