xref: /netbsd-src/external/mit/libuv/dist/src/unix/epoll.c (revision 5f2f42719cd62ff11fd913b40b7ce19f07c4fd25)
/* Copyright libuv contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <sys/epoll.h>


int uv__epoll_init(uv_loop_t* loop) {
  int fd;
  fd = epoll_create1(O_CLOEXEC);

  /* epoll_create1() can fail either because it's not implemented (old kernel)
   * or because it doesn't understand the O_CLOEXEC flag.
   */
  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
    fd = epoll_create(256);

    if (fd != -1)
      uv__cloexec(fd, 1);
  }
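  /* Note: epoll_create1() was added in Linux 2.6.27, so the fallback above
   * covers older kernels.  The size argument to epoll_create() has been
   * ignored since Linux 2.6.8; it merely has to be positive.
   */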

  loop->backend_fd = fd;
  if (fd == -1)
    return UV__ERR(errno);

  return 0;
}


void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct epoll_event* events;
  struct epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);
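
  /* uv__io_poll() stashes a pointer to its on-stack epoll_event array in
   * loop->watchers[loop->nwatchers] and the event count in the slot after
   * it while it runs callbacks; we read that stash here so any pending
   * events for this fd can be marked invalid.
   */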
  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with the same file descriptor */
    for (i = 0; i < nfds; i++)
      if (events[i].data.fd == fd)
        events[i].data.fd = -1;

  /* Remove the file descriptor from the epoll set.
   * This avoids a problem where the same file description remains open
   * in another process, causing repeated junk epoll events.
   *
   * We pass in a dummy epoll_event to work around a bug in old kernels.
   */
  if (loop->backend_fd >= 0) {
    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
     */
    memset(&dummy, 0, sizeof(dummy));
    epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
  }
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct epoll_event e;
  int rc;

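  /* Probe whether this fd is usable with epoll: try to add it to the set
   * (EEXIST means it is already there, which is fine) and, on success,
   * remove it again.  Regular files are rejected by the kernel with EPERM,
   * which the caller receives as an error.
   */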
  memset(&e, 0, sizeof(e));
  e.events = POLLIN;
  e.data.fd = -1;

  rc = 0;
  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
    if (errno != EEXIST)
      rc = UV__ERR(errno);

  if (rc == 0)
    if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
      abort();

  return rc;
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
   * effectively infinite on 32-bit architectures.  To avoid blocking
   * indefinitely, we cap the timeout and poll again if necessary.
   *
   * Note that "30 minutes" is a simplification because it depends on
   * the value of CONFIG_HZ.  The magic constant assumes CONFIG_HZ=1200,
   * that being the largest value I have seen in the wild (and only once).
   */
  static const int max_safe_timeout = 1789569;
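  /* For reference: 1789569 == INT32_MAX / 1200, i.e. the largest number of
   * milliseconds whose jiffy count still fits in a signed 32-bit value when
   * CONFIG_HZ=1200.
   */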
  static int no_epoll_pwait_cached;
  static int no_epoll_wait_cached;
  int no_epoll_pwait;
  int no_epoll_wait;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  sigset_t sigset;
  uint64_t sigmask;
  uint64_t base;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  memset(&e, 0, sizeof(e));

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  sigmask = 0;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGPROF);
    sigmask |= 1 << (SIGPROF - 1);
  }
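  /* Note: below, sigmask is only ever compared against zero; the mask that
   * is actually handed to the kernel is sigset.  The bit set here mirrors
   * the kernel's sigset encoding (signal N occupies bit N-1).
   */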

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
    user_timeout = 0;
  }
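  /* When idle-time metrics are enabled, the first pass polls with a zero
   * timeout so that events which are already pending are processed without
   * the wait being attributed to idle time; the caller's timeout is then
   * restored before the loop would actually block.
   */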

  /* You could argue there is a dependency between these two but
   * ultimately we don't care about their ordering with respect
   * to one another. Worst case, we make a few system calls that
   * could have been avoided because another thread already knows
   * they fail with ENOSYS. Hardly the end of the world.
   */
  no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
  no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* See the comment for max_safe_timeout for an explanation of why
     * this is necessary.  Executive summary: kernel bug workaround.
     */
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        abort();

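    /* Prefer epoll_pwait() when a signal mask is needed: it applies the mask
     * atomically around the wait.  epoll_pwait() was only added in Linux
     * 2.6.19, so when it is unavailable (or when plain epoll_wait() itself
     * turned out to be missing) we fall back to bracketing the wait with the
     * pthread_sigmask() calls above and below.
     */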
    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
      nfds = epoll_pwait(loop->backend_fd,
                         events,
                         ARRAY_SIZE(events),
                         timeout,
                         &sigset);
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_pwait_cached, 1);
        no_epoll_pwait = 1;
      }
    } else {
      nfds = epoll_wait(loop->backend_fd,
                        events,
                        ARRAY_SIZE(events),
                        timeout);
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_wait_cached, 1);
        no_epoll_wait = 1;
      }
    }

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno == ENOSYS) {
        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
        continue;
      }

      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

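    /* Stash the events array and its length in the two spare slots at the
     * end of loop->watchers.  If a callback below closes a file descriptor,
     * uv__platform_invalidate_fd() uses this stash to mark any remaining
     * events for that fd as invalid (data.fd = -1).
     */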
    {
      /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
      union {
        struct epoll_event* events;
        uv__io_t* watchers;
      } x;

      x.events = events;
      assert(loop->watchers != NULL);
      loop->watchers[loop->nwatchers] = x.watchers;
      loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    }

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data.fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when a previous callback invocation in this loop has
       * stopped the current watcher. Also filters out events that the user
       * has not requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event.  In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again.  By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read.  If anything, libuv is to blame here.  The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event.  We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |=
          w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);

      if (pe->events != 0) {
        /* Run signal watchers last.  This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->events);
        }

        nevents++;
      }
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }
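    /* Design note: a full events array (1024 entries) suggests more events
     * are pending, so we re-poll with a zero timeout to drain them.  The
     * count cap (48 iterations) bounds the time spent in this phase so the
     * rest of the loop is not starved.
     */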

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}