/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

/*
 * Defining EV_OOBAND ourselves is required on:
 * - FreeBSD, until at least 11.0
 * - Older versions of Mac OS X
 *
 * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
 */
#ifndef EV_OOBAND
#define EV_OOBAND EV_FLAG1
#endif

static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);


int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  uv__cloexec(loop->backend_fd, 1);

  return 0;
}


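/* Set in the child after fork() when the parent had CFRunLoop-backed fs-event
 * state; from then on FSEvents is off limits and fs-event handles fall back
 * to plain kqueue vnode watching (see uv__io_fork() and uv_fs_event_start()).
 */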
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
#endif

int uv__io_fork(uv_loop_t* loop) {
  int err;
  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
  return err;
}


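/* Probe whether `fd` is a descriptor type kqueue can watch: register a
 * throwaway EVFILT_READ event for it and, if that succeeds, delete it again.
 * Whatever error kevent() reports for the EV_ADD is passed back to the
 * caller. */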
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  uv_process_t* process;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

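  /* Translate watchers whose interest set changed since the last poll into
   * kevent changelist entries. The changelist is flushed whenever the local
   * `events` buffer fills up; any remainder is submitted together with the
   * kevent() call in the loop below. */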
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

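  /* With idle-time metrics enabled, poll with a zero timeout first; the
   * caller's timeout is stashed in user_timeout and restored via
   * reset_timeout before the loop would otherwise block or return. */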
  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;

      /* Handle kevent NOTE_EXIT results */
      if (ev->filter == EVFILT_PROC) {
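        /* For EVFILT_PROC, `ident` holds the child's pid rather than a file
         * descriptor. Flag the matching process handle and the loop so that
         * uv__wait_children() reaps exited children once this batch of events
         * has been processed. */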
        QUEUE_FOREACH(q, &loop->process_handles) {
          process = QUEUE_DATA(q, uv_process_t, queue);
          if (process->pid == fd) {
            process->flags |= UV_HANDLE_REAP;
            loop->flags |= UV_LOOP_REAP_CHILDREN;
            break;
          }
        }
        nevents++;
        continue;
      }

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
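        /* EV_EOF on the read filter means the peer has closed its end; report
         * it as UV__POLLRDHUP when the watcher subscribed to it. */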
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (loop->flags & UV_LOOP_REAP_CHILDREN) {
      loop->flags &= ~UV_LOOP_REAP_CHILDREN;
      uv__wait_children(loop);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);
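    /* Subtract the time already spent waiting from the timeout and poll
     * again, or return if the timeout has expired. */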

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}


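/* Invalidate any events still pending for `fd` in the batch that uv__io_poll()
 * is currently iterating over, so they are skipped once the fd is closed.
 * EVFILT_PROC entries are left untouched: their ident is a pid, which may
 * coincidentally have the same numeric value as the fd. */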
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
      events[i].ident = -1;
}


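/* kqueue fs-event callback. The vnode watcher is registered with EV_ONESHOT,
 * so each invocation maps the reported NOTE_* flags to UV_CHANGE or UV_RENAME,
 * recovers the path with F_GETPATH where available, invokes the user callback
 * and then re-arms the watcher unless it was closed by that callback. */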
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


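/* On Apple platforms, directory watches are handed off to the FSEvents-based
 * backend (uv__fsevents_init) when possible; non-directory paths, and any
 * watch started after the process forked with CFRunLoop state, use the plain
 * kqueue vnode watcher set up in the fallback path below. */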
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify fields to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
    int r;
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }
fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}