/*	$NetBSD: kern_event.c,v 1.98 2017/11/11 03:58:01 christos Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.98 2017/11/11 03:58:01 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/atomic.h>

static int	kqueue_scan(file_t *, size_t, struct kevent *,
			    const struct timespec *, register_t *,
			    const struct kevent_ops *, struct kevent *,
			    size_t);
static int	kqueue_ioctl(file_t *, u_long, void *);
static int	kqueue_fcntl(file_t *, u_int, void *);
static int	kqueue_poll(file_t *, int);
static int	kqueue_kqfilter(file_t *, struct knote *);
static int	kqueue_stat(file_t *, struct stat *);
static int	kqueue_close(file_t *);
static int	kqueue_register(struct kqueue *, struct kevent *);
static void	kqueue_doclose(struct kqueue *, struct klist *, int);

static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
static void	knote_enqueue(struct knote *);
static void	knote_activate(struct knote *);

static void	filt_kqdetach(struct knote *);
static int	filt_kqueue(struct knote *, long hint);
static int	filt_procattach(struct knote *);
static void	filt_procdetach(struct knote *);
static int	filt_proc(struct knote *, long hint);
static int	filt_fileattach(struct knote *);
static void	filt_timerexpire(void *x);
static int	filt_timerattach(struct knote *);
static void	filt_timerdetach(struct knote *);
static int	filt_timer(struct knote *, long hint);

static const struct fileops kqueueops = {
	.fo_read = (void *)enxio,
	.fo_write = (void *)enxio,
	.fo_ioctl = kqueue_ioctl,
	.fo_fcntl = kqueue_fcntl,
	.fo_poll = kqueue_poll,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_restart = fnullop_restart,
};

static const struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};

static const struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};

static const struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
	.f_detach = NULL,
	.f_event = NULL,
};

static const struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};

static u_int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 *
 * Note that 'refcnt' is meaningless for built-in filters.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	unsigned	refcnt;		/* reference count */
	const struct filterops *filtops;/* operations for filter */
	size_t		namelen;	/* length of name string */
};

/* System defined filters */
static struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0, },
	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
	{ NULL,			0,		0, NULL, 0 },
};

/* User defined kfilters */
static struct kfilter	*user_kfilters;		/* array */
static int		user_kfilterc;		/* current offset */
static int		user_kfiltermaxc;	/* max size so far */
static size_t		user_kfiltersz;		/* size of allocated memory */

/*
 * Global Locks.
 *
 * Lock order:
 *
 *	kqueue_filter_lock
 *	-> kn_kq->kq_fdp->fd_lock
 *	-> object lock (e.g., device driver lock, kqueue_misc_lock, &c.)
 *	-> kn_kq->kq_lock
 *
 * Locking rules:
 *
 *	f_attach: fdp->fd_lock, KERNEL_LOCK
 *	f_detach: fdp->fd_lock, KERNEL_LOCK
 *	f_event(!NOTE_SUBMIT) via kevent: fdp->fd_lock, _no_ object lock
 *	f_event via knote: whatever caller guarantees
 *		Typically,	f_event(NOTE_SUBMIT) via knote: object lock
 *				f_event(!NOTE_SUBMIT) via knote: nothing,
 *					acquires/releases object lock inside.
 */
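/*
 * Illustrative sketch, not part of this file: under the rules above, a
 * driver that raises an event while already holding its own object lock
 * passes NOTE_SUBMIT as the hint, so the f_event method knows not to take
 * that lock again.  The "sc_*" softc members below are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_bytes_ready += n;
 *	selnotify(&sc->sc_rsel, 0, NOTE_SUBMIT);
 *	mutex_exit(&sc->sc_lock);
 */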
static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
static kmutex_t		kqueue_misc_lock;	/* miscellaneous */

static kauth_listener_t	kqueue_listener;

static int
kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KEVENT_FILTER)
		return result;

	if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
	    ISSET(p->p_flag, PK_SUGID)))
		return result;

	result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialize the kqueue subsystem.
 */
void
kqueue_init(void)
{

	rw_init(&kqueue_filter_lock);
	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);

	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    kqueue_listener_cb, NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return &sys_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	/* user filter slots have a NULL name if previously deregistered */
	for (i = 0; i < user_kfilterc ; i++) {
		if (user_kfilters[i].name != NULL &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return &user_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname(const char *name)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return kfilter;

	return kfilter_byname_user(name);
}

/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	size_t len;
	int i;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EEXIST);	/* already exists */
	}
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* too many */
	}

	for (i = 0; i < user_kfilterc; i++) {
		kfilter = &user_kfilters[i];
		if (kfilter->name == NULL) {
			/* Previously deregistered slot.  Reuse. */
			goto reuse;
		}
	}

	/* check if need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/* Grow in KFILTER_EXTENT chunks. */
		user_kfiltermaxc += KFILTER_EXTENT;
		len = user_kfiltermaxc * sizeof(*kfilter);
		kfilter = kmem_alloc(len, KM_SLEEP);
		memset((char *)kfilter + user_kfiltersz, 0, len - user_kfiltersz);
		if (user_kfilters != NULL) {
			memcpy(kfilter, user_kfilters, user_kfiltersz);
			kmem_free(user_kfilters, user_kfiltersz);
		}
		user_kfiltersz = len;
		user_kfilters = kfilter;
	}
	/* Adding new slot */
	kfilter = &user_kfilters[user_kfilterc++];
 reuse:
	kfilter->name = kmem_strdupsize(name, &kfilter->namelen, KM_SLEEP);

	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;

	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));

	if (retfilter != NULL)
		*retfilter = kfilter->filter;
	rw_exit(&kqueue_filter_lock);

	return (0);
}

/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname_sys(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* can't detach system filters */
	}

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL) {
		rw_exit(&kqueue_filter_lock);
		return (ENOENT);
	}
	if (kfilter->refcnt != 0) {
		rw_exit(&kqueue_filter_lock);
		return (EBUSY);
	}

	/* Cast away const (but we know it's safe). */
	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
	kfilter->name = NULL;	/* mark as `not implemented' */

	if (kfilter->filtops != NULL) {
		/* Cast away const (but we know it's safe). */
		kmem_free(__UNCONST(kfilter->filtops),
		    sizeof(*kfilter->filtops));
		kfilter->filtops = NULL;	/* mark as `not implemented' */
	}
	rw_exit(&kqueue_filter_lock);

	return (0);
}

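/*
 * Illustrative sketch, not part of this file: how a hypothetical kernel
 * module might register and later unregister a user-defined filter with
 * kfilter_register()/kfilter_unregister() above.  "example_filtops", its
 * methods and the filter name are made up.
 *
 *	static const struct filterops example_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = example_filt_attach,
 *		.f_detach = example_filt_detach,
 *		.f_event = example_filt_event,
 *	};
 *	int filterid, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &filterid);
 *	(and on module unload)
 *	error = kfilter_unregister("EVFILT_EXAMPLE");
 */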
/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors. Calls fileops kqfilter method for given file descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	file_t *fp;

	fp = kn->kn_obj;

	return (*fp->f_ops->fo_kqfilter)(fp, kn);
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;
	int rv;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	if (hint != NOTE_SUBMIT)
		mutex_spin_enter(&kq->kq_lock);
	kn->kn_data = kq->kq_count;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		mutex_spin_exit(&kq->kq_lock);

	return rv;
}

/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	struct lwp *curl;

	curl = curlwp;

	mutex_enter(proc_lock);
	if (kn->kn_flags & EV_FLAG1) {
		/*
		 * NOTE_TRACK attaches to the child process too early
		 * for proc_find, so do a raw look up and check the state
		 * explicitly.
		 */
		p = proc_find_raw(kn->kn_id);
		if (p != NULL && p->p_stat != SIDL)
			p = NULL;
	} else {
		p = proc_find(kn->kn_id);
	}

	if (p == NULL) {
		mutex_exit(proc_lock);
		return ESRCH;
	}

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	mutex_enter(p->p_lock);
	mutex_exit(proc_lock);
	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
	    p, NULL, NULL, NULL) != 0) {
		mutex_exit(p->p_lock);
		return EACCES;
	}

	kn->kn_obj = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_obj;

	mutex_enter(p->p_lock);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	mutex_exit(p->p_lock);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event, fflag;
	struct kevent kev;
	struct kqueue *kq;
	int error;

	event = (u_int)hint & NOTE_PCTRLMASK;
	kq = kn->kn_kq;
	fflag = 0;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		fflag |= event;

	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_obj;

		if (p != NULL)
			kn->kn_data = P_WAITSTATUS(p);
		/*
		 * Process is gone, so flag the event as finished.
		 *
		 * Detach the knote from the watched process and mark
		 * it as such.  We can't leave this to kqueue_scan(),
		 * since the process might not exist by then.  And we
		 * have to do this now, since psignal KNOTE() is called
		 * also for zombies and we might end up reading freed
		 * memory if the kevent had already been picked up
		 * and the knote g/c'ed.
		 */
		filt_procdetach(kn);

		mutex_spin_enter(&kq->kq_lock);
		kn->kn_status |= KN_DETACHED;
		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_fflags |= fflag;
		mutex_spin_exit(&kq->kq_lock);

		return 1;
	}

	mutex_spin_enter(&kq->kq_lock);
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		/*
		 * Process forked, and user wants to track the new process,
		 * so attach a new knote to it, and immediately report an
		 * event with the parent's pid.  Register knote with new
		 * process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		mutex_spin_exit(&kq->kq_lock);
		error = kqueue_register(kq, &kev);
		mutex_spin_enter(&kq->kq_lock);
		if (error != 0)
			kn->kn_fflags |= NOTE_TRACKERR;
	}
	kn->kn_fflags |= fflag;
	fflag = kn->kn_fflags;
	mutex_spin_exit(&kq->kq_lock);

	return fflag != 0;
}

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	int tticks;

	mutex_enter(&kqueue_misc_lock);
	kn->kn_data++;
	knote_activate(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tticks = mstohz(kn->kn_sdata);
		if (tticks <= 0)
			tticks = 1;
		callout_schedule((callout_t *)kn->kn_hook, tticks);
	}
	mutex_exit(&kqueue_misc_lock);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq;
	int tticks;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0)
			return EINVAL;
		tticks = 1;
	}

	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
		atomic_dec_uint(&kq_ncallouts);
		return ENOMEM;
	}
	callout_init(calloutp, CALLOUT_MPSAFE);

	kq = kn->kn_kq;
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_hook = calloutp;
	mutex_spin_exit(&kq->kq_lock);

	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	callout_t *calloutp;

	calloutp = (callout_t *)kn->kn_hook;
	callout_halt(calloutp, NULL);
	callout_destroy(calloutp);
	kmem_free(calloutp, sizeof(*calloutp));
	atomic_dec_uint(&kq_ncallouts);
}

static int
filt_timer(struct knote *kn, long hint)
{
	int rv;

	mutex_enter(&kqueue_misc_lock);
	rv = (kn->kn_data != 0);
	mutex_exit(&kqueue_misc_lock);

	return rv;
}

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, which
 * has the same effect as a filter using filt_seltrue() as its event method.
 */
static void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_seltruedetach,
	.f_event = filt_seltrue,
};

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}

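/*
 * Illustrative sketch, not part of this file: a character device with no
 * real notion of blocking can point its d_kqfilter entry at
 * seltrue_kqfilter() above, much as it uses seltrue() for d_poll.  The
 * abridged "example_cdevsw" and its methods are hypothetical.
 *
 *	const struct cdevsw example_cdevsw = {
 *		.d_open = example_open,
 *		.d_read = example_read,
 *		.d_write = example_write,
 *		.d_poll = seltrue,
 *		.d_kqfilter = seltrue_kqfilter,
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 */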
/*
 * kqueue(2) system call.
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;
	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);
	fp->f_kqueue = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, fp, fd);
	return error;
}

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct lwp *l, const void *v, register_t *retval)
{
	return kqueue1(l, 0, retval);
}

int
sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	return kqueue1(l, SCARG(uap, flags), retval);
}

/*
 * kevent(2) system call.
 */
int
kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    struct kevent *changes, size_t index, int n)
{

	return copyin(changelist + index, changes, n * sizeof(*changes));
}

int
kevent_put_events(void *ctx, struct kevent *events,
    struct kevent *eventlist, size_t index, int n)
{

	return copyout(events, eventlist + index, n * sizeof(*events));
}

static const struct kevent_ops kevent_native_ops = {
	.keo_private = NULL,
	.keo_fetch_timeout = copyin,
	.keo_fetch_changes = kevent_fetch_changes,
	.keo_put_events = kevent_put_events,
};

int
sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(size_t) nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(size_t) nevents;
		syscallarg(const struct timespec *) timeout;
	} */

	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
	    SCARG(uap, timeout), &kevent_native_ops);
}

int
kevent1(register_t *retval, int fd,
    const struct kevent *changelist, size_t nchanges,
    struct kevent *eventlist, size_t nevents,
    const struct timespec *timeout,
    const struct kevent_ops *keops)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct timespec	ts;
	size_t i, n, ichange;
	int nerrors, error;
	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
	file_t *fp;

	/* check that we're dealing with a kq */
	fp = fd_getfile(fd);
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		fd_putfile(fd);
		return (EBADF);
	}

	if (timeout != NULL) {
		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		timeout = &ts;
	}

	kq = fp->f_kqueue;
	nerrors = 0;
	ichange = 0;

	/* traverse list of events to register */
	while (nchanges > 0) {
		n = MIN(nchanges, __arraycount(kevbuf));
		error = (*keops->keo_fetch_changes)(keops->keo_private,
		    changelist, kevbuf, ichange, n);
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kevbuf[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp);
			if (!error && !(kevp->flags & EV_RECEIPT))
				continue;
			if (nevents == 0)
				goto done;
			kevp->flags = EV_ERROR;
			kevp->data = error;
			error = (*keops->keo_put_events)
				(keops->keo_private, kevp,
				eventlist, nerrors, 1);
			if (error)
				goto done;
			nevents--;
			nerrors++;
		}
		nchanges -= n;	/* update the results */
		ichange += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
	    kevbuf, __arraycount(kevbuf));
 done:
	fd_putfile(fd);
	return (error);
}

/*
 * Register a given kevent kev onto the kqueue
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct kfilter *kfilter;
	filedesc_t *fdp;
	file_t *fp;
	fdfile_t *ff;
	struct knote *kn, *newkn;
	struct klist *list;
	int error, fd, rv;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	fd = 0;

	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);

	rw_enter(&kqueue_filter_lock, RW_READER);
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found nor implemented */
		rw_exit(&kqueue_filter_lock);
		kmem_free(newkn, sizeof(*newkn));
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		/* validate descriptor */
		if (kev->ident > INT_MAX
		    || (fp = fd_getfile(fd = kev->ident)) == NULL) {
			rw_exit(&kqueue_filter_lock);
			kmem_free(newkn, sizeof(*newkn));
			return EBADF;
		}
		mutex_enter(&fdp->fd_lock);
		ff = fdp->fd_dt->dt_ff[fd];
		if (ff->ff_refcnt & FR_CLOSING) {
			error = EBADF;
			goto doneunlock;
		}
		if (fd <= fdp->fd_lastkqfile) {
			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		mutex_enter(&fdp->fd_lock);
		if (fdp->fd_knhashmask != 0) {
			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			/* create new knote */
			kn = newkn;
			newkn = NULL;
			kn->kn_obj = fp;
			kn->kn_id = kev->ident;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;
			kn->kn_kfilter = kfilter;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			KASSERT(kn->kn_fop != NULL);
			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			if (!kn->kn_fop->f_isfd) {
				/*
				 * If knote is not on an fd, store on
				 * internal hash table.
				 */
				if (fdp->fd_knhashmask == 0) {
					/* XXXAD can block with fd_lock held */
					fdp->fd_knhash = hashinit(KN_HASHSIZE,
					    HASH_LIST, true,
					    &fdp->fd_knhashmask);
				}
				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
				    fdp->fd_knhashmask)];
			} else {
				/* Otherwise, knote is on an fd. */
				list = (struct klist *)
				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
				if ((int)kn->kn_id > fdp->fd_lastkqfile)
					fdp->fd_lastkqfile = kn->kn_id;
			}
			SLIST_INSERT_HEAD(list, kn, kn_link);

			KERNEL_LOCK(1, NULL);		/* XXXSMP */
			error = (*kfilter->filtops->f_attach)(kn);
			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
			if (error != 0) {
#ifdef DIAGNOSTIC
				printf("%s: event type %d not supported for "
				    "file type %d (error %d)\n", __func__,
				    kn->kn_filter, kn->kn_obj ?
				    ((file_t *)kn->kn_obj)->f_type : -1, error);
#endif
				/* knote_detach() drops fdp->fd_lock */
				knote_detach(kn, fdp, false);
				goto done;
			}
			atomic_inc_uint(&kfilter->refcnt);
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}
		/*
		 * We can get here if we are trying to attach
		 * an event to a file descriptor that does not
		 * support events, and the attach routine is
		 * broken and does not return an error.
		 */
		KASSERT(kn->kn_fop != NULL);
		KASSERT(kn->kn_fop->f_event != NULL);
		KERNEL_LOCK(1, NULL);			/* XXXSMP */
		rv = (*kn->kn_fop->f_event)(kn, 0);
		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
		if (rv)
			knote_activate(kn);
	} else {
		if (kn == NULL) {
			error = ENOENT;
			goto doneunlock;
		}
		if (kev->flags & EV_DELETE) {
			/* knote_detach() drops fdp->fd_lock */
			knote_detach(kn, fdp, true);
			goto done;
		}
	}

	/* disable knote */
	if ((kev->flags & EV_DISABLE)) {
		mutex_spin_enter(&kq->kq_lock);
		if ((kn->kn_status & KN_DISABLED) == 0)
			kn->kn_status |= KN_DISABLED;
		mutex_spin_exit(&kq->kq_lock);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE)) {
		knote_enqueue(kn);
	}
 doneunlock:
	mutex_exit(&fdp->fd_lock);
 done:
	rw_exit(&kqueue_filter_lock);
	if (newkn != NULL)
		kmem_free(newkn, sizeof(*newkn));
	if (fp != NULL)
		fd_putfile(fd);
	return (error);
}

#if defined(DEBUG)
#define	KN_FMT(buf, kn)						\
    (snprintb((buf), sizeof(buf), __KN_FLAG_BITS, (kn)->kn_status), buf)

static void
kqueue_check(const char *func, size_t line, const struct kqueue *kq)
{
	const struct knote *kn;
	int count;
	int nmarker;
	char buf[128];

	KASSERT(mutex_owned(&kq->kq_lock));
	KASSERT(kq->kq_count >= 0);

	count = 0;
	nmarker = 0;
	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
			panic("%s,%zu: kq=%p kn=%p !(MARKER|QUEUED) %s",
			    func, line, kq, kn, KN_FMT(buf, kn));
		}
		if ((kn->kn_status & KN_MARKER) == 0) {
			if (kn->kn_kq != kq) {
				panic("%s,%zu: kq=%p kn(%p) != kn->kq(%p): %s",
				    func, line, kq, kn, kn->kn_kq,
				    KN_FMT(buf, kn));
			}
			if ((kn->kn_status & KN_ACTIVE) == 0) {
				panic("%s,%zu: kq=%p kn=%p: !ACTIVE %s",
				    func, line, kq, kn, KN_FMT(buf, kn));
			}
			count++;
			if (count > kq->kq_count) {
				goto bad;
			}
		} else {
			nmarker++;
#if 0
			if (nmarker > 10000) {
				panic("%s,%zu: kq=%p too many markers: "
				    "%d != %d, nmarker=%d",
				    func, line, kq, kq->kq_count, count,
				    nmarker);
			}
#endif
		}
	}
	if (kq->kq_count != count) {
 bad:
		panic("%s,%zu: kq=%p kq->kq_count(%d) != count(%d), nmarker=%d",
		    func, line, kq, kq->kq_count, count, nmarker);
	}
}
#define	kq_check(a)	kqueue_check(__func__, __LINE__, (a))
#else /* defined(DEBUG) */
#define	kq_check(a)	/* nothing */
#endif /* defined(DEBUG) */

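/*
 * A note on the scan below: kqueue_scan() queues a private marker knote
 * (KN_MARKER) at the tail of kq_head so that kq_lock can be dropped and
 * re-taken while events are delivered; the scan is complete once that
 * marker is dequeued again.  Markers queued by other concurrent scans of
 * the same kqueue are simply skipped over.
 */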
/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results into ulistp.  Timeout is determined by tsp; if
 * NULL, wait indefinitely, if 0 valued, perform a poll, otherwise wait
 * as appropriate.
 */
static int
kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp,
	    const struct timespec *tsp, register_t *retval,
	    const struct kevent_ops *keops, struct kevent *kevbuf,
	    size_t kevcnt)
{
	struct kqueue	*kq;
	struct kevent	*kevp;
	struct timespec	ats, sleepts;
	struct knote	*kn, *marker, morker;
	size_t		count, nkev, nevents;
	int		timeout, error, rv;
	filedesc_t	*fdp;

	fdp = curlwp->l_fd;
	kq = fp->f_kqueue;
	count = maxevents;
	nkev = nevents = error = 0;
	if (count == 0) {
		*retval = 0;
		return 0;
	}

	if (tsp) {				/* timeout supplied */
		ats = *tsp;
		if (inittimeleft(&ats, &sleepts) == -1) {
			*retval = maxevents;
			return EINVAL;
		}
		timeout = tstohz(&ats);
		if (timeout <= 0)
			timeout = -1;		/* do poll */
	} else {
		/* no timeout, wait forever */
		timeout = 0;
	}

	memset(&morker, 0, sizeof(morker));
	marker = &morker;
	marker->kn_status = KN_MARKER;
	mutex_spin_enter(&kq->kq_lock);
 retry:
	kevp = kevbuf;
	if (kq->kq_count == 0) {
		if (timeout >= 0) {
			error = cv_timedwait_sig(&kq->kq_cv,
			    &kq->kq_lock, timeout);
			if (error == 0) {
				if (tsp == NULL || (timeout =
				    gettimeleft(&ats, &sleepts)) > 0)
					goto retry;
			} else {
				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error == EWOULDBLOCK)
					error = 0;
			}
		}
		mutex_spin_exit(&kq->kq_lock);
	} else {
		/* mark end of knote list */
		TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);

		/*
		 * Acquire the fdp->fd_lock interlock to avoid races with
		 * file creation/destruction from other threads.
		 */
		mutex_spin_exit(&kq->kq_lock);
		mutex_enter(&fdp->fd_lock);
		mutex_spin_enter(&kq->kq_lock);

		while (count != 0) {
			kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
			while ((kn->kn_status & KN_MARKER) != 0) {
				if (kn == marker) {
					/* it's our marker, stop */
					TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
					if (count < maxevents || (tsp != NULL &&
					    (timeout = gettimeleft(&ats,
					    &sleepts)) <= 0))
						goto done;
					mutex_exit(&fdp->fd_lock);
					goto retry;
				}
				/* someone else's marker. */
				kn = TAILQ_NEXT(kn, kn_tqe);
			}
			kq_check(kq);
			kq->kq_count--;
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_BUSY;
			kq_check(kq);
			if (kn->kn_status & KN_DISABLED) {
				kn->kn_status &= ~KN_BUSY;
				/* don't want disabled events */
				continue;
			}
			if ((kn->kn_flags & EV_ONESHOT) == 0) {
				mutex_spin_exit(&kq->kq_lock);
				KASSERT(kn->kn_fop != NULL);
				KASSERT(kn->kn_fop->f_event != NULL);
				KERNEL_LOCK(1, NULL);		/* XXXSMP */
				KASSERT(mutex_owned(&fdp->fd_lock));
				rv = (*kn->kn_fop->f_event)(kn, 0);
				KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
				mutex_spin_enter(&kq->kq_lock);
				/* Re-poll if note was re-enqueued. */
				if ((kn->kn_status & KN_QUEUED) != 0) {
					kn->kn_status &= ~KN_BUSY;
					continue;
				}
				if (rv == 0) {
					/*
					 * non-ONESHOT event that hasn't
					 * triggered again, so de-queue.
					 */
					kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
					continue;
				}
			}
			/* XXXAD should be got from f_event if !oneshot. */
			*kevp++ = kn->kn_kevent;
			nkev++;
			if (kn->kn_flags & EV_ONESHOT) {
				/* delete ONESHOT events after retrieval */
				kn->kn_status &= ~KN_BUSY;
				mutex_spin_exit(&kq->kq_lock);
				knote_detach(kn, fdp, true);
				mutex_enter(&fdp->fd_lock);
				mutex_spin_enter(&kq->kq_lock);
			} else if (kn->kn_flags & EV_CLEAR) {
				/* clear state after retrieval */
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else if (kn->kn_flags & EV_DISPATCH) {
				kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else {
				/* add event back on list */
				kq_check(kq);
				kn->kn_status |= KN_QUEUED;
				kn->kn_status &= ~KN_BUSY;
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
				kq->kq_count++;
				kq_check(kq);
			}
			if (nkev == kevcnt) {
				/* do copyouts in kevcnt chunks */
				mutex_spin_exit(&kq->kq_lock);
				mutex_exit(&fdp->fd_lock);
				error = (*keops->keo_put_events)
				    (keops->keo_private,
				    kevbuf, ulistp, nevents, nkev);
				mutex_enter(&fdp->fd_lock);
				mutex_spin_enter(&kq->kq_lock);
				nevents += nkev;
				nkev = 0;
				kevp = kevbuf;
			}
			count--;
			if (error != 0 || count == 0) {
				/* remove marker */
				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
				break;
			}
		}
 done:
		mutex_spin_exit(&kq->kq_lock);
		mutex_exit(&fdp->fd_lock);
	}
	if (nkev != 0) {
		/* copyout remaining events */
		error = (*keops->keo_put_events)(keops->keo_private,
		    kevbuf, ulistp, nevents, nkev);
	}
	*retval = maxevents - count;

	return error;
}

/*
 * fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported. They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find name for filter, and return result in
 *				name, which is of size len.
 *	KFILTER_BYNAME		find filter for name.  len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(file_t *fp, u_long com, void *data)
{
	struct kfilter_mapping	*km;
	const struct kfilter	*kfilter;
	char			*name;
	int			error;

	km = data;
	error = 0;
	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL) {
			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
			rw_exit(&kqueue_filter_lock);
			error = copyoutstr(name, km->name, km->len, NULL);
		} else {
			rw_exit(&kqueue_filter_lock);
			error = ENOENT;
		}
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			break;
		}
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		rw_exit(&kqueue_filter_lock);
		break;

	default:
		error = ENOTTY;
		break;

	}
	kmem_free(name, KFILTER_MAXNAME);
	return (error);
}

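/*
 * Illustrative userland sketch, not part of this file: resolving a filter
 * name to its id with the KFILTER_BYNAME ioctl handled above.  "kqfd" is
 * a descriptor from kqueue(2) and the filter name is made up.
 *
 *	struct kfilter_mapping km;
 *	char name[] = "EVFILT_EXAMPLE";
 *
 *	km.name = name;
 *	if (ioctl(kqfd, KFILTER_BYNAME, &km) == 0)
 *		(km.filter now holds the id to pass to EV_SET())
 */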
/*
 * fileops fcntl method for a kqueue descriptor.
 */
static int
kqueue_fcntl(file_t *fp, u_int com, void *data)
{

	return (ENOTTY);
}

/*
 * fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(file_t *fp, int events)
{
	struct kqueue	*kq;
	int		revents;

	kq = fp->f_kqueue;

	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		mutex_spin_enter(&kq->kq_lock);
		if (kq->kq_count != 0) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &kq->kq_sel);
		}
		kq_check(kq);
		mutex_spin_exit(&kq->kq_lock);
	}

	return revents;
}

/*
 * fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(file_t *fp, struct stat *st)
{
	struct kqueue *kq;

	kq = fp->f_kqueue;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;

	return 0;
}

static void
kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
{
	struct knote *kn;
	filedesc_t *fdp;

	fdp = kq->kq_fdp;

	KASSERT(mutex_owned(&fdp->fd_lock));

	for (kn = SLIST_FIRST(list); kn != NULL;) {
		if (kq != kn->kn_kq) {
			kn = SLIST_NEXT(kn, kn_link);
			continue;
		}
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
		kn = SLIST_FIRST(list);
	}
}


/*
 * fileops close method for a kqueue descriptor.
 */
static int
kqueue_close(file_t *fp)
{
	struct kqueue *kq;
	filedesc_t *fdp;
	fdfile_t *ff;
	int i;

	kq = fp->f_kqueue;
	fp->f_kqueue = NULL;
	fp->f_type = 0;
	fdp = curlwp->l_fd;

	mutex_enter(&fdp->fd_lock);
	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
			continue;
		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
		}
	}
	mutex_exit(&fdp->fd_lock);

	KASSERT(kq->kq_count == 0);
	mutex_destroy(&kq->kq_lock);
	cv_destroy(&kq->kq_cv);
	seldestroy(&kq->kq_sel);
	kmem_free(kq, sizeof(*kq));

	return (0);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
static int
kqueue_kqfilter(file_t *fp, struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	KASSERT(fp == kn->kn_obj);

	if (kn->kn_filter != EVFILT_READ)
		return 1;

	kn->kn_fop = &kqread_filtops;
	mutex_enter(&kq->kq_lock);
	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
	mutex_exit(&kq->kq_lock);

	return 0;
}


/*
 * Walk down a list of knotes, activating them if their event has
 * triggered.  The caller's object lock (e.g. device driver lock)
 * must be held.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *tmpkn;

	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
		KASSERT(kn->kn_fop != NULL);
		KASSERT(kn->kn_fop->f_event != NULL);
		if ((*kn->kn_fop->f_event)(kn, hint))
			knote_activate(kn);
	}
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(int fd)
{
	struct klist *list;
	struct knote *kn;
	filedesc_t *fdp;

	fdp = curlwp->l_fd;
	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
	mutex_enter(&fdp->fd_lock);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
	}
	mutex_exit(&fdp->fd_lock);
}

/*
 * Drop knote.  Called with fdp->fd_lock held, and will drop before
 * returning.
 */
static void
knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
{
	struct klist *list;
	struct kqueue *kq;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);
	KASSERT(mutex_owned(&fdp->fd_lock));

	KASSERT(kn->kn_fop != NULL);
	/* Remove from monitored object. */
	if (dofop) {
		KASSERT(kn->kn_fop->f_detach != NULL);
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		(*kn->kn_fop->f_detach)(kn);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	}

	/* Remove from descriptor table. */
	if (kn->kn_fop->f_isfd)
		list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);

	/* Remove from kqueue. */
 again:
	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_QUEUED) != 0) {
		kq_check(kq);
		kq->kq_count--;
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kq_check(kq);
	} else if (kn->kn_status & KN_BUSY) {
		mutex_spin_exit(&kq->kq_lock);
		goto again;
	}
	mutex_spin_exit(&kq->kq_lock);

	mutex_exit(&fdp->fd_lock);
	if (kn->kn_fop->f_isfd)
		fd_putfile(kn->kn_id);
	atomic_dec_uint(&kn->kn_kfilter->refcnt);
	kmem_free(kn, sizeof(*kn));
}

/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_DISABLED) != 0) {
		kn->kn_status &= ~KN_DISABLED;
	}
	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Mark knote as active and, if it is not already queued, queue it.
 */
static void
knote_activate(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}