/*	$NetBSD: kern_event.c,v 1.108 2020/10/31 01:08:32 christos Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.108 2020/10/31 01:08:32 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/atomic.h>

static int	kqueue_scan(file_t *, size_t, struct kevent *,
			    const struct timespec *, register_t *,
			    const struct kevent_ops *, struct kevent *,
			    size_t);
static int	kqueue_ioctl(file_t *, u_long, void *);
static int	kqueue_fcntl(file_t *, u_int, void *);
static int	kqueue_poll(file_t *, int);
static int	kqueue_kqfilter(file_t *, struct knote *);
static int	kqueue_stat(file_t *, struct stat *);
static int	kqueue_close(file_t *);
static int	kqueue_register(struct kqueue *, struct kevent *);
static void	kqueue_doclose(struct kqueue *, struct klist *, int);

static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
static void	knote_enqueue(struct knote *);
static void	knote_activate(struct knote *);

static void	filt_kqdetach(struct knote *);
static int	filt_kqueue(struct knote *, long hint);
static int	filt_procattach(struct knote *);
static void	filt_procdetach(struct knote *);
static int	filt_proc(struct knote *, long hint);
static int	filt_fileattach(struct knote *);
static void	filt_timerexpire(void *x);
static int	filt_timerattach(struct knote *);
static void	filt_timerdetach(struct knote *);
static int	filt_timer(struct knote *, long hint);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);
static int	filt_userattach(struct knote *);
static void	filt_userdetach(struct knote *);
static int	filt_user(struct knote *, long hint);
static void	filt_usertouch(struct knote *, struct kevent *, long type);

static const struct fileops kqueueops = {
	.fo_name = "kqueue",
	.fo_read = (void *)enxio,
	.fo_write = (void *)enxio,
	.fo_ioctl = kqueue_ioctl,
	.fo_fcntl = kqueue_fcntl,
	.fo_poll = kqueue_poll,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_restart = fnullop_restart,
};

static const struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};

static const struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};

static const struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
	.f_detach = NULL,
	.f_event = NULL,
};

static const struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};

static const struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fs,
};

static const struct filterops user_filtops = {
	.f_isfd = 0,
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static u_int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 *
 * Note that 'refcnt' is meaningless for built-in filters.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	unsigned	refcnt;		/* reference count */
	const struct filterops *filtops;/* operations for filter */
	size_t		namelen;	/* length of name string */
};

/* System defined filters */
static struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0, },
	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
	{ "EVFILT_FS",		EVFILT_FS,	0, &fs_filtops, 0 },
	{ "EVFILT_USER",	EVFILT_USER,	0, &user_filtops, 0 },
	{ NULL,			0,		0, NULL, 0 },
};

/* User defined kfilters */
static struct kfilter	*user_kfilters;		/* array */
static int		user_kfilterc;		/* current offset */
static int		user_kfiltermaxc;	/* max size so far */
static size_t		user_kfiltersz;		/* size of allocated memory */

/*
 * Global Locks.
 *
 * Lock order:
 *
 *	kqueue_filter_lock
 *	-> kn_kq->kq_fdp->fd_lock
 *	-> object lock (e.g., device driver lock, kqueue_misc_lock, &c.)
 *	-> kn_kq->kq_lock
 *
 * Locking rules:
 *
 *	f_attach: fdp->fd_lock, KERNEL_LOCK
 *	f_detach: fdp->fd_lock, KERNEL_LOCK
 *	f_event(!NOTE_SUBMIT) via kevent: fdp->fd_lock, _no_ object lock
 *	f_event via knote: whatever caller guarantees
 *		Typically,	f_event(NOTE_SUBMIT) via knote: object lock
 *				f_event(!NOTE_SUBMIT) via knote: nothing,
 *					acquires/releases object lock inside.
 */
static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
static kmutex_t		kqueue_misc_lock;	/* miscellaneous */

static kauth_listener_t	kqueue_listener;

static int
kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KEVENT_FILTER)
		return result;

	if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
	    ISSET(p->p_flag, PK_SUGID)))
		return result;

	result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialize the kqueue subsystem.
 */
void
kqueue_init(void)
{

	rw_init(&kqueue_filter_lock);
	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);

	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    kqueue_listener_cb, NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return &sys_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	/* user filter slots have a NULL name if previously deregistered */
	for (i = 0; i < user_kfilterc; i++) {
		if (user_kfilters[i].name != NULL &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return &user_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname(const char *name)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return kfilter;

	return kfilter_byname_user(name);
}

/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	size_t len;
	int i;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EEXIST);	/* already exists */
	}
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* too many */
	}

	for (i = 0; i < user_kfilterc; i++) {
		kfilter = &user_kfilters[i];
		if (kfilter->name == NULL) {
			/* Previously deregistered slot.  Reuse. */
			goto reuse;
		}
	}

	/* check if need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/* Grow in KFILTER_EXTENT chunks. */
		user_kfiltermaxc += KFILTER_EXTENT;
		len = user_kfiltermaxc * sizeof(*kfilter);
		kfilter = kmem_alloc(len, KM_SLEEP);
		memset((char *)kfilter + user_kfiltersz, 0, len - user_kfiltersz);
		if (user_kfilters != NULL) {
			memcpy(kfilter, user_kfilters, user_kfiltersz);
			kmem_free(user_kfilters, user_kfiltersz);
		}
		user_kfiltersz = len;
		user_kfilters = kfilter;
	}
	/* Adding new slot */
	kfilter = &user_kfilters[user_kfilterc++];
 reuse:
	kfilter->name = kmem_strdupsize(name, &kfilter->namelen, KM_SLEEP);

	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;

	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));

	if (retfilter != NULL)
		*retfilter = kfilter->filter;
	rw_exit(&kqueue_filter_lock);

	return (0);
}

/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname_sys(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* can't detach system filters */
	}

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL) {
		rw_exit(&kqueue_filter_lock);
		return (ENOENT);
	}
	if (kfilter->refcnt != 0) {
		rw_exit(&kqueue_filter_lock);
		return (EBUSY);
	}

	/* Cast away const (but we know it's safe). */
	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
	kfilter->name = NULL;	/* mark as `not implemented' */

	if (kfilter->filtops != NULL) {
		/* Cast away const (but we know it's safe). */
		kmem_free(__UNCONST(kfilter->filtops),
		    sizeof(*kfilter->filtops));
		kfilter->filtops = NULL;	/* mark as `not implemented' */
	}
	rw_exit(&kqueue_filter_lock);

	return (0);
}
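
/*
 * Illustrative use of the registration interface above (a sketch only;
 * the filter name and callback names are hypothetical, not part of this
 * file):
 *
 *	static const struct filterops example_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = example_attach,
 *		.f_detach = example_detach,
 *		.f_event = example_event,
 *	};
 *	int filter, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops, &filter);
 *	...
 *	error = kfilter_unregister("EVFILT_EXAMPLE");
 */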

/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors.  Calls fileops kqfilter method for given file descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	file_t *fp;

	fp = kn->kn_obj;

	return (*fp->f_ops->fo_kqfilter)(fp, kn);
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;
	int rv;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	if (hint != NOTE_SUBMIT)
		mutex_spin_enter(&kq->kq_lock);
	kn->kn_data = kq->kq_count;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		mutex_spin_exit(&kq->kq_lock);

	return rv;
}

/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	struct lwp *curl;

	curl = curlwp;

	mutex_enter(&proc_lock);
	if (kn->kn_flags & EV_FLAG1) {
		/*
		 * NOTE_TRACK attaches to the child process too early
		 * for proc_find, so do a raw look up and check the state
		 * explicitly.
		 */
		p = proc_find_raw(kn->kn_id);
		if (p != NULL && p->p_stat != SIDL)
			p = NULL;
	} else {
		p = proc_find(kn->kn_id);
	}

	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	mutex_enter(p->p_lock);
	mutex_exit(&proc_lock);
	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
	    p, NULL, NULL, NULL) != 0) {
		mutex_exit(p->p_lock);
		return EACCES;
	}

	kn->kn_obj = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_obj;

	mutex_enter(p->p_lock);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	mutex_exit(p->p_lock);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event, fflag;
	struct kevent kev;
	struct kqueue *kq;
	int error;

	event = (u_int)hint & NOTE_PCTRLMASK;
	kq = kn->kn_kq;
	fflag = 0;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		fflag |= event;

	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_obj;

		if (p != NULL)
			kn->kn_data = P_WAITSTATUS(p);
		/*
		 * Process is gone, so flag the event as finished.
		 *
		 * Detach the knote from watched process and mark
		 * it as such.  We can't leave this to kqueue_scan(),
		 * since the process might not exist by then.  And we
		 * have to do this now, since psignal KNOTE() is called
		 * also for zombies and we might end up reading freed
		 * memory if the kevent would already be picked up
		 * and knote g/c'ed.
		 */
		filt_procdetach(kn);

		mutex_spin_enter(&kq->kq_lock);
		kn->kn_status |= KN_DETACHED;
		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_fflags |= fflag;
		mutex_spin_exit(&kq->kq_lock);

		return 1;
	}

	mutex_spin_enter(&kq->kq_lock);
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		/*
		 * Process forked, and user wants to track the new process,
		 * so attach a new knote to it, and immediately report an
		 * event with the parent's pid.  Register knote with new
		 * process.
		 */
		memset(&kev, 0, sizeof(kev));
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		mutex_spin_exit(&kq->kq_lock);
		error = kqueue_register(kq, &kev);
		mutex_spin_enter(&kq->kq_lock);
		if (error != 0)
			kn->kn_fflags |= NOTE_TRACKERR;
	}
	kn->kn_fflags |= fflag;
	fflag = kn->kn_fflags;
	mutex_spin_exit(&kq->kq_lock);

	return fflag != 0;
}

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	int tticks;

	mutex_enter(&kqueue_misc_lock);
	kn->kn_data++;
	knote_activate(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tticks = mstohz(kn->kn_sdata);
		if (tticks <= 0)
			tticks = 1;
		callout_schedule((callout_t *)kn->kn_hook, tticks);
	}
	mutex_exit(&kqueue_misc_lock);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq;
	int tticks;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0)
			return EINVAL;
		tticks = 1;
	}

	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
		atomic_dec_uint(&kq_ncallouts);
		return ENOMEM;
	}
	callout_init(calloutp, CALLOUT_MPSAFE);

	kq = kn->kn_kq;
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_hook = calloutp;
	mutex_spin_exit(&kq->kq_lock);

	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	/* prevent rescheduling when we expire */
	kn->kn_flags |= EV_ONESHOT;
	mutex_spin_exit(&kq->kq_lock);

	calloutp = (callout_t *)kn->kn_hook;
	callout_halt(calloutp, NULL);
	callout_destroy(calloutp);
	kmem_free(calloutp, sizeof(*calloutp));
	atomic_dec_uint(&kq_ncallouts);
}

static int
filt_timer(struct knote *kn, long hint)
{
	int rv;

	mutex_enter(&kqueue_misc_lock);
	rv = (kn->kn_data != 0);
	mutex_exit(&kqueue_misc_lock);

	return rv;
}
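
/*
 * Illustrative userland use of the timer filter implemented above (a
 * sketch; the identifier and period are arbitrary example values):
 *
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, 0);
 *
 * arms a periodic timer that fires roughly every 500 ms; each delivered
 * kevent reports the number of expirations since the last read in its
 * data field (kn_data above).
 */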

/*
 * Filter event method for EVFILT_FS.
 */
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{

	mutex_enter(&kqueue_misc_lock);
	kn->kn_flags |= EV_CLEAR;
	SLIST_INSERT_HEAD(&fs_klist, kn, kn_selnext);
	mutex_exit(&kqueue_misc_lock);

	return 0;
}

static void
filt_fsdetach(struct knote *kn)
{

	mutex_enter(&kqueue_misc_lock);
	SLIST_REMOVE(&fs_klist, kn, knote, kn_selnext);
	mutex_exit(&kqueue_misc_lock);
}

static int
filt_fs(struct knote *kn, long hint)
{
	int rv;

	mutex_enter(&kqueue_misc_lock);
	kn->kn_fflags |= hint;
	rv = (kn->kn_fflags != 0);
	mutex_exit(&kqueue_misc_lock);

	return rv;
}

static int
filt_userattach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	mutex_spin_exit(&kq->kq_lock);
	return (0);
}

static void
filt_userdetach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_kq;
	int hookid;

	mutex_spin_enter(&kq->kq_lock);
	hookid = kn->kn_hookid;
	mutex_spin_exit(&kq->kq_lock);

	return hookid;
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, long type)
{
	struct kqueue *kq = kn->kn_kq;
	int ffctrl;

	mutex_spin_enter(&kq->kq_lock);
	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, which has
 * the same effect as a filter using filt_seltrue() as its event method.
 */
static void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_seltruedetach,
	.f_event = filt_seltrue,
	.f_touch = NULL,
};

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}

/*
 * kqueue(2) system call.
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;
	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);
	fp->f_kqueue = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, fp, fd);
	return error;
}

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct lwp *l, const void *v, register_t *retval)
{
	return kqueue1(l, 0, retval);
}

int
sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	return kqueue1(l, SCARG(uap, flags), retval);
}

/*
 * kevent(2) system call.
 */
int
kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    struct kevent *changes, size_t index, int n)
{

	return copyin(changelist + index, changes, n * sizeof(*changes));
}

int
kevent_put_events(void *ctx, struct kevent *events,
    struct kevent *eventlist, size_t index, int n)
{

	return copyout(events, eventlist + index, n * sizeof(*events));
}

static const struct kevent_ops kevent_native_ops = {
	.keo_private = NULL,
	.keo_fetch_timeout = copyin,
	.keo_fetch_changes = kevent_fetch_changes,
	.keo_put_events = kevent_put_events,
};

int
sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(size_t) nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(size_t) nevents;
		syscallarg(const struct timespec *) timeout;
	} */

	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
	    SCARG(uap, timeout), &kevent_native_ops);
}
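
/*
 * Illustrative userland sequence driving the syscalls above (a sketch;
 * `sock' is a hypothetical descriptor being monitored for reads):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, sock, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the event)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for it to fire)
 */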

int
kevent1(register_t *retval, int fd,
    const struct kevent *changelist, size_t nchanges,
    struct kevent *eventlist, size_t nevents,
    const struct timespec *timeout,
    const struct kevent_ops *keops)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct timespec	ts;
	size_t i, n, ichange;
	int nerrors, error;
	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
	file_t *fp;

	/* check that we're dealing with a kq */
	fp = fd_getfile(fd);
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		fd_putfile(fd);
		return (EBADF);
	}

	if (timeout != NULL) {
		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		timeout = &ts;
	}

	kq = fp->f_kqueue;
	nerrors = 0;
	ichange = 0;

	/* traverse list of events to register */
	while (nchanges > 0) {
		n = MIN(nchanges, __arraycount(kevbuf));
		error = (*keops->keo_fetch_changes)(keops->keo_private,
		    changelist, kevbuf, ichange, n);
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kevbuf[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp);
			if (!error && !(kevp->flags & EV_RECEIPT))
				continue;
			if (nevents == 0)
				goto done;
			kevp->flags = EV_ERROR;
			kevp->data = error;
			error = (*keops->keo_put_events)
			    (keops->keo_private, kevp,
			    eventlist, nerrors, 1);
			if (error)
				goto done;
			nevents--;
			nerrors++;
		}
		nchanges -= n;	/* update the results */
		ichange += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
	    kevbuf, __arraycount(kevbuf));
 done:
	fd_putfile(fd);
	return (error);
}

/*
 * Register a given kevent kev onto the kqueue
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct kfilter *kfilter;
	filedesc_t *fdp;
	file_t *fp;
	fdfile_t *ff;
	struct knote *kn, *newkn;
	struct klist *list;
	int error, fd, rv;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	fd = 0;

	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);

	rw_enter(&kqueue_filter_lock, RW_READER);
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found nor implemented */
		rw_exit(&kqueue_filter_lock);
		kmem_free(newkn, sizeof(*newkn));
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		/* validate descriptor */
		if (kev->ident > INT_MAX
		    || (fp = fd_getfile(fd = kev->ident)) == NULL) {
			rw_exit(&kqueue_filter_lock);
			kmem_free(newkn, sizeof(*newkn));
			return EBADF;
		}
		mutex_enter(&fdp->fd_lock);
		ff = fdp->fd_dt->dt_ff[fd];
		if (ff->ff_refcnt & FR_CLOSING) {
			error = EBADF;
			goto doneunlock;
		}
		if (fd <= fdp->fd_lastkqfile) {
			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		mutex_enter(&fdp->fd_lock);
		if (fdp->fd_knhashmask != 0) {
			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			/* create new knote */
			kn = newkn;
			newkn = NULL;
			kn->kn_obj = fp;
			kn->kn_id = kev->ident;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;
			kn->kn_kfilter = kfilter;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			KASSERT(kn->kn_fop != NULL);
			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			if (!kn->kn_fop->f_isfd) {
				/*
				 * If knote is not on an fd, store on
				 * internal hash table.
				 */
				if (fdp->fd_knhashmask == 0) {
					/* XXXAD can block with fd_lock held */
					fdp->fd_knhash = hashinit(KN_HASHSIZE,
					    HASH_LIST, true,
					    &fdp->fd_knhashmask);
				}
				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
				    fdp->fd_knhashmask)];
			} else {
				/* Otherwise, knote is on an fd. */
				list = (struct klist *)
				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
				if ((int)kn->kn_id > fdp->fd_lastkqfile)
					fdp->fd_lastkqfile = kn->kn_id;
			}
			SLIST_INSERT_HEAD(list, kn, kn_link);

			KERNEL_LOCK(1, NULL);		/* XXXSMP */
			error = (*kfilter->filtops->f_attach)(kn);
			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
			if (error != 0) {
#ifdef DEBUG
				struct proc *p = curlwp->l_proc;
				const file_t *ft = kn->kn_obj;
				printf("%s: %s[%d]: event type %d not "
				    "supported for file type %d/%s "
				    "(error %d)\n", __func__,
				    p->p_comm, p->p_pid,
				    kn->kn_filter, ft ? ft->f_type : -1,
				    ft ? ft->f_ops->fo_name : "?", error);
#endif

				/* knote_detach() drops fdp->fd_lock */
				knote_detach(kn, fdp, false);
				goto done;
			}
			atomic_inc_uint(&kfilter->refcnt);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			error = ENOENT;
			goto doneunlock;
		}
	}

	if (kev->flags & EV_DELETE) {
		/* knote_detach() drops fdp->fd_lock */
		knote_detach(kn, fdp, true);
		goto done;
	}

	/*
	 * The user may change some filter values after the
	 * initial EV_ADD, but doing so will not reset any
	 * filters which have already been triggered.
	 */
	kn->kn_kevent.udata = kev->udata;
	KASSERT(kn->kn_fop != NULL);
	if (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL) {
		KERNEL_LOCK(1, NULL);			/* XXXSMP */
		(*kn->kn_fop->f_touch)(kn, kev, EVENT_REGISTER);
		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here if we are trying to attach
	 * an event to a file descriptor that does not
	 * support events, and the attach routine is
	 * broken and does not return an error.
	 */
 done_ev_add:
	KASSERT(kn->kn_fop != NULL);
	KASSERT(kn->kn_fop->f_event != NULL);
	KERNEL_LOCK(1, NULL);			/* XXXSMP */
	rv = (*kn->kn_fop->f_event)(kn, 0);
	KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
	if (rv)
		knote_activate(kn);

	/* disable knote */
	if ((kev->flags & EV_DISABLE)) {
		mutex_spin_enter(&kq->kq_lock);
		if ((kn->kn_status & KN_DISABLED) == 0)
			kn->kn_status |= KN_DISABLED;
		mutex_spin_exit(&kq->kq_lock);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE)) {
		knote_enqueue(kn);
	}
 doneunlock:
	mutex_exit(&fdp->fd_lock);
 done:
	rw_exit(&kqueue_filter_lock);
	if (newkn != NULL)
		kmem_free(newkn, sizeof(*newkn));
	if (fp != NULL)
		fd_putfile(fd);
	return (error);
}

#if defined(DEBUG)
#define	KN_FMT(buf, kn) \
    (snprintb((buf), sizeof(buf), __KN_FLAG_BITS, (kn)->kn_status), buf)

static void
kqueue_check(const char *func, size_t line, const struct kqueue *kq)
{
	const struct knote *kn;
	int count;
	int nmarker;
	char buf[128];

	KASSERT(mutex_owned(&kq->kq_lock));
	KASSERT(kq->kq_count >= 0);

	count = 0;
	nmarker = 0;
	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
			panic("%s,%zu: kq=%p kn=%p !(MARKER|QUEUED) %s",
			    func, line, kq, kn, KN_FMT(buf, kn));
		}
		if ((kn->kn_status & KN_MARKER) == 0) {
			if (kn->kn_kq != kq) {
				panic("%s,%zu: kq=%p kn(%p) != kn->kq(%p): %s",
				    func, line, kq, kn, kn->kn_kq,
				    KN_FMT(buf, kn));
			}
			if ((kn->kn_status & KN_ACTIVE) == 0) {
				panic("%s,%zu: kq=%p kn=%p: !ACTIVE %s",
				    func, line, kq, kn, KN_FMT(buf, kn));
			}
			count++;
			if (count > kq->kq_count) {
				goto bad;
			}
		} else {
			nmarker++;
#if 0
			if (nmarker > 10000) {
				panic("%s,%zu: kq=%p too many markers: "
				    "%d != %d, nmarker=%d",
				    func, line, kq, kq->kq_count, count,
				    nmarker);
			}
#endif
		}
	}
	if (kq->kq_count != count) {
 bad:
		panic("%s,%zu: kq=%p kq->kq_count(%d) != count(%d), nmarker=%d",
		    func, line, kq, kq->kq_count, count, nmarker);
	}
}
#define	kq_check(a)	kqueue_check(__func__, __LINE__, (a))
#else /* defined(DEBUG) */
#define	kq_check(a)	/* nothing */
#endif /* defined(DEBUG) */

/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results into ulistp.  Timeout is determined by tsp; if
 * NULL, wait indefinitely, if 0 valued, perform a poll, otherwise wait
 * as appropriate.
1387 */ 1388 static int 1389 kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp, 1390 const struct timespec *tsp, register_t *retval, 1391 const struct kevent_ops *keops, struct kevent *kevbuf, 1392 size_t kevcnt) 1393 { 1394 struct kqueue *kq; 1395 struct kevent *kevp; 1396 struct timespec ats, sleepts; 1397 struct knote *kn, *marker, morker; 1398 size_t count, nkev, nevents; 1399 int timeout, error, touch, rv; 1400 filedesc_t *fdp; 1401 1402 fdp = curlwp->l_fd; 1403 kq = fp->f_kqueue; 1404 count = maxevents; 1405 nkev = nevents = error = 0; 1406 if (count == 0) { 1407 *retval = 0; 1408 return 0; 1409 } 1410 1411 if (tsp) { /* timeout supplied */ 1412 ats = *tsp; 1413 if (inittimeleft(&ats, &sleepts) == -1) { 1414 *retval = maxevents; 1415 return EINVAL; 1416 } 1417 timeout = tstohz(&ats); 1418 if (timeout <= 0) 1419 timeout = -1; /* do poll */ 1420 } else { 1421 /* no timeout, wait forever */ 1422 timeout = 0; 1423 } 1424 1425 memset(&morker, 0, sizeof(morker)); 1426 marker = &morker; 1427 marker->kn_status = KN_MARKER; 1428 mutex_spin_enter(&kq->kq_lock); 1429 retry: 1430 kevp = kevbuf; 1431 if (kq->kq_count == 0) { 1432 if (timeout >= 0) { 1433 error = cv_timedwait_sig(&kq->kq_cv, 1434 &kq->kq_lock, timeout); 1435 if (error == 0) { 1436 if (tsp == NULL || (timeout = 1437 gettimeleft(&ats, &sleepts)) > 0) 1438 goto retry; 1439 } else { 1440 /* don't restart after signals... */ 1441 if (error == ERESTART) 1442 error = EINTR; 1443 if (error == EWOULDBLOCK) 1444 error = 0; 1445 } 1446 } 1447 mutex_spin_exit(&kq->kq_lock); 1448 } else { 1449 /* mark end of knote list */ 1450 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1451 1452 /* 1453 * Acquire the fdp->fd_lock interlock to avoid races with 1454 * file creation/destruction from other threads. 1455 */ 1456 mutex_spin_exit(&kq->kq_lock); 1457 mutex_enter(&fdp->fd_lock); 1458 mutex_spin_enter(&kq->kq_lock); 1459 1460 while (count != 0) { 1461 kn = TAILQ_FIRST(&kq->kq_head); /* get next knote */ 1462 while ((kn->kn_status & KN_MARKER) != 0) { 1463 if (kn == marker) { 1464 /* it's our marker, stop */ 1465 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1466 if (count < maxevents || (tsp != NULL && 1467 (timeout = gettimeleft(&ats, 1468 &sleepts)) <= 0)) 1469 goto done; 1470 mutex_exit(&fdp->fd_lock); 1471 goto retry; 1472 } 1473 /* someone else's marker. */ 1474 kn = TAILQ_NEXT(kn, kn_tqe); 1475 } 1476 kq_check(kq); 1477 kq->kq_count--; 1478 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1479 kn->kn_status &= ~KN_QUEUED; 1480 kn->kn_status |= KN_BUSY; 1481 kq_check(kq); 1482 if (kn->kn_status & KN_DISABLED) { 1483 kn->kn_status &= ~KN_BUSY; 1484 /* don't want disabled events */ 1485 continue; 1486 } 1487 if ((kn->kn_flags & EV_ONESHOT) == 0) { 1488 mutex_spin_exit(&kq->kq_lock); 1489 KASSERT(kn->kn_fop != NULL); 1490 KASSERT(kn->kn_fop->f_event != NULL); 1491 KERNEL_LOCK(1, NULL); /* XXXSMP */ 1492 KASSERT(mutex_owned(&fdp->fd_lock)); 1493 rv = (*kn->kn_fop->f_event)(kn, 0); 1494 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */ 1495 mutex_spin_enter(&kq->kq_lock); 1496 /* Re-poll if note was re-enqueued. */ 1497 if ((kn->kn_status & KN_QUEUED) != 0) { 1498 kn->kn_status &= ~KN_BUSY; 1499 continue; 1500 } 1501 if (rv == 0) { 1502 /* 1503 * non-ONESHOT event that hasn't 1504 * triggered again, so de-queue. 1505 */ 1506 kn->kn_status &= ~(KN_ACTIVE|KN_BUSY); 1507 continue; 1508 } 1509 } 1510 KASSERT(kn->kn_fop != NULL); 1511 touch = (!kn->kn_fop->f_isfd && 1512 kn->kn_fop->f_touch != NULL); 1513 /* XXXAD should be got from f_event if !oneshot. 
			if (touch) {
				mutex_spin_exit(&kq->kq_lock);
				KERNEL_LOCK(1, NULL);	/* XXXSMP */
				(*kn->kn_fop->f_touch)(kn, kevp, EVENT_PROCESS);
				KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */
				mutex_spin_enter(&kq->kq_lock);
			} else {
				*kevp = kn->kn_kevent;
			}
			kevp++;
			nkev++;
			if (kn->kn_flags & EV_ONESHOT) {
				/* delete ONESHOT events after retrieval */
				kn->kn_status &= ~KN_BUSY;
				mutex_spin_exit(&kq->kq_lock);
				knote_detach(kn, fdp, true);
				mutex_enter(&fdp->fd_lock);
				mutex_spin_enter(&kq->kq_lock);
			} else if (kn->kn_flags & EV_CLEAR) {
				/* clear state after retrieval */
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				/*
				 * Manually clear knotes that weren't
				 * 'touch'ed.
				 */
				if (touch == 0) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else if (kn->kn_flags & EV_DISPATCH) {
				kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else {
				/* add event back on list */
				kq_check(kq);
				kn->kn_status |= KN_QUEUED;
				kn->kn_status &= ~KN_BUSY;
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
				kq->kq_count++;
				kq_check(kq);
			}
			if (nkev == kevcnt) {
				/* do copyouts in kevcnt chunks */
				mutex_spin_exit(&kq->kq_lock);
				mutex_exit(&fdp->fd_lock);
				error = (*keops->keo_put_events)
				    (keops->keo_private,
				    kevbuf, ulistp, nevents, nkev);
				mutex_enter(&fdp->fd_lock);
				mutex_spin_enter(&kq->kq_lock);
				nevents += nkev;
				nkev = 0;
				kevp = kevbuf;
			}
			count--;
			if (error != 0 || count == 0) {
				/* remove marker */
				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
				break;
			}
		}
 done:
		mutex_spin_exit(&kq->kq_lock);
		mutex_exit(&fdp->fd_lock);
	}
	if (nkev != 0) {
		/* copyout remaining events */
		error = (*keops->keo_put_events)(keops->keo_private,
		    kevbuf, ulistp, nevents, nkev);
	}
	*retval = maxevents - count;

	return error;
}

/*
 * fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported.  They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find name for filter, and return result in
 *				name, which is of size len.
 *	KFILTER_BYNAME		find filter for name.  len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(file_t *fp, u_long com, void *data)
{
	struct kfilter_mapping	*km;
	const struct kfilter	*kfilter;
	char			*name;
	int			error;

	km = data;
	error = 0;
	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL) {
			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
			rw_exit(&kqueue_filter_lock);
			error = copyoutstr(name, km->name, km->len, NULL);
		} else {
			rw_exit(&kqueue_filter_lock);
			error = ENOENT;
		}
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			break;
		}
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		rw_exit(&kqueue_filter_lock);
		break;

	default:
		error = ENOTTY;
		break;

	}
	kmem_free(name, KFILTER_MAXNAME);
	return (error);
}

/*
 * fileops fcntl method for a kqueue descriptor.
 */
static int
kqueue_fcntl(file_t *fp, u_int com, void *data)
{

	return (ENOTTY);
}

/*
 * fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(file_t *fp, int events)
{
	struct kqueue	*kq;
	int		revents;

	kq = fp->f_kqueue;

	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		mutex_spin_enter(&kq->kq_lock);
		if (kq->kq_count != 0) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &kq->kq_sel);
		}
		kq_check(kq);
		mutex_spin_exit(&kq->kq_lock);
	}

	return revents;
}

/*
 * fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(file_t *fp, struct stat *st)
{
	struct kqueue *kq;

	kq = fp->f_kqueue;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;

	return 0;
}

static void
kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
{
	struct knote *kn;
	filedesc_t *fdp;

	fdp = kq->kq_fdp;

	KASSERT(mutex_owned(&fdp->fd_lock));

	for (kn = SLIST_FIRST(list); kn != NULL;) {
		if (kq != kn->kn_kq) {
			kn = SLIST_NEXT(kn, kn_link);
			continue;
		}
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
		kn = SLIST_FIRST(list);
	}
}


/*
 * fileops close method for a kqueue descriptor.
 */
static int
kqueue_close(file_t *fp)
{
	struct kqueue *kq;
	filedesc_t *fdp;
	fdfile_t *ff;
	int i;

	kq = fp->f_kqueue;
	fp->f_kqueue = NULL;
	fp->f_type = 0;
	fdp = curlwp->l_fd;

	mutex_enter(&fdp->fd_lock);
	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
			continue;
		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
		}
	}
	mutex_exit(&fdp->fd_lock);

	KASSERT(kq->kq_count == 0);
	mutex_destroy(&kq->kq_lock);
	cv_destroy(&kq->kq_cv);
	seldestroy(&kq->kq_sel);
	kmem_free(kq, sizeof(*kq));

	return (0);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
static int
kqueue_kqfilter(file_t *fp, struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	KASSERT(fp == kn->kn_obj);

	if (kn->kn_filter != EVFILT_READ)
		return 1;

	kn->kn_fop = &kqread_filtops;
	mutex_enter(&kq->kq_lock);
	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
	mutex_exit(&kq->kq_lock);

	return 0;
}


/*
 * Walk down a list of knotes, activating them if their event has
 * triggered.  The caller's object lock (e.g. device driver lock)
 * must be held.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *tmpkn;

	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
		KASSERT(kn->kn_fop != NULL);
		KASSERT(kn->kn_fop->f_event != NULL);
		if ((*kn->kn_fop->f_event)(kn, hint))
			knote_activate(kn);
	}
}
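
/*
 * Illustrative driver-side notification (a sketch; the softc and field
 * names are hypothetical): a driver that has just queued data for a
 * reader would, while holding its own object lock, call
 *
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, NOTE_SUBMIT);
 *
 * which ends up running knote() above over the selinfo's klist.
 */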

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(int fd)
{
	struct klist *list;
	struct knote *kn;
	filedesc_t *fdp;

	fdp = curlwp->l_fd;
	mutex_enter(&fdp->fd_lock);
	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
	while ((kn = SLIST_FIRST(list)) != NULL) {
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
	}
	mutex_exit(&fdp->fd_lock);
}

/*
 * Drop knote.  Called with fdp->fd_lock held, and will drop before
 * returning.
 */
static void
knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
{
	struct klist *list;
	struct kqueue *kq;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);
	KASSERT(mutex_owned(&fdp->fd_lock));

	KASSERT(kn->kn_fop != NULL);
	/* Remove from monitored object. */
	if (dofop) {
		KASSERT(kn->kn_fop->f_detach != NULL);
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		(*kn->kn_fop->f_detach)(kn);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	}

	/* Remove from descriptor table. */
	if (kn->kn_fop->f_isfd)
		list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);

	/* Remove from kqueue. */
 again:
	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_QUEUED) != 0) {
		kq_check(kq);
		kq->kq_count--;
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kq_check(kq);
	} else if (kn->kn_status & KN_BUSY) {
		mutex_spin_exit(&kq->kq_lock);
		goto again;
	}
	mutex_spin_exit(&kq->kq_lock);

	mutex_exit(&fdp->fd_lock);
	if (kn->kn_fop->f_isfd)
		fd_putfile(kn->kn_id);
	atomic_dec_uint(&kn->kn_kfilter->refcnt);
	kmem_free(kn, sizeof(*kn));
}

/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_DISABLED) != 0) {
		kn->kn_status &= ~KN_DISABLED;
	}
	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Mark the knote as active and queue it on its kqueue, unless it is
 * already queued or disabled.
 */
static void
knote_activate(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}