/*	$NetBSD: kern_event.c,v 1.85 2016/01/31 04:40:01 christos Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.85 2016/01/31 04:40:01 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/atomic.h>

static int	kqueue_scan(file_t *, size_t, struct kevent *,
			    const struct timespec *, register_t *,
			    const struct kevent_ops *, struct kevent *,
			    size_t);
static int	kqueue_ioctl(file_t *, u_long, void *);
static int	kqueue_fcntl(file_t *, u_int, void *);
static int	kqueue_poll(file_t *, int);
static int	kqueue_kqfilter(file_t *, struct knote *);
static int	kqueue_stat(file_t *, struct stat *);
static int	kqueue_close(file_t *);
static int	kqueue_register(struct kqueue *, struct kevent *);
static void	kqueue_doclose(struct kqueue *, struct klist *, int);

static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
static void	knote_enqueue(struct knote *);
static void	knote_activate(struct knote *);

static void	filt_kqdetach(struct knote *);
static int	filt_kqueue(struct knote *, long hint);
static int	filt_procattach(struct knote *);
static void	filt_procdetach(struct knote *);
static int	filt_proc(struct knote *, long hint);
static int	filt_fileattach(struct knote *);
static void	filt_timerexpire(void *x);
static int	filt_timerattach(struct knote *);
static void	filt_timerdetach(struct knote *);
static int	filt_timer(struct knote *, long hint);

static const struct fileops kqueueops = {
	.fo_read = (void *)enxio,
	.fo_write = (void *)enxio,
	.fo_ioctl = kqueue_ioctl,
	.fo_fcntl = kqueue_fcntl,
	.fo_poll = kqueue_poll,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_restart = fnullop_restart,
};

static const struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static const struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static const struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static const struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static u_int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

#define	KN_HASHSIZE		64	/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 *
 * Note that 'refcnt' is meaningless for built-in filters.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	unsigned	refcnt;		/* reference count */
	const struct filterops *filtops;/* operations for filter */
	size_t		namelen;	/* length of name string */
};

/* System defined filters */
static struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0 },
	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
	{ NULL,			0,		0, NULL, 0 },
};

/* User defined kfilters */
static struct kfilter	*user_kfilters;		/* array */
static int		user_kfilterc;		/* current offset */
static int		user_kfiltermaxc;	/* max size so far */
static size_t		user_kfiltersz;		/* size of allocated memory */

/* Locks */
static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
static kmutex_t		kqueue_misc_lock;	/* miscellaneous */

static kauth_listener_t	kqueue_listener;

static int
kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KEVENT_FILTER)
		return result;

	if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
	    ISSET(p->p_flag, PK_SUGID)))
		return result;

	result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialize the kqueue subsystem.
 */
void
kqueue_init(void)
{

	rw_init(&kqueue_filter_lock);
	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);

	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    kqueue_listener_cb, NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return &sys_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	/* user filter slots have a NULL name if previously deregistered */
	for (i = 0; i < user_kfilterc; i++) {
		if (user_kfilters[i].name != NULL &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return &user_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname(const char *name)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return kfilter;

	return kfilter_byname_user(name);
}
/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	size_t len;
	int i;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EEXIST);	/* already exists */
	}
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* too many */
	}

	for (i = 0; i < user_kfilterc; i++) {
		kfilter = &user_kfilters[i];
		if (kfilter->name == NULL) {
			/* Previously deregistered slot.  Reuse. */
			goto reuse;
		}
	}

	/* check if we need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/* Grow in KFILTER_EXTENT chunks. */
		user_kfiltermaxc += KFILTER_EXTENT;
		len = user_kfiltermaxc * sizeof(*kfilter);
		kfilter = kmem_alloc(len, KM_SLEEP);
		memset((char *)kfilter + user_kfiltersz, 0,
		    len - user_kfiltersz);
		if (user_kfilters != NULL) {
			memcpy(kfilter, user_kfilters, user_kfiltersz);
			kmem_free(user_kfilters, user_kfiltersz);
		}
		user_kfiltersz = len;
		user_kfilters = kfilter;
	}
	/* Adding new slot */
	kfilter = &user_kfilters[user_kfilterc++];
reuse:
	kfilter->namelen = strlen(name) + 1;
	kfilter->name = kmem_alloc(kfilter->namelen, KM_SLEEP);
	memcpy(__UNCONST(kfilter->name), name, kfilter->namelen);

	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;

	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));

	if (retfilter != NULL)
		*retfilter = kfilter->filter;
	rw_exit(&kqueue_filter_lock);

	return (0);
}
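/*
 * Example (illustrative sketch only, not part of this file): a kernel
 * module could register a custom filter with the function above roughly
 * as follows.  The filter name, the filterops and its attach/detach/event
 * methods are all hypothetical.
 *
 *	static const struct filterops example_filtops =
 *		{ 0, example_attach, example_detach, example_event };
 *	int filter_id, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &filter_id);
 *
 * On success, userland can pass the returned id in a kevent's 'filter'
 * field; the registration is undone with
 * kfilter_unregister("EVFILT_EXAMPLE") below.
 */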
/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname_sys(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* can't detach system filters */
	}

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL) {
		rw_exit(&kqueue_filter_lock);
		return (ENOENT);
	}
	if (kfilter->refcnt != 0) {
		rw_exit(&kqueue_filter_lock);
		return (EBUSY);
	}

	/* Cast away const (but we know it's safe). */
	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
	kfilter->name = NULL;	/* mark as `not implemented' */

	if (kfilter->filtops != NULL) {
		/* Cast away const (but we know it's safe). */
		kmem_free(__UNCONST(kfilter->filtops),
		    sizeof(*kfilter->filtops));
		kfilter->filtops = NULL;	/* mark as `not implemented' */
	}
	rw_exit(&kqueue_filter_lock);

	return (0);
}


/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors.  Calls the fileops kqfilter method for the given file
 * descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	file_t *fp;

	fp = kn->kn_obj;

	return (*fp->f_ops->fo_kqfilter)(fp, kn);
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;
	int rv;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	if (hint != NOTE_SUBMIT)
		mutex_spin_enter(&kq->kq_lock);
	kn->kn_data = kq->kq_count;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		mutex_spin_exit(&kq->kq_lock);

	return rv;
}
/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	struct lwp *curl;

	curl = curlwp;

	mutex_enter(proc_lock);
	if (kn->kn_flags & EV_FLAG1) {
		/*
		 * NOTE_TRACK attaches to the child process too early
		 * for proc_find, so do a raw look up and check the state
		 * explicitly.
		 */
		p = proc_find_raw(kn->kn_id);
		if (p != NULL && p->p_stat != SIDL)
			p = NULL;
	} else {
		p = proc_find(kn->kn_id);
	}

	if (p == NULL) {
		mutex_exit(proc_lock);
		return ESRCH;
	}

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	mutex_enter(p->p_lock);
	mutex_exit(proc_lock);
	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
	    p, NULL, NULL, NULL) != 0) {
		mutex_exit(p->p_lock);
		return EACCES;
	}

	kn->kn_obj = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_obj;

	mutex_enter(p->p_lock);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	mutex_exit(p->p_lock);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event, fflag;
	struct kevent kev;
	struct kqueue *kq;
	int error;

	event = (u_int)hint & NOTE_PCTRLMASK;
	kq = kn->kn_kq;
	fflag = 0;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		fflag |= event;

	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_obj;

		if (p != NULL)
			kn->kn_data = p->p_xstat;
		/*
		 * Process is gone, so flag the event as finished.
		 *
		 * Detach the knote from the watched process and mark
		 * it as such.  We can't leave this to kqueue_scan(),
		 * since the process might not exist by then.  And we
		 * have to do this now, since KNOTE() in psignal is
		 * also called for zombies and we might end up reading
		 * freed memory if the kevent had already been picked
		 * up and the knote g/c'ed.
		 */
		filt_procdetach(kn);

		mutex_spin_enter(&kq->kq_lock);
		kn->kn_status |= KN_DETACHED;
		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_fflags |= fflag;
		mutex_spin_exit(&kq->kq_lock);

		return 1;
	}

	mutex_spin_enter(&kq->kq_lock);
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		/*
		 * Process forked, and user wants to track the new process,
		 * so attach a new knote to it, and immediately report an
		 * event with the parent's pid.  Register the knote with
		 * the new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		mutex_spin_exit(&kq->kq_lock);
		error = kqueue_register(kq, &kev);
		mutex_spin_enter(&kq->kq_lock);
		if (error != 0)
			kn->kn_fflags |= NOTE_TRACKERR;
	}
	kn->kn_fflags |= fflag;
	fflag = kn->kn_fflags;
	mutex_spin_exit(&kq->kq_lock);

	return fflag != 0;
}
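/*
 * Example (illustrative, userland view, not part of this file): how the
 * EVFILT_PROC filter above is typically driven.  'kq' and 'pid' are
 * assumed to come from kqueue(2) and fork(2).
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * With NOTE_TRACK set, filt_proc() attaches a knote to each forked
 * child (reported with NOTE_CHILD); if that attach fails, the parent's
 * knote is flagged with NOTE_TRACKERR.
 */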
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	int tticks;

	mutex_enter(&kqueue_misc_lock);
	kn->kn_data++;
	knote_activate(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tticks = mstohz(kn->kn_sdata);
		if (tticks <= 0)
			tticks = 1;
		callout_schedule((callout_t *)kn->kn_hook, tticks);
	}
	mutex_exit(&kqueue_misc_lock);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq;
	int tticks;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0)
			return EINVAL;
		tticks = 1;
	}

	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
		atomic_dec_uint(&kq_ncallouts);
		return ENOMEM;
	}
	callout_init(calloutp, CALLOUT_MPSAFE);

	kq = kn->kn_kq;
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_hook = calloutp;
	mutex_spin_exit(&kq->kq_lock);

	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	callout_t *calloutp;

	calloutp = (callout_t *)kn->kn_hook;
	callout_halt(calloutp, NULL);
	callout_destroy(calloutp);
	kmem_free(calloutp, sizeof(*calloutp));
	atomic_dec_uint(&kq_ncallouts);
}

static int
filt_timer(struct knote *kn, long hint)
{
	int rv;

	mutex_enter(&kqueue_misc_lock);
	rv = (kn->kn_data != 0);
	mutex_exit(&kqueue_misc_lock);

	return rv;
}
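/*
 * Example (illustrative, userland view, not part of this file): arming
 * the EVFILT_TIMER filter above.  Per filt_timerattach(), 'data' is the
 * timer period in milliseconds; EV_CLEAR is set automatically, so
 * kn_data accumulates the number of expirations between retrievals.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, 0);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * A zero period fails with EINVAL, and at most kq_calloutmax timers may
 * be outstanding system-wide (ENOMEM otherwise).
 */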
/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, with
 * the same effect as a filter using filt_seltrue() as its event method.
 */
static void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops =
	{ 1, NULL, filt_seltruedetach, filt_seltrue };

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}
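/*
 * Example (illustrative sketch, not part of this file): a device driver
 * that is always ready for I/O can point the kqfilter entry of its
 * switch table at seltrue_kqfilter() above.  'example_cdevsw' and its
 * other methods are hypothetical.
 *
 *	const struct cdevsw example_cdevsw = {
 *		.d_open = example_open,
 *		.d_close = example_close,
 *		.d_read = example_read,
 *		.d_write = example_write,
 *		.d_poll = seltrue,
 *		.d_kqfilter = seltrue_kqfilter,
 *		.d_flag = D_OTHER,
 *	};
 *
 * EVFILT_READ/EVFILT_WRITE knotes on such a device then always fire,
 * matching what seltrue() reports to poll(2) and select(2).
 */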
/*
 * kqueue(2) system call: common implementation for kqueue(2) and
 * kqueue1(2).
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;
	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);
	fp->f_kqueue = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, fp, fd);
	return error;
}

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct lwp *l, const void *v, register_t *retval)
{
	return kqueue1(l, 0, retval);
}

int
sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)	flags;
	} */
	return kqueue1(l, SCARG(uap, flags), retval);
}

/*
 * kevent(2) system call.
 */
int
kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    struct kevent *changes, size_t index, int n)
{

	return copyin(changelist + index, changes, n * sizeof(*changes));
}

int
kevent_put_events(void *ctx, struct kevent *events,
    struct kevent *eventlist, size_t index, int n)
{

	return copyout(events, eventlist + index, n * sizeof(*events));
}

static const struct kevent_ops kevent_native_ops = {
	.keo_private = NULL,
	.keo_fetch_timeout = copyin,
	.keo_fetch_changes = kevent_fetch_changes,
	.keo_put_events = kevent_put_events,
};

int
sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				fd;
		syscallarg(const struct kevent *)	changelist;
		syscallarg(size_t)			nchanges;
		syscallarg(struct kevent *)		eventlist;
		syscallarg(size_t)			nevents;
		syscallarg(const struct timespec *)	timeout;
	} */

	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
	    SCARG(uap, timeout), &kevent_native_ops);
}
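/*
 * Example (illustrative, userland view, not part of this file) of the
 * system calls entered above: create a kqueue, register interest in a
 * descriptor becoming readable, and block until one event arrives.
 * 'fd' is assumed to be an open descriptor.
 *
 *	struct kevent ch, ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&ch, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	n = kevent(kq, &ch, 1, &ev, 1, NULL);
 *	if (n > 0)
 *		printf("%lld bytes readable\n", (long long)ev.data);
 *
 * A NULL timeout blocks indefinitely.  Setting EV_RECEIPT in ch.flags
 * would force the registration status to be reported through the event
 * list; see the EV_ERROR handling in kevent1() below.
 */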
int
kevent1(register_t *retval, int fd,
    const struct kevent *changelist, size_t nchanges,
    struct kevent *eventlist, size_t nevents,
    const struct timespec *timeout,
    const struct kevent_ops *keops)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct timespec	ts;
	size_t i, n, ichange;
	int nerrors, error;
	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
	file_t *fp;

	/* check that we're dealing with a kq */
	fp = fd_getfile(fd);
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		fd_putfile(fd);
		return (EBADF);
	}

	if (timeout != NULL) {
		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		timeout = &ts;
	}

	kq = fp->f_kqueue;
	nerrors = 0;
	ichange = 0;

	/* traverse list of events to register */
	while (nchanges > 0) {
		n = MIN(nchanges, __arraycount(kevbuf));
		error = (*keops->keo_fetch_changes)(keops->keo_private,
		    changelist, kevbuf, ichange, n);
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kevbuf[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					error = (*keops->keo_put_events)
					    (keops->keo_private, kevp,
					    eventlist, nerrors, 1);
					if (error)
						goto done;
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;	/* update the results */
		ichange += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
	    kevbuf, __arraycount(kevbuf));
 done:
	fd_putfile(fd);
	return (error);
}

/*
 * Register a given kevent kev onto the kqueue
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct kfilter *kfilter;
	filedesc_t *fdp;
	file_t *fp;
	fdfile_t *ff;
	struct knote *kn, *newkn;
	struct klist *list;
	int error, fd, rv;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	fd = 0;

	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);

	rw_enter(&kqueue_filter_lock, RW_READER);
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found or not implemented */
		rw_exit(&kqueue_filter_lock);
		kmem_free(newkn, sizeof(*newkn));
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		fd = kev->ident;
		if ((fp = fd_getfile(fd)) == NULL) {
			rw_exit(&kqueue_filter_lock);
			kmem_free(newkn, sizeof(*newkn));
			return EBADF;
		}
		mutex_enter(&fdp->fd_lock);
		ff = fdp->fd_dt->dt_ff[fd];
		if (fd <= fdp->fd_lastkqfile) {
			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		mutex_enter(&fdp->fd_lock);
		if (fdp->fd_knhashmask != 0) {
			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			/* create new knote */
			kn = newkn;
			newkn = NULL;
			kn->kn_obj = fp;
			kn->kn_id = kev->ident;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;
			kn->kn_kfilter = kfilter;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			KASSERT(kn->kn_fop != NULL);
			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			if (!kn->kn_fop->f_isfd) {
				/*
				 * If knote is not on an fd, store on
				 * internal hash table.
				 */
				if (fdp->fd_knhashmask == 0) {
					/* XXXAD can block with fd_lock held */
					fdp->fd_knhash = hashinit(KN_HASHSIZE,
					    HASH_LIST, true,
					    &fdp->fd_knhashmask);
				}
				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
				    fdp->fd_knhashmask)];
			} else {
				/* Otherwise, knote is on an fd. */
				list = (struct klist *)
				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
				if ((int)kn->kn_id > fdp->fd_lastkqfile)
					fdp->fd_lastkqfile = kn->kn_id;
			}
			SLIST_INSERT_HEAD(list, kn, kn_link);

			KERNEL_LOCK(1, NULL);			/* XXXSMP */
			error = (*kfilter->filtops->f_attach)(kn);
			KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
			if (error != 0) {
#ifdef DIAGNOSTIC
				printf("%s: event not supported for file type"
				    " %d\n", __func__, fp ? fp->f_type : -1);
#endif
				/* knote_detach() drops fdp->fd_lock */
				knote_detach(kn, fdp, false);
				goto done;
			}
			atomic_inc_uint(&kfilter->refcnt);
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}
		/*
		 * We can get here if we are trying to attach
		 * an event to a file descriptor that does not
		 * support events, and the attach routine is
		 * broken and does not return an error.
		 */
		KASSERT(kn->kn_fop != NULL);
		KASSERT(kn->kn_fop->f_event != NULL);
		KERNEL_LOCK(1, NULL);			/* XXXSMP */
		rv = (*kn->kn_fop->f_event)(kn, 0);
		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
		if (rv)
			knote_activate(kn);
	} else {
		if (kn == NULL) {
			error = ENOENT;
			mutex_exit(&fdp->fd_lock);
			goto done;
		}
		if (kev->flags & EV_DELETE) {
			/* knote_detach() drops fdp->fd_lock */
			knote_detach(kn, fdp, true);
			goto done;
		}
	}

	/* disable knote */
	if ((kev->flags & EV_DISABLE)) {
		mutex_spin_enter(&kq->kq_lock);
		if ((kn->kn_status & KN_DISABLED) == 0)
			kn->kn_status |= KN_DISABLED;
		mutex_spin_exit(&kq->kq_lock);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE)) {
		knote_enqueue(kn);
	}
	mutex_exit(&fdp->fd_lock);
 done:
	rw_exit(&kqueue_filter_lock);
	if (newkn != NULL)
		kmem_free(newkn, sizeof(*newkn));
	if (fp != NULL)
		fd_putfile(fd);
	return (error);
}
#if defined(DEBUG)
static void
kq_check(struct kqueue *kq)
{
	const struct knote *kn;
	int count;
	int nmarker;

	KASSERT(mutex_owned(&kq->kq_lock));
	KASSERT(kq->kq_count >= 0);

	count = 0;
	nmarker = 0;
	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
			panic("%s: kq=%p kn=%p inconsist 1", __func__, kq, kn);
		}
		if ((kn->kn_status & KN_MARKER) == 0) {
			if (kn->kn_kq != kq) {
				panic("%s: kq=%p kn=%p inconsist 2",
				    __func__, kq, kn);
			}
			if ((kn->kn_status & KN_ACTIVE) == 0) {
				panic("%s: kq=%p kn=%p: not active",
				    __func__, kq, kn);
			}
			count++;
			if (count > kq->kq_count) {
				goto bad;
			}
		} else {
			nmarker++;
#if 0
			if (nmarker > 10000) {
				panic("%s: kq=%p too many markers: %d != %d, "
				    "nmarker=%d",
				    __func__, kq, kq->kq_count, count, nmarker);
			}
#endif
		}
	}
	if (kq->kq_count != count) {
 bad:
		panic("%s: kq=%p inconsist 3: %d != %d, nmarker=%d",
		    __func__, kq, kq->kq_count, count, nmarker);
	}
}
#else /* defined(DEBUG) */
#define	kq_check(a)	/* nothing */
#endif /* defined(DEBUG) */

/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results into ulistp.  Timeout is determined by tsp; if
 * NULL, wait indefinitely; if zero-valued, perform a poll; otherwise
 * wait as appropriate.
 */
static int
kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp,
    const struct timespec *tsp, register_t *retval,
    const struct kevent_ops *keops, struct kevent *kevbuf,
    size_t kevcnt)
{
	struct kqueue	*kq;
	struct kevent	*kevp;
	struct timespec	ats, sleepts;
	struct knote	*kn, *marker, morker;
	size_t		count, nkev, nevents;
	int		timeout, error, rv;
	filedesc_t	*fdp;

	fdp = curlwp->l_fd;
	kq = fp->f_kqueue;
	count = maxevents;
	nkev = nevents = error = 0;
	if (count == 0) {
		*retval = 0;
		return 0;
	}

	if (tsp) {				/* timeout supplied */
		ats = *tsp;
		if (inittimeleft(&ats, &sleepts) == -1) {
			*retval = maxevents;
			return EINVAL;
		}
		timeout = tstohz(&ats);
		if (timeout <= 0)
			timeout = -1;		/* do poll */
	} else {
		/* no timeout, wait forever */
		timeout = 0;
	}

	memset(&morker, 0, sizeof(morker));
	marker = &morker;
	marker->kn_status = KN_MARKER;
	mutex_spin_enter(&kq->kq_lock);
 retry:
	kevp = kevbuf;
	if (kq->kq_count == 0) {
		if (timeout >= 0) {
			error = cv_timedwait_sig(&kq->kq_cv,
			    &kq->kq_lock, timeout);
			if (error == 0) {
				if (tsp == NULL || (timeout =
				    gettimeleft(&ats, &sleepts)) > 0)
					goto retry;
			} else {
				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error == EWOULDBLOCK)
					error = 0;
			}
		}
	} else {
		/* mark end of knote list */
		TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);

		while (count != 0) {
			kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
			while ((kn->kn_status & KN_MARKER) != 0) {
				if (kn == marker) {
					/* it's our marker, stop */
					TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
					if (count < maxevents || (tsp != NULL &&
					    (timeout = gettimeleft(&ats,
					    &sleepts)) <= 0))
						goto done;
					goto retry;
				}
				/* someone else's marker. */
				kn = TAILQ_NEXT(kn, kn_tqe);
			}
			kq_check(kq);
			kq->kq_count--;
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_BUSY;
			kq_check(kq);
			if (kn->kn_status & KN_DISABLED) {
				kn->kn_status &= ~KN_BUSY;
				/* don't want disabled events */
				continue;
			}
			if ((kn->kn_flags & EV_ONESHOT) == 0) {
				mutex_spin_exit(&kq->kq_lock);
				KASSERT(kn->kn_fop != NULL);
				KASSERT(kn->kn_fop->f_event != NULL);
				KERNEL_LOCK(1, NULL);		/* XXXSMP */
				rv = (*kn->kn_fop->f_event)(kn, 0);
				KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
				mutex_spin_enter(&kq->kq_lock);
				/* Re-poll if note was re-enqueued. */
				if ((kn->kn_status & KN_QUEUED) != 0) {
					kn->kn_status &= ~KN_BUSY;
					continue;
				}
				if (rv == 0) {
					/*
					 * non-ONESHOT event that hasn't
					 * triggered again, so de-queue.
					 */
					kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
					continue;
				}
			}
			/* XXXAD should be got from f_event if !oneshot. */
			*kevp++ = kn->kn_kevent;
			nkev++;
			if (kn->kn_flags & EV_ONESHOT) {
				/* delete ONESHOT events after retrieval */
				mutex_spin_exit(&kq->kq_lock);
				mutex_enter(&fdp->fd_lock);
				kn->kn_status &= ~KN_BUSY;
				knote_detach(kn, fdp, true);
				mutex_spin_enter(&kq->kq_lock);
			} else if (kn->kn_flags & EV_CLEAR) {
				/* clear state after retrieval */
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else if (kn->kn_flags & EV_DISPATCH) {
				kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
			} else {
				/* add event back on list */
				kq_check(kq);
				kn->kn_status |= KN_QUEUED;
				kn->kn_status &= ~KN_BUSY;
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
				kq->kq_count++;
				kq_check(kq);
			}
			if (nkev == kevcnt) {
				/* do copyouts in kevcnt chunks */
				mutex_spin_exit(&kq->kq_lock);
				error = (*keops->keo_put_events)
				    (keops->keo_private,
				    kevbuf, ulistp, nevents, nkev);
				mutex_spin_enter(&kq->kq_lock);
				nevents += nkev;
				nkev = 0;
				kevp = kevbuf;
			}
			count--;
			if (error != 0 || count == 0) {
				/* remove marker */
				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
				break;
			}
		}
	}
 done:
	mutex_spin_exit(&kq->kq_lock);
	if (nkev != 0) {
		/* copyout remaining events */
		error = (*keops->keo_put_events)(keops->keo_private,
		    kevbuf, ulistp, nevents, nkev);
	}
	*retval = maxevents - count;

	return error;
}
/*
 * fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported. They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find name for filter, and return result in
 *				name, which is of size len.
 *	KFILTER_BYNAME		find filter for name. len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(file_t *fp, u_long com, void *data)
{
	struct kfilter_mapping	*km;
	const struct kfilter	*kfilter;
	char			*name;
	int			error;

	km = data;
	error = 0;
	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL) {
			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
			rw_exit(&kqueue_filter_lock);
			error = copyoutstr(name, km->name, km->len, NULL);
		} else {
			rw_exit(&kqueue_filter_lock);
			error = ENOENT;
		}
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			break;
		}
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		rw_exit(&kqueue_filter_lock);
		break;

	default:
		error = ENOTTY;
		break;

	}
	kmem_free(name, KFILTER_MAXNAME);
	return (error);
}
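/*
 * Example (illustrative, userland view, not part of this file) of the
 * ioctls handled above, mapping between filter names and ids through
 * struct kfilter_mapping; the buffer handling here is a sketch.
 *
 *	struct kfilter_mapping km;
 *	char buf[32];
 *
 *	strlcpy(buf, "EVFILT_READ", sizeof(buf));
 *	km.name = buf;
 *	km.len = sizeof(buf);
 *	if (ioctl(kq, KFILTER_BYNAME, &km) == 0)
 *		printf("filter id %u\n", km.filter);
 *
 *	km.filter = EVFILT_WRITE;
 *	if (ioctl(kq, KFILTER_BYFILTER, &km) == 0)
 *		printf("filter name %s\n", km.name);
 */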
/*
 * fileops fcntl method for a kqueue descriptor.
 */
static int
kqueue_fcntl(file_t *fp, u_int com, void *data)
{

	return (ENOTTY);
}

/*
 * fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(file_t *fp, int events)
{
	struct kqueue	*kq;
	int		revents;

	kq = fp->f_kqueue;

	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		mutex_spin_enter(&kq->kq_lock);
		if (kq->kq_count != 0) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &kq->kq_sel);
		}
		kq_check(kq);
		mutex_spin_exit(&kq->kq_lock);
	}

	return revents;
}

/*
 * fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(file_t *fp, struct stat *st)
{
	struct kqueue *kq;

	kq = fp->f_kqueue;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;

	return 0;
}

static void
kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
{
	struct knote *kn;
	filedesc_t *fdp;

	fdp = kq->kq_fdp;

	KASSERT(mutex_owned(&fdp->fd_lock));

	for (kn = SLIST_FIRST(list); kn != NULL;) {
		if (kq != kn->kn_kq) {
			kn = SLIST_NEXT(kn, kn_link);
			continue;
		}
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
		kn = SLIST_FIRST(list);
	}
}

/*
 * fileops close method for a kqueue descriptor.
 */
static int
kqueue_close(file_t *fp)
{
	struct kqueue *kq;
	filedesc_t *fdp;
	fdfile_t *ff;
	int i;

	kq = fp->f_kqueue;
	fp->f_kqueue = NULL;
	fp->f_type = 0;
	fdp = curlwp->l_fd;

	mutex_enter(&fdp->fd_lock);
	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
			continue;
		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
		}
	}
	mutex_exit(&fdp->fd_lock);

	KASSERT(kq->kq_count == 0);
	mutex_destroy(&kq->kq_lock);
	cv_destroy(&kq->kq_cv);
	seldestroy(&kq->kq_sel);
	kmem_free(kq, sizeof(*kq));

	return (0);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
static int
kqueue_kqfilter(file_t *fp, struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	KASSERT(fp == kn->kn_obj);

	if (kn->kn_filter != EVFILT_READ)
		return 1;

	kn->kn_fop = &kqread_filtops;
	mutex_enter(&kq->kq_lock);
	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
	mutex_exit(&kq->kq_lock);

	return 0;
}

/*
 * Walk down a list of knotes, activating them if their event has
 * triggered.  The caller's object lock (e.g. device driver lock)
 * must be held.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *tmpkn;

	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
		KASSERT(kn->kn_fop != NULL);
		KASSERT(kn->kn_fop->f_event != NULL);
		if ((*kn->kn_fop->f_event)(kn, hint))
			knote_activate(kn);
	}
}
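/*
 * Example (illustrative sketch, not part of this file): how a driver
 * typically feeds knote() above.  'sc' and its members are
 * hypothetical; the point is that the event is posted while holding the
 * lock that guards the klist, with filter-specific state in 'hint'.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_bytes_ready += n;
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_lock);
 *
 * Drivers that use a struct selinfo usually go through selnotify(),
 * which performs the equivalent of knote() on the selinfo's embedded
 * klist (as this file itself does below via kq_sel).
 */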
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(int fd)
{
	struct klist *list;
	struct knote *kn;
	filedesc_t *fdp;

	fdp = curlwp->l_fd;
	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
	mutex_enter(&fdp->fd_lock);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
	}
	mutex_exit(&fdp->fd_lock);
}

/*
 * Drop knote.  Called with fdp->fd_lock held, and will drop before
 * returning.
 */
static void
knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
{
	struct klist *list;
	struct kqueue *kq;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);
	KASSERT(mutex_owned(&fdp->fd_lock));

	KASSERT(kn->kn_fop != NULL);
	/* Remove from monitored object. */
	if (dofop) {
		KASSERT(kn->kn_fop->f_detach != NULL);
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		(*kn->kn_fop->f_detach)(kn);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	}

	/* Remove from descriptor table. */
	if (kn->kn_fop->f_isfd)
		list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);

	/* Remove from kqueue. */
 again:
	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_QUEUED) != 0) {
		kq_check(kq);
		kq->kq_count--;
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kq_check(kq);
	} else if (kn->kn_status & KN_BUSY) {
		mutex_spin_exit(&kq->kq_lock);
		goto again;
	}
	mutex_spin_exit(&kq->kq_lock);

	mutex_exit(&fdp->fd_lock);
	if (kn->kn_fop->f_isfd)
		fd_putfile(kn->kn_id);
	atomic_dec_uint(&kn->kn_kfilter->refcnt);
	kmem_free(kn, sizeof(*kn));
}

/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_DISABLED) != 0) {
		kn->kn_status &= ~KN_DISABLED;
	}
	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}
/*
 * Activate a knote: mark it active and, unless disabled or already
 * queued, queue it on its kqueue.
 */
static void
knote_activate(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
		kq_check(kq);
		kn->kn_status |= KN_QUEUED;
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}