/*	$NetBSD: kern_event.c,v 1.82 2014/09/05 09:20:59 matt Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.82 2014/09/05 09:20:59 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/atomic.h>

static int	kqueue_scan(file_t *, size_t, struct kevent *,
			    const struct timespec *, register_t *,
			    const struct kevent_ops *, struct kevent *,
			    size_t);
static int	kqueue_ioctl(file_t *, u_long, void *);
static int	kqueue_fcntl(file_t *, u_int, void *);
static int	kqueue_poll(file_t *, int);
static int	kqueue_kqfilter(file_t *, struct knote *);
static int	kqueue_stat(file_t *, struct stat *);
static int	kqueue_close(file_t *);
static int	kqueue_register(struct kqueue *, struct kevent *);
static void	kqueue_doclose(struct kqueue *, struct klist *, int);

static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
static void	knote_enqueue(struct knote *);
static void	knote_activate(struct knote *);

static void	filt_kqdetach(struct knote *);
static int	filt_kqueue(struct knote *, long hint);
static int	filt_procattach(struct knote *);
static void	filt_procdetach(struct knote *);
static int	filt_proc(struct knote *, long hint);
static int	filt_fileattach(struct knote *);
static void	filt_timerexpire(void *x);
static int	filt_timerattach(struct knote *);
static void	filt_timerdetach(struct knote *);
static int	filt_timer(struct knote *, long hint);

static const struct fileops kqueueops = {
	.fo_read = (void *)enxio,
	.fo_write = (void *)enxio,
	.fo_ioctl = kqueue_ioctl,
	.fo_fcntl = kqueue_fcntl,
	.fo_poll = kqueue_poll,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_restart = fnullop_restart,
};

static const struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static const struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static const struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static const struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
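
/*
 * The positional initializers above fill in the struct filterops
 * members f_isfd, f_attach, f_detach and f_event, in that order.
 * Written with designated initializers, a definition would look like
 * this sketch (filt_exampledetach and filt_example are hypothetical):
 *
 *	static const struct filterops example_filtops = {
 *		.f_isfd = 1,
 *		.f_attach = NULL,
 *		.f_detach = filt_exampledetach,
 *		.f_event = filt_example,
 *	};
 */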

static u_int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

#define	KN_HASHSIZE		64	/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 *
 * Note that 'refcnt' is meaningless for built-in filters.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	unsigned	refcnt;		/* reference count */
	const struct filterops *filtops;/* operations for filter */
	size_t		namelen;	/* length of name string */
};

/* System defined filters */
static struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0 },
	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
	{ NULL,			0,		0, NULL, 0 },
};

/* User defined kfilters */
static struct kfilter	*user_kfilters;		/* array */
static int		user_kfilterc;		/* current offset */
static int		user_kfiltermaxc;	/* max size so far */
static size_t		user_kfiltersz;		/* size of allocated memory */

/* Locks */
static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
static kmutex_t		kqueue_misc_lock;	/* miscellaneous */

static kauth_listener_t	kqueue_listener;

static int
kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KEVENT_FILTER)
		return result;

	if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
	    ISSET(p->p_flag, PK_SUGID)))
		return result;

	result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialize the kqueue subsystem.
 */
void
kqueue_init(void)
{

	rw_init(&kqueue_filter_lock);
	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);

	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    kqueue_listener_cb, NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return &sys_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	/* user filter slots have a NULL name if previously deregistered */
	for (i = 0; i < user_kfilterc; i++) {
		if (user_kfilters[i].name != NULL &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return &user_kfilters[i];
	}
	return NULL;
}

static struct kfilter *
kfilter_byname(const char *name)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return kfilter;

	return kfilter_byname_user(name);
}

/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	struct kfilter *kfilter;

	KASSERT(rw_lock_held(&kqueue_filter_lock));

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	size_t len;
	int i;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EEXIST);	/* already exists */
	}
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* too many */
	}

	for (i = 0; i < user_kfilterc; i++) {
		kfilter = &user_kfilters[i];
		if (kfilter->name == NULL) {
			/* Previously deregistered slot.  Reuse. */
			goto reuse;
		}
	}

	/* check if we need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/* Grow in KFILTER_EXTENT chunks. */
		user_kfiltermaxc += KFILTER_EXTENT;
		len = user_kfiltermaxc * sizeof(*kfilter);
		kfilter = kmem_alloc(len, KM_SLEEP);
		memset((char *)kfilter + user_kfiltersz, 0,
		    len - user_kfiltersz);
		if (user_kfilters != NULL) {
			memcpy(kfilter, user_kfilters, user_kfiltersz);
			kmem_free(user_kfilters, user_kfiltersz);
		}
		user_kfiltersz = len;
		user_kfilters = kfilter;
	}
	/* Adding new slot */
	kfilter = &user_kfilters[user_kfilterc++];
reuse:
	kfilter->namelen = strlen(name) + 1;
	kfilter->name = kmem_alloc(kfilter->namelen, KM_SLEEP);
	memcpy(__UNCONST(kfilter->name), name, kfilter->namelen);

	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;

	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));

	if (retfilter != NULL)
		*retfilter = kfilter->filter;
	rw_exit(&kqueue_filter_lock);

	return (0);
}
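
/*
 * Sketch of how a kernel module might use kfilter_register() and
 * kfilter_unregister() (the "EVFILT_EXAMPLE" name and example_filtops
 * are hypothetical):
 *
 *	int filter, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &filter);
 *	...
 *	error = kfilter_unregister("EVFILT_EXAMPLE");
 */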

/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	rw_enter(&kqueue_filter_lock, RW_WRITER);
	if (kfilter_byname_sys(name) != NULL) {
		rw_exit(&kqueue_filter_lock);
		return (EINVAL);	/* can't detach system filters */
	}

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL) {
		rw_exit(&kqueue_filter_lock);
		return (ENOENT);
	}
	if (kfilter->refcnt != 0) {
		rw_exit(&kqueue_filter_lock);
		return (EBUSY);
	}

	/* Cast away const (but we know it's safe). */
	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
	kfilter->name = NULL;	/* mark as `not implemented' */

	if (kfilter->filtops != NULL) {
		/* Cast away const (but we know it's safe). */
		kmem_free(__UNCONST(kfilter->filtops),
		    sizeof(*kfilter->filtops));
		kfilter->filtops = NULL;	/* mark as `not implemented' */
	}
	rw_exit(&kqueue_filter_lock);

	return (0);
}


/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors. Calls fileops kqfilter method for given file descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	file_t *fp;

	fp = kn->kn_obj;

	return (*fp->f_ops->fo_kqfilter)(fp, kn);
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;
	int rv;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	if (hint != NOTE_SUBMIT)
		mutex_spin_enter(&kq->kq_lock);
	kn->kn_data = kq->kq_count;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		mutex_spin_exit(&kq->kq_lock);

	return rv;
}
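
/*
 * A kqueue descriptor is itself pollable via the filter above: it is
 * readable whenever kq_count is non-zero.  A userland sketch that
 * watches one kqueue from another:
 *
 *	int inner = kqueue(), outer = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *	kevent(outer, NULL, 0, &kev, 1, NULL);
 *
 * The second kevent() call returns once the inner kqueue has pending
 * events.
 */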

/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	struct lwp *curl;

	curl = curlwp;

	mutex_enter(proc_lock);
	if (kn->kn_flags & EV_FLAG1) {
		/*
		 * NOTE_TRACK attaches to the child process too early
		 * for proc_find, so do a raw look up and check the state
		 * explicitly.
		 */
		p = proc_find_raw(kn->kn_id);
		if (p != NULL && p->p_stat != SIDL)
			p = NULL;
	} else {
		p = proc_find(kn->kn_id);
	}

	if (p == NULL) {
		mutex_exit(proc_lock);
		return ESRCH;
	}

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	mutex_enter(p->p_lock);
	mutex_exit(proc_lock);
	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
	    p, NULL, NULL, NULL) != 0) {
		mutex_exit(p->p_lock);
		return EACCES;
	}

	kn->kn_obj = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out. However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_obj;

	mutex_enter(p->p_lock);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	mutex_exit(p->p_lock);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event, fflag;
	struct kevent kev;
	struct kqueue *kq;
	int error;

	event = (u_int)hint & NOTE_PCTRLMASK;
	kq = kn->kn_kq;
	fflag = 0;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		fflag |= event;

	if (event == NOTE_EXIT) {
		/*
		 * Process is gone, so flag the event as finished.
		 *
		 * Detach the knote from watched process and mark
		 * it as such. We can't leave this to kqueue_scan(),
		 * since the process might not exist by then. And we
		 * have to do this now, since psignal KNOTE() is called
		 * also for zombies and we might end up reading freed
		 * memory if the kevent would already be picked up
		 * and knote g/c'ed.
		 */
		filt_procdetach(kn);

		mutex_spin_enter(&kq->kq_lock);
		kn->kn_status |= KN_DETACHED;
		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_fflags |= fflag;
		mutex_spin_exit(&kq->kq_lock);

		return 1;
	}

	mutex_spin_enter(&kq->kq_lock);
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		/*
		 * Process forked, and user wants to track the new process,
		 * so attach a new knote to it, and immediately report an
		 * event with the parent's pid. Register knote with new
		 * process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		mutex_spin_exit(&kq->kq_lock);
		error = kqueue_register(kq, &kev);
		mutex_spin_enter(&kq->kq_lock);
		if (error != 0)
			kn->kn_fflags |= NOTE_TRACKERR;
	}
	kn->kn_fflags |= fflag;
	fflag = kn->kn_fflags;
	mutex_spin_exit(&kq->kq_lock);

	return fflag != 0;
}
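
/*
 * Userland sketch of the machinery above: watch a process for exit
 * and fork, with children tracked automatically (kq and pid are
 * assumed to be a kqueue descriptor and a valid process id):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A child created after this registration is reported with NOTE_CHILD
 * set in fflags and the parent's pid in data.
 */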

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	int tticks;

	mutex_enter(&kqueue_misc_lock);
	kn->kn_data++;
	knote_activate(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tticks = mstohz(kn->kn_sdata);
		if (tticks <= 0)
			tticks = 1;
		callout_schedule((callout_t *)kn->kn_hook, tticks);
	}
	mutex_exit(&kqueue_misc_lock);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq;
	int tticks;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0)
			return EINVAL;
		tticks = 1;
	}

	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
		atomic_dec_uint(&kq_ncallouts);
		return ENOMEM;
	}
	callout_init(calloutp, CALLOUT_MPSAFE);

	kq = kn->kn_kq;
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_hook = calloutp;
	mutex_spin_exit(&kq->kq_lock);

	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	callout_t *calloutp;

	calloutp = (callout_t *)kn->kn_hook;
	callout_halt(calloutp, NULL);
	callout_destroy(calloutp);
	kmem_free(calloutp, sizeof(*calloutp));
	atomic_dec_uint(&kq_ncallouts);
}

static int
filt_timer(struct knote *kn, long hint)
{
	int rv;

	mutex_enter(&kqueue_misc_lock);
	rv = (kn->kn_data != 0);
	mutex_exit(&kqueue_misc_lock);

	return rv;
}
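
/*
 * Userland sketch of an EVFILT_TIMER registration; data carries the
 * period in milliseconds, and EV_CLEAR is set automatically, so the
 * expiry count reported in data is reset each time the event is read
 * (kq is assumed to be a kqueue descriptor):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */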

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, which
 * has the same effect as a filter using filt_seltrue() as its event
 * method.
 */
static void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops =
	{ 1, NULL, filt_seltruedetach, filt_seltrue };

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}
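
/*
 * A character device driver with no per-event state of its own can
 * point its d_kqfilter entry at seltrue_kqfilter(), e.g. for a
 * hypothetical "example" driver:
 *
 *	const struct cdevsw example_cdevsw = {
 *		...
 *		.d_kqfilter = seltrue_kqfilter,
 *		...
 *	};
 */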

/*
 * kqueue(2) system call.
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;
	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);
	fp->f_kqueue = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, fp, fd);
	return error;
}

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct lwp *l, const void *v, register_t *retval)
{
	return kqueue1(l, 0, retval);
}

int
sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	return kqueue1(l, SCARG(uap, flags), retval);
}

/*
 * kevent(2) system call.
 */
int
kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    struct kevent *changes, size_t index, int n)
{

	return copyin(changelist + index, changes, n * sizeof(*changes));
}

int
kevent_put_events(void *ctx, struct kevent *events,
    struct kevent *eventlist, size_t index, int n)
{

	return copyout(events, eventlist + index, n * sizeof(*events));
}

static const struct kevent_ops kevent_native_ops = {
	.keo_private = NULL,
	.keo_fetch_timeout = copyin,
	.keo_fetch_changes = kevent_fetch_changes,
	.keo_put_events = kevent_put_events,
};

int
sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(size_t) nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(size_t) nevents;
		syscallarg(const struct timespec *) timeout;
	} */

	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
	    SCARG(uap, timeout), &kevent_native_ops);
}

int
kevent1(register_t *retval, int fd,
    const struct kevent *changelist, size_t nchanges,
    struct kevent *eventlist, size_t nevents,
    const struct timespec *timeout,
    const struct kevent_ops *keops)
{
	struct kevent *kevp;
	struct kqueue *kq;
	struct timespec ts;
	size_t i, n, ichange;
	int nerrors, error;
	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
	file_t *fp;

	/* check that we're dealing with a kq */
	fp = fd_getfile(fd);
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		fd_putfile(fd);
		return (EBADF);
	}

	if (timeout != NULL) {
		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		timeout = &ts;
	}

	kq = fp->f_kqueue;
	nerrors = 0;
	ichange = 0;

	/* traverse list of events to register */
	while (nchanges > 0) {
		n = MIN(nchanges, __arraycount(kevbuf));
		error = (*keops->keo_fetch_changes)(keops->keo_private,
		    changelist, kevbuf, ichange, n);
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kevbuf[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp);
			if (error) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					error = (*keops->keo_put_events)
					    (keops->keo_private, kevp,
					    eventlist, nerrors, 1);
					if (error)
						goto done;
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;	/* update the results */
		ichange += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
	    kevbuf, __arraycount(kevbuf));
done:
	fd_putfile(fd);
	return (error);
}
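
/*
 * Typical userland use of the interfaces above: create a kqueue,
 * register a change, then collect events in a loop (fd is assumed to
 * be an open, pollable descriptor):
 *
 *	struct kevent ch, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ch, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if (kevent(kq, &ch, 1, NULL, 0, NULL) == -1)
 *		err(EXIT_FAILURE, "kevent: register");
 *	while (kevent(kq, NULL, 0, &ev, 1, NULL) > 0) {
 *		... ev.ident is ready; for EVFILT_READ, ev.data is ...
 *		... the amount of data available to read ...
 *	}
 */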

/*
 * Register a given kevent kev onto the kqueue
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct kfilter *kfilter;
	filedesc_t *fdp;
	file_t *fp;
	fdfile_t *ff;
	struct knote *kn, *newkn;
	struct klist *list;
	int error, fd, rv;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	fd = 0;

	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);

	rw_enter(&kqueue_filter_lock, RW_READER);
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found nor implemented */
		rw_exit(&kqueue_filter_lock);
		kmem_free(newkn, sizeof(*newkn));
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		fd = kev->ident;
		if ((fp = fd_getfile(fd)) == NULL) {
			rw_exit(&kqueue_filter_lock);
			kmem_free(newkn, sizeof(*newkn));
			return EBADF;
		}
		mutex_enter(&fdp->fd_lock);
		ff = fdp->fd_dt->dt_ff[fd];
		if (fd <= fdp->fd_lastkqfile) {
			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		mutex_enter(&fdp->fd_lock);
		if (fdp->fd_knhashmask != 0) {
			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
			}
		}
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			/* create new knote */
			kn = newkn;
			newkn = NULL;
			kn->kn_obj = fp;
			kn->kn_id = kev->ident;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;
			kn->kn_kfilter = kfilter;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			if (!kn->kn_fop->f_isfd) {
				/*
				 * If knote is not on an fd, store on
				 * internal hash table.
				 */
				if (fdp->fd_knhashmask == 0) {
					/* XXXAD can block with fd_lock held */
					fdp->fd_knhash = hashinit(KN_HASHSIZE,
					    HASH_LIST, true,
					    &fdp->fd_knhashmask);
				}
				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
				    fdp->fd_knhashmask)];
			} else {
				/* Otherwise, knote is on an fd. */
				list = (struct klist *)
				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
				if ((int)kn->kn_id > fdp->fd_lastkqfile)
					fdp->fd_lastkqfile = kn->kn_id;
			}
			SLIST_INSERT_HEAD(list, kn, kn_link);

			KERNEL_LOCK(1, NULL);		/* XXXSMP */
			error = (*kfilter->filtops->f_attach)(kn);
			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
			if (error != 0) {
#ifdef DIAGNOSTIC
				printf("%s: event not supported for file type"
				    " %d\n", __func__, fp ? fp->f_type : -1);
#endif
				/* knote_detach() drops fdp->fd_lock */
				knote_detach(kn, fdp, false);
				goto done;
			}
			atomic_inc_uint(&kfilter->refcnt);
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter that has already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}
		/*
		 * We can get here if we are trying to attach
		 * an event to a file descriptor that does not
		 * support events, and the attach routine is
		 * broken and does not return an error.
		 */
		KASSERT(kn->kn_fop->f_event != NULL);
		KERNEL_LOCK(1, NULL);			/* XXXSMP */
		rv = (*kn->kn_fop->f_event)(kn, 0);
		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
		if (rv)
			knote_activate(kn);
	} else {
		if (kn == NULL) {
			error = ENOENT;
			mutex_exit(&fdp->fd_lock);
			goto done;
		}
		if (kev->flags & EV_DELETE) {
			/* knote_detach() drops fdp->fd_lock */
			knote_detach(kn, fdp, true);
			goto done;
		}
	}

	/* disable knote */
	if ((kev->flags & EV_DISABLE)) {
		mutex_spin_enter(&kq->kq_lock);
		if ((kn->kn_status & KN_DISABLED) == 0)
			kn->kn_status |= KN_DISABLED;
		mutex_spin_exit(&kq->kq_lock);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE)) {
		knote_enqueue(kn);
	}
	mutex_exit(&fdp->fd_lock);
done:
	rw_exit(&kqueue_filter_lock);
	if (newkn != NULL)
		kmem_free(newkn, sizeof(*newkn));
	if (fp != NULL)
		fd_putfile(fd);
	return (error);
}

#if defined(DEBUG)
static void
kq_check(struct kqueue *kq)
{
	const struct knote *kn;
	int count;
	int nmarker;

	KASSERT(mutex_owned(&kq->kq_lock));
	KASSERT(kq->kq_count >= 0);

	count = 0;
	nmarker = 0;
	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
			panic("%s: kq=%p kn=%p inconsist 1", __func__, kq, kn);
		}
		if ((kn->kn_status & KN_MARKER) == 0) {
			if (kn->kn_kq != kq) {
				panic("%s: kq=%p kn=%p inconsist 2",
				    __func__, kq, kn);
			}
			if ((kn->kn_status & KN_ACTIVE) == 0) {
				panic("%s: kq=%p kn=%p: not active",
				    __func__, kq, kn);
			}
			count++;
			if (count > kq->kq_count) {
				goto bad;
			}
		} else {
			nmarker++;
#if 0
			if (nmarker > 10000) {
				panic("%s: kq=%p too many markers: %d != %d, "
				    "nmarker=%d",
				    __func__, kq, kq->kq_count, count,
				    nmarker);
			}
#endif
		}
	}
	if (kq->kq_count != count) {
bad:
		panic("%s: kq=%p inconsist 3: %d != %d, nmarker=%d",
		    __func__, kq, kq->kq_count, count, nmarker);
	}
}
#else /* defined(DEBUG) */
#define	kq_check(a)	/* nothing */
#endif /* defined(DEBUG) */

/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results in ulistp. Timeout is determined by tsp; if
 * NULL, wait indefinitely, if 0 valued, perform a poll, otherwise wait
 * as appropriate.
 */
static int
kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp,
    const struct timespec *tsp, register_t *retval,
    const struct kevent_ops *keops, struct kevent *kevbuf,
    size_t kevcnt)
{
	struct kqueue *kq;
	struct kevent *kevp;
	struct timespec ats, sleepts;
	struct knote *kn, *marker;
	size_t count, nkev, nevents;
	int timeout, error, rv;
	filedesc_t *fdp;

	fdp = curlwp->l_fd;
	kq = fp->f_kqueue;
	count = maxevents;
	nkev = nevents = error = 0;
	if (count == 0) {
		*retval = 0;
		return 0;
	}

	if (tsp) {				/* timeout supplied */
		ats = *tsp;
		if (inittimeleft(&ats, &sleepts) == -1) {
			*retval = maxevents;
			return EINVAL;
		}
		timeout = tstohz(&ats);
		if (timeout <= 0)
			timeout = -1;		/* do poll */
	} else {
		/* no timeout, wait forever */
		timeout = 0;
	}

	marker = kmem_zalloc(sizeof(*marker), KM_SLEEP);
	marker->kn_status = KN_MARKER;
	mutex_spin_enter(&kq->kq_lock);
retry:
	kevp = kevbuf;
	if (kq->kq_count == 0) {
		if (timeout >= 0) {
			error = cv_timedwait_sig(&kq->kq_cv,
			    &kq->kq_lock, timeout);
			if (error == 0) {
				if (tsp == NULL || (timeout =
				    gettimeleft(&ats, &sleepts)) > 0)
					goto retry;
			} else {
				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error == EWOULDBLOCK)
					error = 0;
			}
		}
	} else {
		/* mark end of knote list */
		TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);

		while (count != 0) {
			kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
			while ((kn->kn_status & KN_MARKER) != 0) {
				if (kn == marker) {
					/* it's our marker, stop */
					TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
					if (count < maxevents || (tsp != NULL &&
					    (timeout = gettimeleft(&ats,
					    &sleepts)) <= 0))
						goto done;
					goto retry;
				}
				/* someone else's marker. */
				kn = TAILQ_NEXT(kn, kn_tqe);
			}
			kq_check(kq);
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			kq->kq_count--;
			kn->kn_status &= ~KN_QUEUED;
			kq_check(kq);
			if (kn->kn_status & KN_DISABLED) {
				/* don't want disabled events */
				continue;
			}
			if ((kn->kn_flags & EV_ONESHOT) == 0) {
				mutex_spin_exit(&kq->kq_lock);
				KERNEL_LOCK(1, NULL);		/* XXXSMP */
				rv = (*kn->kn_fop->f_event)(kn, 0);
				KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
				mutex_spin_enter(&kq->kq_lock);
				/* Re-poll if note was re-enqueued. */
				if ((kn->kn_status & KN_QUEUED) != 0)
					continue;
				if (rv == 0) {
					/*
					 * non-ONESHOT event that hasn't
					 * triggered again, so de-queue.
					 */
					kn->kn_status &= ~KN_ACTIVE;
					continue;
				}
			}
			/* XXXAD should be got from f_event if !oneshot. */
			*kevp++ = kn->kn_kevent;
			nkev++;
			if (kn->kn_flags & EV_ONESHOT) {
				/* delete ONESHOT events after retrieval */
				mutex_spin_exit(&kq->kq_lock);
				mutex_enter(&fdp->fd_lock);
				knote_detach(kn, fdp, true);
				mutex_spin_enter(&kq->kq_lock);
			} else if (kn->kn_flags & EV_CLEAR) {
				/* clear state after retrieval */
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~KN_ACTIVE;
			} else {
				/* add event back on list */
				kq_check(kq);
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
				kq->kq_count++;
				kn->kn_status |= KN_QUEUED;
				kq_check(kq);
			}
			if (nkev == kevcnt) {
				/* do copyouts in kevcnt chunks */
				mutex_spin_exit(&kq->kq_lock);
				error = (*keops->keo_put_events)
				    (keops->keo_private,
				    kevbuf, ulistp, nevents, nkev);
				mutex_spin_enter(&kq->kq_lock);
				nevents += nkev;
				nkev = 0;
				kevp = kevbuf;
			}
			count--;
			if (error != 0 || count == 0) {
				/* remove marker */
				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
				break;
			}
		}
	}
done:
	mutex_spin_exit(&kq->kq_lock);
	if (marker != NULL)
		kmem_free(marker, sizeof(*marker));
	if (nkev != 0) {
		/* copyout remaining events */
		error = (*keops->keo_put_events)(keops->keo_private,
		    kevbuf, ulistp, nevents, nkev);
	}
	*retval = maxevents - count;

	return error;
}

/*
 * fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported. They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find the name for a filter id, and return
 *				the result in name, which is of size len.
 *	KFILTER_BYNAME		find the filter id for a name. len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(file_t *fp, u_long com, void *data)
{
	struct kfilter_mapping *km;
	const struct kfilter *kfilter;
	char *name;
	int error;

	km = data;
	error = 0;
	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL) {
			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
			rw_exit(&kqueue_filter_lock);
			error = copyoutstr(name, km->name, km->len, NULL);
		} else {
			rw_exit(&kqueue_filter_lock);
			error = ENOENT;
		}
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			break;
		}
		rw_enter(&kqueue_filter_lock, RW_READER);
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		rw_exit(&kqueue_filter_lock);
		break;

	default:
		error = ENOTTY;
		break;

	}
	kmem_free(name, KFILTER_MAXNAME);
	return (error);
}
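
/*
 * Userland sketch of the KFILTER_BYNAME ioctl, mapping a filter name
 * to its id (kq is assumed to be a kqueue descriptor):
 *
 *	struct kfilter_mapping km;
 *	char buf[KFILTER_MAXNAME] = "EVFILT_READ";
 *
 *	km.name = buf;
 *	km.len = sizeof(buf);
 *	if (ioctl(kq, KFILTER_BYNAME, &km) == 0)
 *		... km.filter now holds the filter id ...
 *
 * (km.len is only used by KFILTER_BYFILTER, which copies the name out.)
 */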

/*
 * fileops fcntl method for a kqueue descriptor.
 */
static int
kqueue_fcntl(file_t *fp, u_int com, void *data)
{

	return (ENOTTY);
}

/*
 * fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(file_t *fp, int events)
{
	struct kqueue *kq;
	int revents;

	kq = fp->f_kqueue;

	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		mutex_spin_enter(&kq->kq_lock);
		if (kq->kq_count != 0) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &kq->kq_sel);
		}
		kq_check(kq);
		mutex_spin_exit(&kq->kq_lock);
	}

	return revents;
}

/*
 * fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(file_t *fp, struct stat *st)
{
	struct kqueue *kq;

	kq = fp->f_kqueue;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;

	return 0;
}

static void
kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
{
	struct knote *kn;
	filedesc_t *fdp;

	fdp = kq->kq_fdp;

	KASSERT(mutex_owned(&fdp->fd_lock));

	for (kn = SLIST_FIRST(list); kn != NULL;) {
		if (kq != kn->kn_kq) {
			kn = SLIST_NEXT(kn, kn_link);
			continue;
		}
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
		kn = SLIST_FIRST(list);
	}
}


/*
 * fileops close method for a kqueue descriptor.
 */
static int
kqueue_close(file_t *fp)
{
	struct kqueue *kq;
	filedesc_t *fdp;
	fdfile_t *ff;
	int i;

	kq = fp->f_kqueue;
	fp->f_kqueue = NULL;
	fp->f_type = 0;
	fdp = curlwp->l_fd;

	mutex_enter(&fdp->fd_lock);
	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
			continue;
		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
		}
	}
	mutex_exit(&fdp->fd_lock);

	KASSERT(kq->kq_count == 0);
	mutex_destroy(&kq->kq_lock);
	cv_destroy(&kq->kq_cv);
	seldestroy(&kq->kq_sel);
	kmem_free(kq, sizeof(*kq));

	return (0);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
static int
kqueue_kqfilter(file_t *fp, struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_kqueue;

	KASSERT(fp == kn->kn_obj);

	if (kn->kn_filter != EVFILT_READ)
		return 1;

	kn->kn_fop = &kqread_filtops;
	mutex_enter(&kq->kq_lock);
	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
	mutex_exit(&kq->kq_lock);

	return 0;
}


/*
 * Walk down a list of knotes, activating them if their event has
 * triggered.  The caller's object lock (e.g. device driver lock)
 * must be held.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *tmpkn;

	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
		if ((*kn->kn_fop->f_event)(kn, hint))
			knote_activate(kn);
	}
}
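
/*
 * A typical caller is a driver's notification path: with its object
 * lock held, it calls selnotify() on its selinfo, which in turn runs
 * knote() over the attached klist.  A sketch for a hypothetical
 * driver softc:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_canread = true;
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_lock);
 */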

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(int fd)
{
	struct klist *list;
	struct knote *kn;
	filedesc_t *fdp;

	fdp = curlwp->l_fd;
	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
	mutex_enter(&fdp->fd_lock);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		knote_detach(kn, fdp, true);
		mutex_enter(&fdp->fd_lock);
	}
	mutex_exit(&fdp->fd_lock);
}

/*
 * Drop knote.  Called with fdp->fd_lock held, and will drop before
 * returning.
 */
static void
knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
{
	struct klist *list;
	struct kqueue *kq;

	kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);
	KASSERT(mutex_owned(&fdp->fd_lock));

	/* Remove from monitored object. */
	if (dofop) {
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		(*kn->kn_fop->f_detach)(kn);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	}

	/* Remove from descriptor table. */
	if (kn->kn_fop->f_isfd)
		list = (struct klist *)
		    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);

	/* Remove from kqueue. */
	/* XXXAD should verify not in use by kqueue_scan. */
	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_QUEUED) != 0) {
		kq_check(kq);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kq->kq_count--;
		kq_check(kq);
	}
	mutex_spin_exit(&kq->kq_lock);

	mutex_exit(&fdp->fd_lock);
	if (kn->kn_fop->f_isfd)
		fd_putfile(kn->kn_id);
	atomic_dec_uint(&kn->kn_kfilter->refcnt);
	kmem_free(kn, sizeof(*kn));
}

/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	if ((kn->kn_status & KN_DISABLED) != 0) {
		kn->kn_status &= ~KN_DISABLED;
	}
	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
		kq_check(kq);
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kn->kn_status |= KN_QUEUED;
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}

/*
 * Mark knote active, and queue it for delivery if it is not already
 * queued or disabled.
 */
static void
knote_activate(struct knote *kn)
{
	struct kqueue *kq;

	KASSERT((kn->kn_status & KN_MARKER) == 0);

	kq = kn->kn_kq;

	mutex_spin_enter(&kq->kq_lock);
	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
		kq_check(kq);
		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		kn->kn_status |= KN_QUEUED;
		kq->kq_count++;
		kq_check(kq);
		cv_broadcast(&kq->kq_cv);
		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
	}
	mutex_spin_exit(&kq->kq_lock);
}