1 /* $NetBSD: kern_event.c,v 1.72 2011/06/26 16:42:42 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /*- 33 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 
56	 *
57	 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
58	 */
59
60	#include <sys/cdefs.h>
61	__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.72 2011/06/26 16:42:42 christos Exp $");
62
63	#include <sys/param.h>
64	#include <sys/systm.h>
65	#include <sys/kernel.h>
66	#include <sys/proc.h>
67	#include <sys/file.h>
68	#include <sys/select.h>
69	#include <sys/queue.h>
70	#include <sys/event.h>
71	#include <sys/eventvar.h>
72	#include <sys/poll.h>
73	#include <sys/kmem.h>
74	#include <sys/stat.h>
75	#include <sys/filedesc.h>
76	#include <sys/syscallargs.h>
77	#include <sys/kauth.h>
78	#include <sys/conf.h>
79	#include <sys/atomic.h>
80
81	static int	kqueue_scan(file_t *, size_t, struct kevent *,
82			    const struct timespec *, register_t *,
83			    const struct kevent_ops *, struct kevent *,
84			    size_t);
85	static int	kqueue_ioctl(file_t *, u_long, void *);
86	static int	kqueue_fcntl(file_t *, u_int, void *);
87	static int	kqueue_poll(file_t *, int);
88	static int	kqueue_kqfilter(file_t *, struct knote *);
89	static int	kqueue_stat(file_t *, struct stat *);
90	static int	kqueue_close(file_t *);
91	static int	kqueue_register(struct kqueue *, struct kevent *);
92	static void	kqueue_doclose(struct kqueue *, struct klist *, int);
93
94	static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
95	static void	knote_enqueue(struct knote *);
96	static void	knote_activate(struct knote *);
97
98	static void	filt_kqdetach(struct knote *);
99	static int	filt_kqueue(struct knote *, long hint);
100	static int	filt_procattach(struct knote *);
101	static void	filt_procdetach(struct knote *);
102	static int	filt_proc(struct knote *, long hint);
103	static int	filt_fileattach(struct knote *);
104	static void	filt_timerexpire(void *x);
105	static int	filt_timerattach(struct knote *);
106	static void	filt_timerdetach(struct knote *);
107	static int	filt_timer(struct knote *, long hint);
108
109	static const struct fileops kqueueops = {
110		.fo_read = (void *)enxio,
111		.fo_write = (void *)enxio,
112		.fo_ioctl = kqueue_ioctl,
113		.fo_fcntl = kqueue_fcntl,
114		.fo_poll = kqueue_poll,
115		.fo_stat = kqueue_stat,
116		.fo_close = kqueue_close,
117		.fo_kqfilter = kqueue_kqfilter,
118		.fo_restart = fnullop_restart,
119	};
120
121	static const struct filterops kqread_filtops =
122		{ 1, NULL, filt_kqdetach, filt_kqueue };
123	static const struct filterops proc_filtops =
124		{ 0, filt_procattach, filt_procdetach, filt_proc };
125	static const struct filterops file_filtops =
126		{ 1, filt_fileattach, NULL, NULL };
127	static const struct filterops timer_filtops =
128		{ 0, filt_timerattach, filt_timerdetach, filt_timer };
129
130	static u_int	kq_ncallouts = 0;
131	static int	kq_calloutmax = (4 * 1024);
132
133	#define	KN_HASHSIZE	64		/* XXX should be tunable */
134	#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
135
136	extern const struct filterops sig_filtops;
137
138	/*
139	 * Table for all system-defined filters.
140	 * These should be listed in the numeric order of the EVFILT_* defines.
141	 * If filtops is NULL, the filter isn't implemented in NetBSD.
142	 * End of list is when name is NULL.
143	 *
144	 * Note that 'refcnt' is meaningless for built-in filters.
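 *
 * For reference, the positional filterops initializers above correspond,
 * in declaration order, to the named members of struct filterops used
 * throughout this file.  A designated-initializer spelling of
 * kqread_filtops (a sketch for illustration only) would read:
 *
 *	static const struct filterops kqread_filtops = {
 *		.f_isfd = 1,			(attaches to a descriptor)
 *		.f_attach = NULL,		(EVFILT_READ attach is routed
 *						 through file_filtops and the
 *						 file's fo_kqfilter method)
 *		.f_detach = filt_kqdetach,
 *		.f_event = filt_kqueue,
 *	};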
145 */ 146 struct kfilter { 147 const char *name; /* name of filter */ 148 uint32_t filter; /* id of filter */ 149 unsigned refcnt; /* reference count */ 150 const struct filterops *filtops;/* operations for filter */ 151 size_t namelen; /* length of name string */ 152 }; 153 154 /* System defined filters */ 155 static struct kfilter sys_kfilters[] = { 156 { "EVFILT_READ", EVFILT_READ, 0, &file_filtops, 0 }, 157 { "EVFILT_WRITE", EVFILT_WRITE, 0, &file_filtops, 0, }, 158 { "EVFILT_AIO", EVFILT_AIO, 0, NULL, 0 }, 159 { "EVFILT_VNODE", EVFILT_VNODE, 0, &file_filtops, 0 }, 160 { "EVFILT_PROC", EVFILT_PROC, 0, &proc_filtops, 0 }, 161 { "EVFILT_SIGNAL", EVFILT_SIGNAL, 0, &sig_filtops, 0 }, 162 { "EVFILT_TIMER", EVFILT_TIMER, 0, &timer_filtops, 0 }, 163 { NULL, 0, 0, NULL, 0 }, 164 }; 165 166 /* User defined kfilters */ 167 static struct kfilter *user_kfilters; /* array */ 168 static int user_kfilterc; /* current offset */ 169 static int user_kfiltermaxc; /* max size so far */ 170 static size_t user_kfiltersz; /* size of allocated memory */ 171 172 /* Locks */ 173 static krwlock_t kqueue_filter_lock; /* lock on filter lists */ 174 static kmutex_t kqueue_misc_lock; /* miscellaneous */ 175 176 static kauth_listener_t kqueue_listener; 177 178 static int 179 kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, 180 void *arg0, void *arg1, void *arg2, void *arg3) 181 { 182 struct proc *p; 183 int result; 184 185 result = KAUTH_RESULT_DEFER; 186 p = arg0; 187 188 if (action != KAUTH_PROCESS_KEVENT_FILTER) 189 return result; 190 191 if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) || 192 ISSET(p->p_flag, PK_SUGID))) 193 return result; 194 195 result = KAUTH_RESULT_ALLOW; 196 197 return result; 198 } 199 200 /* 201 * Initialize the kqueue subsystem. 202 */ 203 void 204 kqueue_init(void) 205 { 206 207 rw_init(&kqueue_filter_lock); 208 mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE); 209 210 kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS, 211 kqueue_listener_cb, NULL); 212 } 213 214 /* 215 * Find kfilter entry by name, or NULL if not found. 216 */ 217 static struct kfilter * 218 kfilter_byname_sys(const char *name) 219 { 220 int i; 221 222 KASSERT(rw_lock_held(&kqueue_filter_lock)); 223 224 for (i = 0; sys_kfilters[i].name != NULL; i++) { 225 if (strcmp(name, sys_kfilters[i].name) == 0) 226 return &sys_kfilters[i]; 227 } 228 return NULL; 229 } 230 231 static struct kfilter * 232 kfilter_byname_user(const char *name) 233 { 234 int i; 235 236 KASSERT(rw_lock_held(&kqueue_filter_lock)); 237 238 /* user filter slots have a NULL name if previously deregistered */ 239 for (i = 0; i < user_kfilterc ; i++) { 240 if (user_kfilters[i].name != NULL && 241 strcmp(name, user_kfilters[i].name) == 0) 242 return &user_kfilters[i]; 243 } 244 return NULL; 245 } 246 247 static struct kfilter * 248 kfilter_byname(const char *name) 249 { 250 struct kfilter *kfilter; 251 252 KASSERT(rw_lock_held(&kqueue_filter_lock)); 253 254 if ((kfilter = kfilter_byname_sys(name)) != NULL) 255 return kfilter; 256 257 return kfilter_byname_user(name); 258 } 259 260 /* 261 * Find kfilter entry by filter id, or NULL if not found. 262 * Assumes entries are indexed in filter id order, for speed. 
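 *
 * Concretely, the mapping the lookup below relies on is:
 *
 *	filter id			backing slot
 *	0 .. EVFILT_SYSCOUNT-1		sys_kfilters[filter]
 *	EVFILT_SYSCOUNT + n		user_kfilters[n]
 *
 * which holds because kfilter_register() hands out user filter ids as
 * (slot index + EVFILT_SYSCOUNT).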
263 */ 264 static struct kfilter * 265 kfilter_byfilter(uint32_t filter) 266 { 267 struct kfilter *kfilter; 268 269 KASSERT(rw_lock_held(&kqueue_filter_lock)); 270 271 if (filter < EVFILT_SYSCOUNT) /* it's a system filter */ 272 kfilter = &sys_kfilters[filter]; 273 else if (user_kfilters != NULL && 274 filter < EVFILT_SYSCOUNT + user_kfilterc) 275 /* it's a user filter */ 276 kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT]; 277 else 278 return (NULL); /* out of range */ 279 KASSERT(kfilter->filter == filter); /* sanity check! */ 280 return (kfilter); 281 } 282 283 /* 284 * Register a new kfilter. Stores the entry in user_kfilters. 285 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise. 286 * If retfilter != NULL, the new filterid is returned in it. 287 */ 288 int 289 kfilter_register(const char *name, const struct filterops *filtops, 290 int *retfilter) 291 { 292 struct kfilter *kfilter; 293 size_t len; 294 int i; 295 296 if (name == NULL || name[0] == '\0' || filtops == NULL) 297 return (EINVAL); /* invalid args */ 298 299 rw_enter(&kqueue_filter_lock, RW_WRITER); 300 if (kfilter_byname(name) != NULL) { 301 rw_exit(&kqueue_filter_lock); 302 return (EEXIST); /* already exists */ 303 } 304 if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) { 305 rw_exit(&kqueue_filter_lock); 306 return (EINVAL); /* too many */ 307 } 308 309 for (i = 0; i < user_kfilterc; i++) { 310 kfilter = &user_kfilters[i]; 311 if (kfilter->name == NULL) { 312 /* Previously deregistered slot. Reuse. */ 313 goto reuse; 314 } 315 } 316 317 /* check if need to grow user_kfilters */ 318 if (user_kfilterc + 1 > user_kfiltermaxc) { 319 /* Grow in KFILTER_EXTENT chunks. */ 320 user_kfiltermaxc += KFILTER_EXTENT; 321 len = user_kfiltermaxc * sizeof(*kfilter); 322 kfilter = kmem_alloc(len, KM_SLEEP); 323 memset((char *)kfilter + user_kfiltersz, 0, len - user_kfiltersz); 324 if (user_kfilters != NULL) { 325 memcpy(kfilter, user_kfilters, user_kfiltersz); 326 kmem_free(user_kfilters, user_kfiltersz); 327 } 328 user_kfiltersz = len; 329 user_kfilters = kfilter; 330 } 331 /* Adding new slot */ 332 kfilter = &user_kfilters[user_kfilterc++]; 333 reuse: 334 kfilter->namelen = strlen(name) + 1; 335 kfilter->name = kmem_alloc(kfilter->namelen, KM_SLEEP); 336 memcpy(__UNCONST(kfilter->name), name, kfilter->namelen); 337 338 kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT; 339 340 kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP); 341 memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops)); 342 343 if (retfilter != NULL) 344 *retfilter = kfilter->filter; 345 rw_exit(&kqueue_filter_lock); 346 347 return (0); 348 } 349 350 /* 351 * Unregister a kfilter previously registered with kfilter_register. 352 * This retains the filter id, but clears the name and frees filtops (filter 353 * operations), so that the number isn't reused during a boot. 354 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise. 
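 *
 * Taken together with kfilter_register() above, a (hypothetical) kernel
 * component would use the pair roughly as in the sketch below; the
 * filter name and filterops are invented for illustration:
 *
 *	static const struct filterops example_filtops =
 *		{ 0, example_attach, example_detach, example_event };
 *	static int example_filter;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &example_filter);
 *	...
 *	error = kfilter_unregister("EVFILT_EXAMPLE");
 *
 * kfilter_register() copies both the name string and the filterops
 * structure, so the caller's copies need not outlive the call.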
355 */ 356 int 357 kfilter_unregister(const char *name) 358 { 359 struct kfilter *kfilter; 360 361 if (name == NULL || name[0] == '\0') 362 return (EINVAL); /* invalid name */ 363 364 rw_enter(&kqueue_filter_lock, RW_WRITER); 365 if (kfilter_byname_sys(name) != NULL) { 366 rw_exit(&kqueue_filter_lock); 367 return (EINVAL); /* can't detach system filters */ 368 } 369 370 kfilter = kfilter_byname_user(name); 371 if (kfilter == NULL) { 372 rw_exit(&kqueue_filter_lock); 373 return (ENOENT); 374 } 375 if (kfilter->refcnt != 0) { 376 rw_exit(&kqueue_filter_lock); 377 return (EBUSY); 378 } 379 380 /* Cast away const (but we know it's safe. */ 381 kmem_free(__UNCONST(kfilter->name), kfilter->namelen); 382 kfilter->name = NULL; /* mark as `not implemented' */ 383 384 if (kfilter->filtops != NULL) { 385 /* Cast away const (but we know it's safe. */ 386 kmem_free(__UNCONST(kfilter->filtops), 387 sizeof(*kfilter->filtops)); 388 kfilter->filtops = NULL; /* mark as `not implemented' */ 389 } 390 rw_exit(&kqueue_filter_lock); 391 392 return (0); 393 } 394 395 396 /* 397 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file 398 * descriptors. Calls fileops kqfilter method for given file descriptor. 399 */ 400 static int 401 filt_fileattach(struct knote *kn) 402 { 403 file_t *fp; 404 405 fp = kn->kn_obj; 406 407 return (*fp->f_ops->fo_kqfilter)(fp, kn); 408 } 409 410 /* 411 * Filter detach method for EVFILT_READ on kqueue descriptor. 412 */ 413 static void 414 filt_kqdetach(struct knote *kn) 415 { 416 struct kqueue *kq; 417 418 kq = ((file_t *)kn->kn_obj)->f_data; 419 420 mutex_spin_enter(&kq->kq_lock); 421 SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext); 422 mutex_spin_exit(&kq->kq_lock); 423 } 424 425 /* 426 * Filter event method for EVFILT_READ on kqueue descriptor. 427 */ 428 /*ARGSUSED*/ 429 static int 430 filt_kqueue(struct knote *kn, long hint) 431 { 432 struct kqueue *kq; 433 int rv; 434 435 kq = ((file_t *)kn->kn_obj)->f_data; 436 437 if (hint != NOTE_SUBMIT) 438 mutex_spin_enter(&kq->kq_lock); 439 kn->kn_data = kq->kq_count; 440 rv = (kn->kn_data > 0); 441 if (hint != NOTE_SUBMIT) 442 mutex_spin_exit(&kq->kq_lock); 443 444 return rv; 445 } 446 447 /* 448 * Filter attach method for EVFILT_PROC. 449 */ 450 static int 451 filt_procattach(struct knote *kn) 452 { 453 struct proc *p, *curp; 454 struct lwp *curl; 455 456 curl = curlwp; 457 curp = curl->l_proc; 458 459 mutex_enter(proc_lock); 460 p = proc_find(kn->kn_id); 461 if (p == NULL) { 462 mutex_exit(proc_lock); 463 return ESRCH; 464 } 465 466 /* 467 * Fail if it's not owned by you, or the last exec gave us 468 * setuid/setgid privs (unless you're root). 469 */ 470 mutex_enter(p->p_lock); 471 mutex_exit(proc_lock); 472 if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER, 473 p, NULL, NULL, NULL) != 0) { 474 mutex_exit(p->p_lock); 475 return EACCES; 476 } 477 478 kn->kn_obj = p; 479 kn->kn_flags |= EV_CLEAR; /* automatically set */ 480 481 /* 482 * internal flag indicating registration done by kernel 483 */ 484 if (kn->kn_flags & EV_FLAG1) { 485 kn->kn_data = kn->kn_sdata; /* ppid */ 486 kn->kn_fflags = NOTE_CHILD; 487 kn->kn_flags &= ~EV_FLAG1; 488 } 489 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext); 490 mutex_exit(p->p_lock); 491 492 return 0; 493 } 494 495 /* 496 * Filter detach method for EVFILT_PROC. 497 * 498 * The knote may be attached to a different process, which may exit, 499 * leaving nothing for the knote to be attached to. 
So when the process
500	 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
501	 * it will be deleted when read out. However, as part of the knote deletion,
502	 * this routine is called, so a check is needed to avoid actually performing
503	 * a detach, because the original process might not exist any more.
504	 */
505	static void
506	filt_procdetach(struct knote *kn)
507	{
508		struct proc *p;
509
510		if (kn->kn_status & KN_DETACHED)
511			return;
512
513		p = kn->kn_obj;
514
515		mutex_enter(p->p_lock);
516		SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
517		mutex_exit(p->p_lock);
518	}
519
520	/*
521	 * Filter event method for EVFILT_PROC.
522	 */
523	static int
524	filt_proc(struct knote *kn, long hint)
525	{
526		u_int event, fflag;
527		struct kevent kev;
528		struct kqueue *kq;
529		int error;
530
531		event = (u_int)hint & NOTE_PCTRLMASK;
532		kq = kn->kn_kq;
533		fflag = 0;
534
535		/* If the user is interested in this event, record it. */
536		if (kn->kn_sfflags & event)
537			fflag |= event;
538
539		if (event == NOTE_EXIT) {
540			/*
541			 * Process is gone, so flag the event as finished.
542			 *
543			 * Detach the knote from the watched process and mark
544			 * it as such. We can't leave this to kqueue_scan(),
545			 * since the process might not exist by then. And we
546			 * have to do this now, since psignal KNOTE() is called
547			 * also for zombies and we might end up reading freed
548			 * memory if the kevent has already been picked up
549			 * and the knote g/c'ed.
550			 */
551			filt_procdetach(kn);
552
553			mutex_spin_enter(&kq->kq_lock);
554			kn->kn_status |= KN_DETACHED;
555			/* Mark as ONESHOT, so that the knote is g/c'ed when read */
556			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
557			kn->kn_fflags |= fflag;
558			mutex_spin_exit(&kq->kq_lock);
559
560			return 1;
561		}
562
563		mutex_spin_enter(&kq->kq_lock);
564		if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
565			/*
566			 * Process forked, and user wants to track the new process,
567			 * so attach a new knote to it, and immediately report an
568			 * event with the parent's pid. Register knote with new
569			 * process.
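		 *
		 * The EV_FLAG1 bit marks the registration below as
		 * kernel-initiated: filt_procattach() recognises it,
		 * reports NOTE_CHILD with the parent's pid in kn_data
		 * (taken from kev.data set up here), and then clears
		 * the flag again.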
570 */ 571 kev.ident = hint & NOTE_PDATAMASK; /* pid */ 572 kev.filter = kn->kn_filter; 573 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 574 kev.fflags = kn->kn_sfflags; 575 kev.data = kn->kn_id; /* parent */ 576 kev.udata = kn->kn_kevent.udata; /* preserve udata */ 577 mutex_spin_exit(&kq->kq_lock); 578 error = kqueue_register(kq, &kev); 579 mutex_spin_enter(&kq->kq_lock); 580 if (error != 0) 581 kn->kn_fflags |= NOTE_TRACKERR; 582 } 583 kn->kn_fflags |= fflag; 584 fflag = kn->kn_fflags; 585 mutex_spin_exit(&kq->kq_lock); 586 587 return fflag != 0; 588 } 589 590 static void 591 filt_timerexpire(void *knx) 592 { 593 struct knote *kn = knx; 594 int tticks; 595 596 mutex_enter(&kqueue_misc_lock); 597 kn->kn_data++; 598 knote_activate(kn); 599 if ((kn->kn_flags & EV_ONESHOT) == 0) { 600 tticks = mstohz(kn->kn_sdata); 601 callout_schedule((callout_t *)kn->kn_hook, tticks); 602 } 603 mutex_exit(&kqueue_misc_lock); 604 } 605 606 /* 607 * data contains amount of time to sleep, in milliseconds 608 */ 609 static int 610 filt_timerattach(struct knote *kn) 611 { 612 callout_t *calloutp; 613 struct kqueue *kq; 614 int tticks; 615 616 tticks = mstohz(kn->kn_sdata); 617 618 /* if the supplied value is under our resolution, use 1 tick */ 619 if (tticks == 0) { 620 if (kn->kn_sdata == 0) 621 return EINVAL; 622 tticks = 1; 623 } 624 625 if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax || 626 (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) { 627 atomic_dec_uint(&kq_ncallouts); 628 return ENOMEM; 629 } 630 callout_init(calloutp, CALLOUT_MPSAFE); 631 632 kq = kn->kn_kq; 633 mutex_spin_enter(&kq->kq_lock); 634 kn->kn_flags |= EV_CLEAR; /* automatically set */ 635 kn->kn_hook = calloutp; 636 mutex_spin_exit(&kq->kq_lock); 637 638 callout_reset(calloutp, tticks, filt_timerexpire, kn); 639 640 return (0); 641 } 642 643 static void 644 filt_timerdetach(struct knote *kn) 645 { 646 callout_t *calloutp; 647 648 calloutp = (callout_t *)kn->kn_hook; 649 callout_halt(calloutp, NULL); 650 callout_destroy(calloutp); 651 kmem_free(calloutp, sizeof(*calloutp)); 652 atomic_dec_uint(&kq_ncallouts); 653 } 654 655 static int 656 filt_timer(struct knote *kn, long hint) 657 { 658 int rv; 659 660 mutex_enter(&kqueue_misc_lock); 661 rv = (kn->kn_data != 0); 662 mutex_exit(&kqueue_misc_lock); 663 664 return rv; 665 } 666 667 /* 668 * filt_seltrue: 669 * 670 * This filter "event" routine simulates seltrue(). 671 */ 672 int 673 filt_seltrue(struct knote *kn, long hint) 674 { 675 676 /* 677 * We don't know how much data can be read/written, 678 * but we know that it *can* be. This is about as 679 * good as select/poll does as well. 680 */ 681 kn->kn_data = 0; 682 return (1); 683 } 684 685 /* 686 * This provides full kqfilter entry for device switch tables, which 687 * has same effect as filter using filt_seltrue() as filter method. 688 */ 689 static void 690 filt_seltruedetach(struct knote *kn) 691 { 692 /* Nothing to do */ 693 } 694 695 const struct filterops seltrue_filtops = 696 { 1, NULL, filt_seltruedetach, filt_seltrue }; 697 698 int 699 seltrue_kqfilter(dev_t dev, struct knote *kn) 700 { 701 switch (kn->kn_filter) { 702 case EVFILT_READ: 703 case EVFILT_WRITE: 704 kn->kn_fop = &seltrue_filtops; 705 break; 706 default: 707 return (EINVAL); 708 } 709 710 /* Nothing more to do */ 711 return (0); 712 } 713 714 /* 715 * kqueue(2) system call. 
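 *
 * As a point of reference, a minimal userland consumer of the code below
 * might look like this sketch (error handling omitted); per the comment
 * above filt_timerattach(), the EVFILT_TIMER data field is a period in
 * milliseconds, and EV_CLEAR is set on the knote automatically:
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);		register the timer
 *	for (;;) {
 *		kevent(kq, NULL, 0, &ev, 1, NULL);	wait for an expiry
 *		...
 *	}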
716 */ 717 static int 718 kqueue1(struct lwp *l, int flags, register_t *retval) 719 { 720 struct kqueue *kq; 721 file_t *fp; 722 int fd, error; 723 724 if ((error = fd_allocfile(&fp, &fd)) != 0) 725 return error; 726 fp->f_flag = FREAD | FWRITE | (flags & FNONBLOCK); 727 fp->f_type = DTYPE_KQUEUE; 728 fp->f_ops = &kqueueops; 729 kq = kmem_zalloc(sizeof(*kq), KM_SLEEP); 730 mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED); 731 cv_init(&kq->kq_cv, "kqueue"); 732 selinit(&kq->kq_sel); 733 TAILQ_INIT(&kq->kq_head); 734 fp->f_data = kq; 735 *retval = fd; 736 kq->kq_fdp = curlwp->l_fd; 737 fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0); 738 fd_affix(curproc, fp, fd); 739 return error; 740 } 741 742 /* 743 * kqueue(2) system call. 744 */ 745 int 746 sys_kqueue(struct lwp *l, const void *v, register_t *retval) 747 { 748 return kqueue1(l, 0, retval); 749 } 750 751 int 752 sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap, 753 register_t *retval) 754 { 755 /* { 756 syscallarg(int) flags; 757 } */ 758 return kqueue1(l, SCARG(uap, flags), retval); 759 } 760 761 /* 762 * kevent(2) system call. 763 */ 764 int 765 kevent_fetch_changes(void *private, const struct kevent *changelist, 766 struct kevent *changes, size_t index, int n) 767 { 768 769 return copyin(changelist + index, changes, n * sizeof(*changes)); 770 } 771 772 int 773 kevent_put_events(void *private, struct kevent *events, 774 struct kevent *eventlist, size_t index, int n) 775 { 776 777 return copyout(events, eventlist + index, n * sizeof(*events)); 778 } 779 780 static const struct kevent_ops kevent_native_ops = { 781 .keo_private = NULL, 782 .keo_fetch_timeout = copyin, 783 .keo_fetch_changes = kevent_fetch_changes, 784 .keo_put_events = kevent_put_events, 785 }; 786 787 int 788 sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap, 789 register_t *retval) 790 { 791 /* { 792 syscallarg(int) fd; 793 syscallarg(const struct kevent *) changelist; 794 syscallarg(size_t) nchanges; 795 syscallarg(struct kevent *) eventlist; 796 syscallarg(size_t) nevents; 797 syscallarg(const struct timespec *) timeout; 798 } */ 799 800 return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist), 801 SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents), 802 SCARG(uap, timeout), &kevent_native_ops); 803 } 804 805 int 806 kevent1(register_t *retval, int fd, 807 const struct kevent *changelist, size_t nchanges, 808 struct kevent *eventlist, size_t nevents, 809 const struct timespec *timeout, 810 const struct kevent_ops *keops) 811 { 812 struct kevent *kevp; 813 struct kqueue *kq; 814 struct timespec ts; 815 size_t i, n, ichange; 816 int nerrors, error; 817 struct kevent kevbuf[8]; /* approx 300 bytes on 64-bit */ 818 file_t *fp; 819 820 /* check that we're dealing with a kq */ 821 fp = fd_getfile(fd); 822 if (fp == NULL) 823 return (EBADF); 824 825 if (fp->f_type != DTYPE_KQUEUE) { 826 fd_putfile(fd); 827 return (EBADF); 828 } 829 830 if (timeout != NULL) { 831 error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts)); 832 if (error) 833 goto done; 834 timeout = &ts; 835 } 836 837 kq = (struct kqueue *)fp->f_data; 838 nerrors = 0; 839 ichange = 0; 840 841 /* traverse list of events to register */ 842 while (nchanges > 0) { 843 n = MIN(nchanges, __arraycount(kevbuf)); 844 error = (*keops->keo_fetch_changes)(keops->keo_private, 845 changelist, kevbuf, ichange, n); 846 if (error) 847 goto done; 848 for (i = 0; i < n; i++) { 849 kevp = &kevbuf[i]; 850 kevp->flags &= ~EV_SYSFLAGS; 851 /* register each knote */ 852 error = 
kqueue_register(kq, kevp); 853 if (error) { 854 if (nevents != 0) { 855 kevp->flags = EV_ERROR; 856 kevp->data = error; 857 error = (*keops->keo_put_events) 858 (keops->keo_private, kevp, 859 eventlist, nerrors, 1); 860 if (error) 861 goto done; 862 nevents--; 863 nerrors++; 864 } else { 865 goto done; 866 } 867 } 868 } 869 nchanges -= n; /* update the results */ 870 ichange += n; 871 } 872 if (nerrors) { 873 *retval = nerrors; 874 error = 0; 875 goto done; 876 } 877 878 /* actually scan through the events */ 879 error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops, 880 kevbuf, __arraycount(kevbuf)); 881 done: 882 fd_putfile(fd); 883 return (error); 884 } 885 886 /* 887 * Register a given kevent kev onto the kqueue 888 */ 889 static int 890 kqueue_register(struct kqueue *kq, struct kevent *kev) 891 { 892 struct kfilter *kfilter; 893 filedesc_t *fdp; 894 file_t *fp; 895 fdfile_t *ff; 896 struct knote *kn, *newkn; 897 struct klist *list; 898 int error, fd, rv; 899 900 fdp = kq->kq_fdp; 901 fp = NULL; 902 kn = NULL; 903 error = 0; 904 fd = 0; 905 906 newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP); 907 908 rw_enter(&kqueue_filter_lock, RW_READER); 909 kfilter = kfilter_byfilter(kev->filter); 910 if (kfilter == NULL || kfilter->filtops == NULL) { 911 /* filter not found nor implemented */ 912 rw_exit(&kqueue_filter_lock); 913 kmem_free(newkn, sizeof(*newkn)); 914 return (EINVAL); 915 } 916 917 mutex_enter(&fdp->fd_lock); 918 919 /* search if knote already exists */ 920 if (kfilter->filtops->f_isfd) { 921 /* monitoring a file descriptor */ 922 fd = kev->ident; 923 if ((fp = fd_getfile(fd)) == NULL) { 924 mutex_exit(&fdp->fd_lock); 925 rw_exit(&kqueue_filter_lock); 926 kmem_free(newkn, sizeof(*newkn)); 927 return EBADF; 928 } 929 ff = fdp->fd_dt->dt_ff[fd]; 930 if (fd <= fdp->fd_lastkqfile) { 931 SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) { 932 if (kq == kn->kn_kq && 933 kev->filter == kn->kn_filter) 934 break; 935 } 936 } 937 } else { 938 /* 939 * not monitoring a file descriptor, so 940 * lookup knotes in internal hash table 941 */ 942 if (fdp->fd_knhashmask != 0) { 943 list = &fdp->fd_knhash[ 944 KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)]; 945 SLIST_FOREACH(kn, list, kn_link) { 946 if (kev->ident == kn->kn_id && 947 kq == kn->kn_kq && 948 kev->filter == kn->kn_filter) 949 break; 950 } 951 } 952 } 953 954 /* 955 * kn now contains the matching knote, or NULL if no match 956 */ 957 if (kev->flags & EV_ADD) { 958 if (kn == NULL) { 959 /* create new knote */ 960 kn = newkn; 961 newkn = NULL; 962 kn->kn_obj = fp; 963 kn->kn_kq = kq; 964 kn->kn_fop = kfilter->filtops; 965 kn->kn_kfilter = kfilter; 966 kn->kn_sfflags = kev->fflags; 967 kn->kn_sdata = kev->data; 968 kev->fflags = 0; 969 kev->data = 0; 970 kn->kn_kevent = *kev; 971 972 /* 973 * apply reference count to knote structure, and 974 * do not release it at the end of this routine. 975 */ 976 fp = NULL; 977 978 if (!kn->kn_fop->f_isfd) { 979 /* 980 * If knote is not on an fd, store on 981 * internal hash table. 982 */ 983 if (fdp->fd_knhashmask == 0) { 984 /* XXXAD can block with fd_lock held */ 985 fdp->fd_knhash = hashinit(KN_HASHSIZE, 986 HASH_LIST, true, 987 &fdp->fd_knhashmask); 988 } 989 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, 990 fdp->fd_knhashmask)]; 991 } else { 992 /* Otherwise, knote is on an fd. 
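			 * fd_lastkqfile, updated just below, tracks the
			 * highest descriptor that has had a knote attached
			 * in this descriptor table; the lookup earlier in
			 * this function only walks ff_knlist for
			 * descriptors at or below it, and kqueue_close()
			 * stops its sweep there as well.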
*/ 993 list = (struct klist *) 994 &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist; 995 if ((int)kn->kn_id > fdp->fd_lastkqfile) 996 fdp->fd_lastkqfile = kn->kn_id; 997 } 998 SLIST_INSERT_HEAD(list, kn, kn_link); 999 1000 KERNEL_LOCK(1, NULL); /* XXXSMP */ 1001 error = (*kfilter->filtops->f_attach)(kn); 1002 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */ 1003 if (error != 0) { 1004 /* knote_detach() drops fdp->fd_lock */ 1005 knote_detach(kn, fdp, false); 1006 goto done; 1007 } 1008 atomic_inc_uint(&kfilter->refcnt); 1009 } else { 1010 /* 1011 * The user may change some filter values after the 1012 * initial EV_ADD, but doing so will not reset any 1013 * filter which have already been triggered. 1014 */ 1015 kn->kn_sfflags = kev->fflags; 1016 kn->kn_sdata = kev->data; 1017 kn->kn_kevent.udata = kev->udata; 1018 } 1019 KERNEL_LOCK(1, NULL); /* XXXSMP */ 1020 rv = (*kn->kn_fop->f_event)(kn, 0); 1021 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */ 1022 if (rv) 1023 knote_activate(kn); 1024 } else { 1025 if (kn == NULL) { 1026 error = ENOENT; 1027 mutex_exit(&fdp->fd_lock); 1028 goto done; 1029 } 1030 if (kev->flags & EV_DELETE) { 1031 /* knote_detach() drops fdp->fd_lock */ 1032 knote_detach(kn, fdp, true); 1033 goto done; 1034 } 1035 } 1036 1037 /* disable knote */ 1038 if ((kev->flags & EV_DISABLE)) { 1039 mutex_spin_enter(&kq->kq_lock); 1040 if ((kn->kn_status & KN_DISABLED) == 0) 1041 kn->kn_status |= KN_DISABLED; 1042 mutex_spin_exit(&kq->kq_lock); 1043 } 1044 1045 /* enable knote */ 1046 if ((kev->flags & EV_ENABLE)) { 1047 knote_enqueue(kn); 1048 } 1049 mutex_exit(&fdp->fd_lock); 1050 done: 1051 rw_exit(&kqueue_filter_lock); 1052 if (newkn != NULL) 1053 kmem_free(newkn, sizeof(*newkn)); 1054 if (fp != NULL) 1055 fd_putfile(fd); 1056 return (error); 1057 } 1058 1059 #if defined(DEBUG) 1060 static void 1061 kq_check(struct kqueue *kq) 1062 { 1063 const struct knote *kn; 1064 int count; 1065 int nmarker; 1066 1067 KASSERT(mutex_owned(&kq->kq_lock)); 1068 KASSERT(kq->kq_count >= 0); 1069 1070 count = 0; 1071 nmarker = 0; 1072 TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) { 1073 if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) { 1074 panic("%s: kq=%p kn=%p inconsist 1", __func__, kq, kn); 1075 } 1076 if ((kn->kn_status & KN_MARKER) == 0) { 1077 if (kn->kn_kq != kq) { 1078 panic("%s: kq=%p kn=%p inconsist 2", 1079 __func__, kq, kn); 1080 } 1081 if ((kn->kn_status & KN_ACTIVE) == 0) { 1082 panic("%s: kq=%p kn=%p: not active", 1083 __func__, kq, kn); 1084 } 1085 count++; 1086 if (count > kq->kq_count) { 1087 goto bad; 1088 } 1089 } else { 1090 nmarker++; 1091 #if 0 1092 if (nmarker > 10000) { 1093 panic("%s: kq=%p too many markers: %d != %d, " 1094 "nmarker=%d", 1095 __func__, kq, kq->kq_count, count, nmarker); 1096 } 1097 #endif 1098 } 1099 } 1100 if (kq->kq_count != count) { 1101 bad: 1102 panic("%s: kq=%p inconsist 3: %d != %d, nmarker=%d", 1103 __func__, kq, kq->kq_count, count, nmarker); 1104 } 1105 } 1106 #else /* defined(DEBUG) */ 1107 #define kq_check(a) /* nothing */ 1108 #endif /* defined(DEBUG) */ 1109 1110 /* 1111 * Scan through the list of events on fp (for a maximum of maxevents), 1112 * returning the results in to ulistp. Timeout is determined by tsp; if 1113 * NULL, wait indefinitely, if 0 valued, perform a poll, otherwise wait 1114 * as appropriate. 
1115 */ 1116 static int 1117 kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp, 1118 const struct timespec *tsp, register_t *retval, 1119 const struct kevent_ops *keops, struct kevent *kevbuf, 1120 size_t kevcnt) 1121 { 1122 struct kqueue *kq; 1123 struct kevent *kevp; 1124 struct timespec ats, sleepts; 1125 struct knote *kn, *marker; 1126 size_t count, nkev, nevents; 1127 int timeout, error, rv; 1128 filedesc_t *fdp; 1129 1130 fdp = curlwp->l_fd; 1131 kq = fp->f_data; 1132 count = maxevents; 1133 nkev = nevents = error = 0; 1134 if (count == 0) { 1135 *retval = 0; 1136 return 0; 1137 } 1138 1139 if (tsp) { /* timeout supplied */ 1140 ats = *tsp; 1141 if (inittimeleft(&ats, &sleepts) == -1) { 1142 *retval = maxevents; 1143 return EINVAL; 1144 } 1145 timeout = tstohz(&ats); 1146 if (timeout <= 0) 1147 timeout = -1; /* do poll */ 1148 } else { 1149 /* no timeout, wait forever */ 1150 timeout = 0; 1151 } 1152 1153 marker = kmem_zalloc(sizeof(*marker), KM_SLEEP); 1154 marker->kn_status = KN_MARKER; 1155 mutex_spin_enter(&kq->kq_lock); 1156 retry: 1157 kevp = kevbuf; 1158 if (kq->kq_count == 0) { 1159 if (timeout >= 0) { 1160 error = cv_timedwait_sig(&kq->kq_cv, 1161 &kq->kq_lock, timeout); 1162 if (error == 0) { 1163 if (tsp == NULL || (timeout = 1164 gettimeleft(&ats, &sleepts)) > 0) 1165 goto retry; 1166 } else { 1167 /* don't restart after signals... */ 1168 if (error == ERESTART) 1169 error = EINTR; 1170 if (error == EWOULDBLOCK) 1171 error = 0; 1172 } 1173 } 1174 } else { 1175 /* mark end of knote list */ 1176 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1177 1178 while (count != 0) { 1179 kn = TAILQ_FIRST(&kq->kq_head); /* get next knote */ 1180 while ((kn->kn_status & KN_MARKER) != 0) { 1181 if (kn == marker) { 1182 /* it's our marker, stop */ 1183 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1184 if (count < maxevents || (tsp != NULL && 1185 (timeout = gettimeleft(&ats, 1186 &sleepts)) <= 0)) 1187 goto done; 1188 goto retry; 1189 } 1190 /* someone else's marker. */ 1191 kn = TAILQ_NEXT(kn, kn_tqe); 1192 } 1193 kq_check(kq); 1194 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1195 kq->kq_count--; 1196 kn->kn_status &= ~KN_QUEUED; 1197 kq_check(kq); 1198 if (kn->kn_status & KN_DISABLED) { 1199 /* don't want disabled events */ 1200 continue; 1201 } 1202 if ((kn->kn_flags & EV_ONESHOT) == 0) { 1203 mutex_spin_exit(&kq->kq_lock); 1204 KERNEL_LOCK(1, NULL); /* XXXSMP */ 1205 rv = (*kn->kn_fop->f_event)(kn, 0); 1206 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */ 1207 mutex_spin_enter(&kq->kq_lock); 1208 /* Re-poll if note was re-enqueued. */ 1209 if ((kn->kn_status & KN_QUEUED) != 0) 1210 continue; 1211 if (rv == 0) { 1212 /* 1213 * non-ONESHOT event that hasn't 1214 * triggered again, so de-queue. 1215 */ 1216 kn->kn_status &= ~KN_ACTIVE; 1217 continue; 1218 } 1219 } 1220 /* XXXAD should be got from f_event if !oneshot. 
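			 *
			 * Below, events are staged into the caller-supplied
			 * kevbuf, kevcnt entries at a time, and flushed to
			 * the user's list with keo_put_events() whenever the
			 * buffer fills; any remainder is written out after
			 * the scan loop finishes.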
 */
1221				*kevp++ = kn->kn_kevent;
1222				nkev++;
1223				if (kn->kn_flags & EV_ONESHOT) {
1224					/* delete ONESHOT events after retrieval */
1225					mutex_spin_exit(&kq->kq_lock);
1226					mutex_enter(&fdp->fd_lock);
1227					knote_detach(kn, fdp, true);
1228					mutex_spin_enter(&kq->kq_lock);
1229				} else if (kn->kn_flags & EV_CLEAR) {
1230					/* clear state after retrieval */
1231					kn->kn_data = 0;
1232					kn->kn_fflags = 0;
1233					kn->kn_status &= ~KN_ACTIVE;
1234				} else {
1235					/* add event back on list */
1236					kq_check(kq);
1237					TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1238					kq->kq_count++;
1239					kn->kn_status |= KN_QUEUED;
1240					kq_check(kq);
1241				}
1242				if (nkev == kevcnt) {
1243					/* do copyouts in kevcnt chunks */
1244					mutex_spin_exit(&kq->kq_lock);
1245					error = (*keops->keo_put_events)
1246					    (keops->keo_private,
1247					    kevbuf, ulistp, nevents, nkev);
1248					mutex_spin_enter(&kq->kq_lock);
1249					nevents += nkev;
1250					nkev = 0;
1251					kevp = kevbuf;
1252				}
1253				count--;
1254				if (error != 0 || count == 0) {
1255					/* remove marker */
1256					TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1257					break;
1258				}
1259			}
1260		}
1261	 done:
1262		mutex_spin_exit(&kq->kq_lock);
1263		if (marker != NULL)
1264			kmem_free(marker, sizeof(*marker));
1265		if (nkev != 0) {
1266			/* copyout remaining events */
1267			error = (*keops->keo_put_events)(keops->keo_private,
1268			    kevbuf, ulistp, nevents, nkev);
1269		}
1270		*retval = maxevents - count;
1271
1272		return error;
1273	}
1274
1275	/*
1276	 * fileops ioctl method for a kqueue descriptor.
1277	 *
1278	 * Two ioctls are currently supported. They both use struct kfilter_mapping:
1279	 *	KFILTER_BYNAME		find filter for name. len is ignored.
1280	 *	KFILTER_BYFILTER	find name for filter, and return result in
1281	 *				name, which is of size len.
1282	 */
1283	/*ARGSUSED*/
1284	static int
1285	kqueue_ioctl(file_t *fp, u_long com, void *data)
1286	{
1287		struct kfilter_mapping *km;
1288		const struct kfilter *kfilter;
1289		char *name;
1290		int error;
1291
1292		km = data;
1293		error = 0;
1294		name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);
1295
1296		switch (com) {
1297		case KFILTER_BYFILTER:	/* convert filter -> name */
1298			rw_enter(&kqueue_filter_lock, RW_READER);
1299			kfilter = kfilter_byfilter(km->filter);
1300			if (kfilter != NULL) {
1301				strlcpy(name, kfilter->name, KFILTER_MAXNAME);
1302				rw_exit(&kqueue_filter_lock);
1303				error = copyoutstr(name, km->name, km->len, NULL);
1304			} else {
1305				rw_exit(&kqueue_filter_lock);
1306				error = ENOENT;
1307			}
1308			break;
1309
1310		case KFILTER_BYNAME:	/* convert name -> filter */
1311			error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
1312			if (error) {
1313				break;
1314			}
1315			rw_enter(&kqueue_filter_lock, RW_READER);
1316			kfilter = kfilter_byname(name);
1317			if (kfilter != NULL)
1318				km->filter = kfilter->filter;
1319			else
1320				error = ENOENT;
1321			rw_exit(&kqueue_filter_lock);
1322			break;
1323
1324		default:
1325			error = ENOTTY;
1326			break;
1327
1328		}
1329		kmem_free(name, KFILTER_MAXNAME);
1330		return (error);
1331	}
1332
1333	/*
1334	 * fileops fcntl method for a kqueue descriptor.
1335	 */
1336	static int
1337	kqueue_fcntl(file_t *fp, u_int com, void *data)
1338	{
1339
1340		return (ENOTTY);
1341	}
1342
1343	/*
1344	 * fileops poll method for a kqueue descriptor.
1345	 * Determine if kqueue has events pending.
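 *
 * This means a kqueue descriptor can itself be multiplexed; a
 * (hypothetical) userland caller might do, for example:
 *
 *	struct pollfd pfd = { .fd = kq, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timo) > 0 && (pfd.revents & POLLIN) != 0) {
 *		... collect the pending events with kevent() ...
 *	}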
1346 */ 1347 static int 1348 kqueue_poll(file_t *fp, int events) 1349 { 1350 struct kqueue *kq; 1351 int revents; 1352 1353 kq = fp->f_data; 1354 1355 revents = 0; 1356 if (events & (POLLIN | POLLRDNORM)) { 1357 mutex_spin_enter(&kq->kq_lock); 1358 if (kq->kq_count != 0) { 1359 revents |= events & (POLLIN | POLLRDNORM); 1360 } else { 1361 selrecord(curlwp, &kq->kq_sel); 1362 } 1363 kq_check(kq); 1364 mutex_spin_exit(&kq->kq_lock); 1365 } 1366 1367 return revents; 1368 } 1369 1370 /* 1371 * fileops stat method for a kqueue descriptor. 1372 * Returns dummy info, with st_size being number of events pending. 1373 */ 1374 static int 1375 kqueue_stat(file_t *fp, struct stat *st) 1376 { 1377 struct kqueue *kq; 1378 1379 kq = fp->f_data; 1380 1381 memset(st, 0, sizeof(*st)); 1382 st->st_size = kq->kq_count; 1383 st->st_blksize = sizeof(struct kevent); 1384 st->st_mode = S_IFIFO; 1385 1386 return 0; 1387 } 1388 1389 static void 1390 kqueue_doclose(struct kqueue *kq, struct klist *list, int fd) 1391 { 1392 struct knote *kn; 1393 filedesc_t *fdp; 1394 1395 fdp = kq->kq_fdp; 1396 1397 KASSERT(mutex_owned(&fdp->fd_lock)); 1398 1399 for (kn = SLIST_FIRST(list); kn != NULL;) { 1400 if (kq != kn->kn_kq) { 1401 kn = SLIST_NEXT(kn, kn_link); 1402 continue; 1403 } 1404 knote_detach(kn, fdp, true); 1405 mutex_enter(&fdp->fd_lock); 1406 kn = SLIST_FIRST(list); 1407 } 1408 } 1409 1410 1411 /* 1412 * fileops close method for a kqueue descriptor. 1413 */ 1414 static int 1415 kqueue_close(file_t *fp) 1416 { 1417 struct kqueue *kq; 1418 filedesc_t *fdp; 1419 fdfile_t *ff; 1420 int i; 1421 1422 kq = fp->f_data; 1423 fdp = curlwp->l_fd; 1424 1425 mutex_enter(&fdp->fd_lock); 1426 for (i = 0; i <= fdp->fd_lastkqfile; i++) { 1427 if ((ff = fdp->fd_dt->dt_ff[i]) == NULL) 1428 continue; 1429 kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i); 1430 } 1431 if (fdp->fd_knhashmask != 0) { 1432 for (i = 0; i < fdp->fd_knhashmask + 1; i++) { 1433 kqueue_doclose(kq, &fdp->fd_knhash[i], -1); 1434 } 1435 } 1436 mutex_exit(&fdp->fd_lock); 1437 1438 KASSERT(kq->kq_count == 0); 1439 mutex_destroy(&kq->kq_lock); 1440 cv_destroy(&kq->kq_cv); 1441 seldestroy(&kq->kq_sel); 1442 kmem_free(kq, sizeof(*kq)); 1443 fp->f_data = NULL; 1444 1445 return (0); 1446 } 1447 1448 /* 1449 * struct fileops kqfilter method for a kqueue descriptor. 1450 * Event triggered when monitored kqueue changes. 1451 */ 1452 static int 1453 kqueue_kqfilter(file_t *fp, struct knote *kn) 1454 { 1455 struct kqueue *kq; 1456 filedesc_t *fdp; 1457 1458 kq = ((file_t *)kn->kn_obj)->f_data; 1459 1460 KASSERT(fp == kn->kn_obj); 1461 1462 if (kn->kn_filter != EVFILT_READ) 1463 return 1; 1464 1465 kn->kn_fop = &kqread_filtops; 1466 fdp = curlwp->l_fd; 1467 mutex_enter(&kq->kq_lock); 1468 SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext); 1469 mutex_exit(&kq->kq_lock); 1470 1471 return 0; 1472 } 1473 1474 1475 /* 1476 * Walk down a list of knotes, activating them if their event has 1477 * triggered. The caller's object lock (e.g. device driver lock) 1478 * must be held. 
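 *
 * A rough sketch of the driver side, with invented names (the pattern
 * mirrors kqueue_kqfilter() and filt_kqdetach() above): the driver's
 * kqfilter method hooks the knote onto its selinfo klist, the f_event
 * method reports how much data is ready, and the data-arrival path calls
 * knote() with the driver lock held:
 *
 *	static int
 *	foo_filt_read(struct knote *kn, long hint)
 *	{
 *		struct foo_softc *sc = kn->kn_hook;
 *
 *		kn->kn_data = sc->sc_bytes_ready;
 *		return kn->kn_data > 0;
 *	}
 *
 *	static const struct filterops foo_read_filtops =
 *		{ 1, NULL, foo_filt_rdetach, foo_filt_read };
 *
 *	in the driver's kqfilter method:
 *		kn->kn_hook = sc;
 *		kn->kn_fop = &foo_read_filtops;
 *		SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
 *
 *	when data arrives, with the driver lock held:
 *		knote(&sc->sc_rsel.sel_klist, 0);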
1479 */ 1480 void 1481 knote(struct klist *list, long hint) 1482 { 1483 struct knote *kn, *tmpkn; 1484 1485 SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) { 1486 if ((*kn->kn_fop->f_event)(kn, hint)) 1487 knote_activate(kn); 1488 } 1489 } 1490 1491 /* 1492 * Remove all knotes referencing a specified fd 1493 */ 1494 void 1495 knote_fdclose(int fd) 1496 { 1497 struct klist *list; 1498 struct knote *kn; 1499 filedesc_t *fdp; 1500 1501 fdp = curlwp->l_fd; 1502 list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist; 1503 mutex_enter(&fdp->fd_lock); 1504 while ((kn = SLIST_FIRST(list)) != NULL) { 1505 knote_detach(kn, fdp, true); 1506 mutex_enter(&fdp->fd_lock); 1507 } 1508 mutex_exit(&fdp->fd_lock); 1509 } 1510 1511 /* 1512 * Drop knote. Called with fdp->fd_lock held, and will drop before 1513 * returning. 1514 */ 1515 static void 1516 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop) 1517 { 1518 struct klist *list; 1519 struct kqueue *kq; 1520 1521 kq = kn->kn_kq; 1522 1523 KASSERT((kn->kn_status & KN_MARKER) == 0); 1524 KASSERT(mutex_owned(&fdp->fd_lock)); 1525 1526 /* Remove from monitored object. */ 1527 if (dofop) { 1528 KERNEL_LOCK(1, NULL); /* XXXSMP */ 1529 (*kn->kn_fop->f_detach)(kn); 1530 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */ 1531 } 1532 1533 /* Remove from descriptor table. */ 1534 if (kn->kn_fop->f_isfd) 1535 list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist; 1536 else 1537 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; 1538 1539 SLIST_REMOVE(list, kn, knote, kn_link); 1540 1541 /* Remove from kqueue. */ 1542 /* XXXAD should verify not in use by kqueue_scan. */ 1543 mutex_spin_enter(&kq->kq_lock); 1544 if ((kn->kn_status & KN_QUEUED) != 0) { 1545 kq_check(kq); 1546 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1547 kn->kn_status &= ~KN_QUEUED; 1548 kq->kq_count--; 1549 kq_check(kq); 1550 } 1551 mutex_spin_exit(&kq->kq_lock); 1552 1553 mutex_exit(&fdp->fd_lock); 1554 if (kn->kn_fop->f_isfd) 1555 fd_putfile(kn->kn_id); 1556 atomic_dec_uint(&kn->kn_kfilter->refcnt); 1557 kmem_free(kn, sizeof(*kn)); 1558 } 1559 1560 /* 1561 * Queue new event for knote. 1562 */ 1563 static void 1564 knote_enqueue(struct knote *kn) 1565 { 1566 struct kqueue *kq; 1567 1568 KASSERT((kn->kn_status & KN_MARKER) == 0); 1569 1570 kq = kn->kn_kq; 1571 1572 mutex_spin_enter(&kq->kq_lock); 1573 if ((kn->kn_status & KN_DISABLED) != 0) { 1574 kn->kn_status &= ~KN_DISABLED; 1575 } 1576 if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) { 1577 kq_check(kq); 1578 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1579 kn->kn_status |= KN_QUEUED; 1580 kq->kq_count++; 1581 kq_check(kq); 1582 cv_broadcast(&kq->kq_cv); 1583 selnotify(&kq->kq_sel, 0, NOTE_SUBMIT); 1584 } 1585 mutex_spin_exit(&kq->kq_lock); 1586 } 1587 /* 1588 * Queue new event for knote. 1589 */ 1590 static void 1591 knote_activate(struct knote *kn) 1592 { 1593 struct kqueue *kq; 1594 1595 KASSERT((kn->kn_status & KN_MARKER) == 0); 1596 1597 kq = kn->kn_kq; 1598 1599 mutex_spin_enter(&kq->kq_lock); 1600 kn->kn_status |= KN_ACTIVE; 1601 if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) { 1602 kq_check(kq); 1603 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1604 kn->kn_status |= KN_QUEUED; 1605 kq->kq_count++; 1606 kq_check(kq); 1607 cv_broadcast(&kq->kq_cv); 1608 selnotify(&kq->kq_sel, 0, NOTE_SUBMIT); 1609 } 1610 mutex_spin_exit(&kq->kq_lock); 1611 } 1612
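/*
 * On the two queueing helpers above: knote_activate() is what filters
 * and kqueue_register() call once an event has fired; it sets KN_ACTIVE
 * and queues the knote unless it is disabled or already queued.
 * knote_enqueue() serves EV_ENABLE in kqueue_register(); it clears
 * KN_DISABLED and re-queues the knote only if it is already active but
 * not currently queued.
 */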