/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/file2.h>

#include <vm/vm_zone.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct thread *td, int *res);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
};

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	get_mplock();
	if (kn->kn_filter != EVFILT_READ) {
		rel_mplock();
		return (1);
	}

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curproc->p_ucred, p->p_ucred))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  this might modify another process. */
	p = kn->kn_ptr.p_proc;
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	kq->kq_fdp = fdp;
	fp->f_data = kq;

	fsetfd(p, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, n, nerrors, error;

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	get_mplock();
	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ?
		    KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		uap->sysmsg_result = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist,
	    uap->timeout, td, &uap->sysmsg_result);
done:
	rel_mplock();
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		crit_enter();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		crit_exit();
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		crit_enter();
		kn->kn_status |= KN_DISABLED;
		crit_exit();
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		crit_enter();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		crit_exit();
	}

done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct thread *td, int *res)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz_high(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz_high(&ttv);
	}

start:
	kevp = kq->kq_kev;
	crit_enter();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
		crit_exit();
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			crit_exit();
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			crit_exit();
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			crit_enter();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			crit_exit();
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			crit_enter();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	crit_exit();
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	*res = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	get_mplock();
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;

	get_mplock();
	crit_enter();
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	crit_exit();
	rel_mplock();
	return (revents);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	KKASSERT(p);
	get_mplock();
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	fp->f_data = NULL;
	funsetown(kq->kq_sigio);
	rel_mplock();

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct thread *td, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];
	/* Take any thread of p */
	struct thread *td = FIRST_LWP_IN_PROC(p)->lwp_thread;

	knote_remove(td, list);
}

static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (!kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_KQUEUE);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct filedesc *fdp;
	struct klist *list;

	KKASSERT(td->td_proc);
	fdp = td->td_proc->p_fd;
	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp);
	knote_free(kn);
}


static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	crit_enter();
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);
	crit_exit();
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	crit_enter();

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	crit_exit();
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}
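
/*
 * Illustrative userland sketch (not part of this kernel source): how the
 * EVFILT_TIMER path above is typically exercised from a program.  The
 * millisecond period passed in kev.data is what filt_timerattach() converts
 * to ticks via tvtohz_high().  The identifiers and the 500 ms period below
 * are arbitrary example values.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kevent change, event;
 *		int kq, n;
 *
 *		if ((kq = kqueue()) == -1)
 *			err(1, "kqueue");
 *		// periodic timer, ident 1, fires every 500 milliseconds
 *		EV_SET(&change, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE,
 *		    0, 500, NULL);
 *		if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)
 *			err(1, "kevent register");
 *		for (;;) {
 *			n = kevent(kq, NULL, 0, &event, 1, NULL);
 *			if (n == -1)
 *				err(1, "kevent wait");
 *			if (n > 0)
 *				printf("timer %lu fired %ld time(s)\n",
 *				    (unsigned long)event.ident,
 *				    (long)event.data);
 *		}
 *	}
 */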