/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};

static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_poll(struct file *fp, int events, struct ucred *cred);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_poll = kqueue_poll,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};
static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static void     knote_init(void);
static struct knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
        { 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t        knote_zone;
static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
                knote_enqueue(kn);                                      \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
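/*
 * Added note (illustrative): the EVFILT_* codes are small negative
 * integers, and kqueue_register() converts one into a 0-based index
 * into this table with a one's complement.  For example, EVFILT_READ
 * is -1, so ~(-1) == 0 selects &file_filtops, the first entry below.
 */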
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
};

static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        get_mplock();
        if (kn->kn_filter != EVFILT_READ) {
                rel_mplock();
                return (1);
        }

        kn->kn_fop = &kqread_filtops;
        SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
        rel_mplock();
        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        lwkt_gettoken(&proc_token);
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                lwkt_reltoken(&proc_token);
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                lwkt_reltoken(&proc_token);
                return (EACCES);
        }

        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        /* XXX lock the proc here while adding to the list? */
        SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&proc_token);

        return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        /* XXX locking?  this might modify another process. */
        p = kn->kn_ptr.p_proc;
        SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be gone
         * shortly.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                }
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}

static void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        kn->kn_data++;
        KNOTE_ACTIVATE(kn);

        if ((kn->kn_flags & EV_ONESHOT) == 0) {
                tv.tv_sec = kn->kn_sdata / 1000;
                tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                tticks = tvtohz_high(&tv);
                calloutp = (struct callout *)kn->kn_hook;
                callout_reset(calloutp, tticks, filt_timerexpire, kn);
        }
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax)
                return (ENOMEM);
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        MALLOC(calloutp, struct callout *, sizeof(*calloutp),
            M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_stop(calloutp);
        FREE(calloutp, M_KQUEUE);
        kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
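/*
 * Added usage sketch for the timer filter above (userland, not part of
 * this file; "kqfd" is a hypothetical descriptor from kqueue()):
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 * This arms a periodic 500ms timer: each expiration bumps kn_data in
 * filt_timerexpire(), and EV_CLEAR (set automatically by
 * filt_timerattach()) resets the count when the event is read out.
 */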
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_fdp = fdp;
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in an lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct knote *kn;
        struct klist *list;
        int hv;

        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                kn->kn_fop->f_detach(kn);
                if (kn->kn_fop->f_isfd) {
                        list = &kn->kn_fp->f_klist;
                        SLIST_REMOVE(list, kn, knote, kn_link);
                        fdrop(kn->kn_fp);
                        kn->kn_fp = NULL;
                } else {
                        hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
                        list = &kq->kq_knhash[hv];
                        SLIST_REMOVE(list, kn, knote, kn_link);
                }
                TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
                if (kn->kn_status & KN_QUEUED)
                        knote_dequeue(kn);
                knote_free(kn);
        }

        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        } else {
                *res = -1;
        }

        return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }

        return (error);
}
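/*
 * Added note: kern_kevent() below drives both copy functions in
 * KQ_NEVENTS-sized batches through a kevent array on the kernel stack,
 * so an arbitrarily large user changelist or eventlist never requires
 * a kernel allocation.
 */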
/*
 * MPALMOSTSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp;
        int i, n, total, error, nerrors = 0;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;

        tsp = tsp_in;
        *res = 0;

        get_mplock();
        for (;;) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        goto done;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);
                        if (error) {
                                if (nevents != 0) {
                                        kevp->flags = EV_ERROR;
                                        kevp->data = error;
                                        kevent_copyoutfn(uap, kevp, 1, res);
                                        nevents--;
                                        nerrors++;
                                } else {
                                        goto done;
                                }
                        }
                }
        }
        if (nerrors) {
                error = 0;
                goto done;
        }

        /*
         * Acquire/wait for events - set up timeout
         */
        if (tsp != NULL) {
                struct timespec ats;

                if (tsp->tv_sec || tsp->tv_nsec) {
                        nanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events are found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        crit_enter();
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        crit_exit();
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;

                        /*
                         * Move the marker to the end of the list
                         * after a sleep.
                         */
                        crit_enter();
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                        crit_exit();
                }

                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += i;
                        if (error)
                                break;
                }

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;
        }
        crit_enter();
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
        crit_exit();

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;

done:
        rel_mplock();
        return (error);
}
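/*
 * Added usage sketch for the path above (userland, error handling
 * omitted; "sock_fd" is a hypothetical descriptor):
 *
 *      int kq = kqueue();
 *      struct kevent change, event;
 *
 *      EV_SET(&change, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      int n = kevent(kq, &change, 1, &event, 1, NULL);
 *
 * Per-change registration failures come back in the eventlist with
 * EV_ERROR set in flags and the errno value in data, matching the
 * EV_ERROR handling in kern_kevent() above.
 */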
/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
                            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);

        return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                kprintf("unknown filter: %d\n", kev->filter);
                return (EINVAL);
        }

        if (fops->f_isfd) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL)
                        return (EBADF);

                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                break;
                        }
                }
        } else {
                if (kq->kq_knhashmask) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter)
                                        break;
                        }
                }
        }

        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        knote_attach(kn);
                        if ((error = fops->f_attach(kn)) != 0) {
                                knote_drop(kn);
                                goto done;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                crit_enter();
                if (kn->kn_fop->f_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
                crit_exit();
        } else if (kev->flags & EV_DELETE) {
                kn->kn_fop->f_detach(kn);
                knote_drop(kn);
                goto done;
        }

        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                crit_enter();
                kn->kn_status |= KN_DISABLED;
                crit_exit();
        }

        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                crit_enter();
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0))
                        knote_enqueue(kn);
                crit_exit();
        }

done:
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        crit_enter();
        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                nanouptime(&ats);
                timespecsub(&atx, &ats);        /* atx = time remaining */
                if (atx.tv_sec < 0) {
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }
        crit_exit();

        /* don't restart after signals... */
        if (error == ERESTART)
                return (EINTR);

        return (error);
}
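/*
 * Added note: kqueue_scan() below walks kq_knpend with a per-call
 * local_marker knote, so events it requeues (the non-ONESHOT,
 * non-CLEAR case) are not rescanned in the same pass, and so that
 * concurrent scans by other threads, each using their own marker on
 * the same pending queue, can interleave safely.
 */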
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        crit_enter();

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other thread's marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                if (kn->kn_status & KN_DISABLED) {
                        kn->kn_status &= ~KN_QUEUED;
                        kq->kq_count--;
                        continue;
                }
                if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                    kn->kn_fop->f_event(kn, 0) == 0) {
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        kq->kq_count--;
                        continue;
                }
                *kevp++ = kn->kn_kevent;
                ++total;
                --count;

                /*
                 * Post-event action on the note
                 */
                if (kn->kn_flags & EV_ONESHOT) {
                        kn->kn_status &= ~KN_QUEUED;
                        kq->kq_count--;
                        crit_exit();
                        kn->kn_fop->f_detach(kn);
                        knote_drop(kn);
                        crit_enter();
                } else if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        kq->kq_count--;
                } else {
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        crit_exit();
        return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct kqueue *kq;
        int error;

        get_mplock();
        kq = (struct kqueue *)fp->f_data;

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;
        int revents = 0;

        get_mplock();
        crit_enter();
        if (events & (POLLIN | POLLRDNORM)) {
                if (kq->kq_count) {
                        revents |= events & (POLLIN | POLLRDNORM);
                } else {
                        selrecord(curthread, &kq->kq_sel);
                        kq->kq_state |= KQ_SEL;
                }
        }
        crit_exit();
        rel_mplock();
        return (revents);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        get_mplock();

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(kq->kq_sigio);
        rel_mplock();

        kfree(kq, M_KQUEUE);
        return (0);
}
void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        if (kq->kq_state & KQ_SEL) {
                kq->kq_state &= ~KQ_SEL;
                selwakeup(&kq->kq_sel);
        }
        KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
        struct knote *kn;

        SLIST_FOREACH(kn, list, kn_selnext)
                if (kn->kn_fop->f_event(kn, hint))
                        KNOTE_ACTIVATE(kn);
}
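/*
 * Added sketch of event-source usage ("softc" and its selinfo field are
 * hypothetical): a driver or subsystem that owns a klist reports
 * activity via the KNOTE() macro, which calls knote() above to run each
 * attached filter's f_event with the given hint:
 *
 *      KNOTE(&softc->sc_rsel.si_note, 0);
 *
 * kqueue_wakeup() itself does the same on kq_sel.si_note, which is how
 * one kqueue can be monitored by another via EVFILT_READ.
 */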
/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct klist *list)
{
        struct knote *kn;

        while ((kn = SLIST_FIRST(list)) != NULL) {
                kn->kn_fop->f_detach(kn);
                knote_drop(kn);
        }
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct knote *kn;

restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kn->kn_fop->f_detach(kn);
                        knote_drop(kn);
                        goto restart;
                }
        }
}

static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_isfd) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
        kn->kn_status = 0;
}

/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_isfd)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_isfd)
                fdrop(kn->kn_fp);
        knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        crit_enter();
        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);
        crit_exit();
        kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        crit_enter();

        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
        crit_exit();
}

static void
knote_init(void)
{
        knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
        return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
        zfree(knote_zone, kn);
}