/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_UP_INITIALIZER;

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static void	filter_detach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	lwkt_gettoken(&proc_token);
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (EACCES);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&proc_token);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  take proc_token here? */
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

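/*
 * Illustrative sketch (editor's note, not compiled into the kernel):
 * how a userland program would arm the timer filter above.  EVFILT_TIMER
 * takes its period from the kevent 'data' field (kn_sdata) in
 * milliseconds, and EV_CLEAR is applied automatically by
 * filt_timerattach().  The identifier and variable names below are
 * illustrative only.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// 500ms periodic timer
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	// blocks; kev.data counts
 *						// expirations since last read
 */
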
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;
	struct klist *list;
	int hv;

	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		filter_detach(kn);
		if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
			list = &kn->kn_fp->f_klist;
			SLIST_REMOVE(list, kn, knote, kn_link);
			fdrop(kn->kn_fp);
			kn->kn_fp = NULL;
		} else {
			hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
			list = &kq->kq_knhash[hv];
			SLIST_REMOVE(list, kn, knote, kn_link);
		}
		TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
		if (kn->kn_status & KN_QUEUED)
			knote_dequeue(kn);
		knote_free(kn);
	}

	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;

	tsp = tsp_in;
	*res = 0;

	lwkt_gettoken(&kq_token);
	for ( ;; ) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;

			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}

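/*
 * Illustrative sketch (editor's note, not compiled into the kernel): the
 * userland view of the sys_kevent() path above.  Changes are pushed into
 * the kernel via kevent_copyin() and results come back via
 * kevent_copyout(); per-registration errors are posted as EV_ERROR
 * events with the errno in the data field.  The descriptor and variable
 * names below are illustrative only.
 *
 *	struct kevent change, event;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&change, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);		// register only
 *	n = kevent(kq, NULL, 0, &event, 1, NULL);	// wait for one event
 *	if (n > 0 && (event.flags & EV_ERROR))
 *		errno = event.data;			// posted error
 */
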
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);

		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter)
					break;
			}
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				knote_drop(kn);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);

	} else if (kev->flags & EV_DELETE) {
		filter_detach(kn);
		knote_drop(kn);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}

done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			/* target time has already passed */
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    filter_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			continue;
		}
		*kevp++ = kn->kn_kevent;
		++total;
		--count;

		/*
		 * Post-event action on the note
		 */
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			filter_detach(kn);
			knote_drop(kn);
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
			kq->kq_count++;
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	lwkt_gettoken(&kq_token);
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	lwkt_gettoken(&kq_token);

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(kq->kq_sigio);
	lwkt_reltoken(&kq_token);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_attach(kn);
	}

	return (ret);
}

/*
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static void
filter_detach(struct knote *kn)
{
	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	} else {
		kn->kn_fop->f_detach(kn);
	}
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_event(kn, hint);
	}

	return (ret);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	SLIST_FOREACH(kn, list, kn_next)
		if (filter_event(kn, hint))
			KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&kq_token);
}

/*
 * insert knote at head of klist
 *
 * Requires: kq_token
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	SLIST_INSERT_HEAD(klist, kn, kn_next);
}

/*
 * remove knote from a klist
 *
 * Requires: kq_token
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	SLIST_REMOVE(klist, kn, knote, kn_next);
}

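/*
 * Illustrative sketch (editor's note, hypothetical names, not part of this
 * file): how an event source typically hooks into the klist API above.
 * The source keeps a struct kqinfo, inserts knotes on its ki_note list
 * from its own f_attach routine, and calls KNOTE() when the condition
 * changes, which funnels into knote() -> filter_event() ->
 * KNOTE_ACTIVATE().  'foo_softc' and its fields are illustrative only.
 *
 *	static int
 *	foo_filter_attach(struct knote *kn)
 *	{
 *		struct foo_softc *sc = kn->kn_fp->f_data;   // hypothetical
 *
 *		knote_insert(&sc->sc_kqinfo.ki_note, kn);
 *		return (0);
 *	}
 *
 *	// later, when data arrives:
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 */
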
/*
 * remove all knotes from a specified klist
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		filter_detach(kn);
		knote_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			filter_detach(kn);
			knote_drop(kn);
			goto restart;
		}
	}
	lwkt_reltoken(&kq_token);
}

static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
	kn->kn_status = 0;
}

static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		fdrop(kn->kn_fp);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}