/*	$OpenBSD: kern_event.c,v 1.188 2022/05/12 13:33:00 visa Exp $	*/

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.22 2001/02/23 20:32:42 jlemon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/pledge.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/poll.h>
#include <sys/syscallargs.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#ifdef DIAGNOSTIC
#define KLIST_ASSERT_LOCKED(kl) do {					\
	if ((kl)->kl_ops != NULL)					\
		(kl)->kl_ops->klo_assertlk((kl)->kl_arg);		\
	else								\
		KERNEL_ASSERT_LOCKED();					\
} while (0)
#else
#define KLIST_ASSERT_LOCKED(kl)	((void)(kl))
#endif

struct	kqueue *kqueue_alloc(struct filedesc *);
void	kqueue_terminate(struct proc *p, struct kqueue *);
void	KQREF(struct kqueue *);
void	KQRELE(struct kqueue *);

void	kqueue_purge(struct proc *, struct kqueue *);
int	kqueue_sleep(struct kqueue *, struct timespec *);

int	kqueue_read(struct file *, struct uio *, int);
int	kqueue_write(struct file *, struct uio *, int);
int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct proc *p);
int	kqueue_poll(struct file *fp, int events, struct proc *p);
int	kqueue_kqfilter(struct file *fp, struct knote *kn);
int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
int	kqueue_close(struct file *fp, struct proc *p);
void	kqueue_wakeup(struct kqueue *kq);

#ifdef KQUEUE_DEBUG
void	kqueue_do_check(struct kqueue *kq, const char *func, int line);
#define kqueue_check(kq)	kqueue_do_check((kq), __func__, __LINE__)
#else
#define kqueue_check(kq)	do {} while (0)
#endif

static int	filter_attach(struct knote *kn);
static void	filter_detach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);
static int	filter_modify(struct kevent *kev, struct knote *kn);
static int	filter_process(struct knote *kn, struct kevent *kev);
static void	kqueue_expand_hash(struct kqueue *kq);
static void	kqueue_expand_list(struct kqueue *kq, int fd);
static void	kqueue_task(void *);
static int	klist_lock(struct klist *);
static void	klist_unlock(struct klist *, int);

const struct fileops kqueueops = {
	.fo_read	= kqueue_read,
	.fo_write	= kqueue_write,
	.fo_ioctl	= kqueue_ioctl,
	.fo_poll	= kqueue_poll,
	.fo_kqfilter	= kqueue_kqfilter,
	.fo_stat	= kqueue_stat,
	.fo_close	= kqueue_close
};

void	knote_attach(struct knote *kn);
void	knote_detach(struct knote *kn);
void	knote_drop(struct knote *kn, struct proc *p);
void	knote_enqueue(struct knote *kn);
void	knote_dequeue(struct knote *kn);
int	knote_acquire(struct knote *kn, struct klist *, int);
void	knote_release(struct knote *kn);
void	knote_activate(struct knote *kn);
void	knote_remove(struct proc *p, struct kqueue *kq, struct knlist **plist,
	    int idx, int purge);

void	filt_kqdetach(struct knote *kn);
int	filt_kqueue(struct knote *kn, long hint);
int	filt_kqueuemodify(struct kevent *kev, struct knote *kn);
int	filt_kqueueprocess(struct knote *kn, struct kevent *kev);
int	filt_kqueue_common(struct knote *kn, struct kqueue *kq);
int	filt_procattach(struct knote *kn);
void	filt_procdetach(struct knote *kn);
int	filt_proc(struct knote *kn, long hint);
int	filt_fileattach(struct knote *kn);
void	filt_timerexpire(void *knx);
int	filt_timerattach(struct knote *kn);
void	filt_timerdetach(struct knote *kn);
int	filt_timermodify(struct kevent *kev, struct knote *kn);
int	filt_timerprocess(struct knote *kn, struct kevent *kev);
void	filt_seltruedetach(struct knote *kn);

const struct filterops kqread_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_kqdetach,
	.f_event	= filt_kqueue,
	.f_modify	= filt_kqueuemodify,
	.f_process	= filt_kqueueprocess,
};

const struct filterops proc_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_procattach,
	.f_detach	= filt_procdetach,
	.f_event	= filt_proc,
};

const struct filterops file_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= filt_fileattach,
	.f_detach	= NULL,
	.f_event	= NULL,
};

const struct filterops timer_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_timerattach,
	.f_detach	= filt_timerdetach,
	.f_event	= NULL,
	.f_modify	= filt_timermodify,
	.f_process	= filt_timerprocess,
};

struct	pool knote_pool;
struct	pool kqueue_pool;
struct	mutex kqueue_klist_lock = MUTEX_INITIALIZER(IPL_MPFLOOR);
int kq_ntimeouts = 0;
int kq_timeoutmax = (4 * 1024);

#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

/*
 * Table for all system-defined filters.
 */
const struct filterops *const sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	NULL, /*&aio_filtops,*/		/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_DEVICE */
	&file_filtops,			/* EVFILT_EXCEPT */
};

void
KQREF(struct kqueue *kq)
{
	refcnt_take(&kq->kq_refcnt);
}

void
KQRELE(struct kqueue *kq)
{
	struct filedesc *fdp;

	if (refcnt_rele(&kq->kq_refcnt) == 0)
		return;

	fdp = kq->kq_fdp;
	if (rw_status(&fdp->fd_lock) == RW_WRITE) {
		LIST_REMOVE(kq, kq_next);
	} else {
		fdplock(fdp);
		LIST_REMOVE(kq, kq_next);
		fdpunlock(fdp);
	}

	KASSERT(TAILQ_EMPTY(&kq->kq_head));
	KASSERT(kq->kq_nknotes == 0);

	free(kq->kq_knlist, M_KEVENT, kq->kq_knlistsize *
	    sizeof(struct knlist));
	hashfree(kq->kq_knhash, KN_HASHSIZE, M_KEVENT);
	klist_free(&kq->kq_sel.si_note);
	pool_put(&kqueue_pool, kq);
}

void
kqueue_init(void)
{
	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, IPL_MPFLOOR,
	    PR_WAITOK, "kqueuepl", NULL);
	pool_init(&knote_pool, sizeof(struct knote), 0, IPL_MPFLOOR,
	    PR_WAITOK, "knotepl", NULL);
}

void
kqueue_init_percpu(void)
{
	pool_cache_init(&knote_pool);
}

int
filt_fileattach(struct knote *kn)
{
	struct file *fp = kn->kn_fp;

	return fp->f_ops->fo_kqfilter(fp, kn);
}

int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_fop = &kqread_filtops;
	klist_insert(&kq->kq_sel.si_note, kn);
	return (0);
}

void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	klist_remove(&kq->kq_sel.si_note, kn);
}

int
filt_kqueue_common(struct knote *kn, struct kqueue *kq)
{
	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	kn->kn_data = kq->kq_count;

	return (kn->kn_data > 0);
}

int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;
	int active;

	mtx_enter(&kq->kq_lock);
	active = filt_kqueue_common(kn, kq);
	mtx_leave(&kq->kq_lock);

	return (active);
}

int
filt_kqueuemodify(struct kevent *kev, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;
	int active;

	mtx_enter(&kq->kq_lock);
	knote_assign(kev, kn);
	active = filt_kqueue_common(kn, kq);
	mtx_leave(&kq->kq_lock);

	return (active);
}

int
filt_kqueueprocess(struct knote *kn, struct kevent *kev)
{
	struct kqueue *kq = kn->kn_fp->f_data;
	int active;

	mtx_enter(&kq->kq_lock);
	if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
		active = 1;
	else
		active = filt_kqueue_common(kn, kq);
	if (active)
		knote_submit(kn, kev);
	mtx_leave(&kq->kq_lock);

	return (active);
}

int
filt_procattach(struct knote *kn)
{
	struct process *pr;
	int s;

	if ((curproc->p_p->ps_flags & PS_PLEDGE) &&
	    (curproc->p_p->ps_pledge & PLEDGE_PROC) == 0)
		return pledge_fail(curproc, EPERM, PLEDGE_PROC);

	if (kn->kn_id > PID_MAX)
		return ESRCH;

	pr = prfind(kn->kn_id);
	if (pr == NULL)
		return (ESRCH);

	/* exiting processes can't be specified */
	if (pr->ps_flags & PS_EXITING)
		return (ESRCH);

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	s = splhigh();
	klist_insert_locked(&pr->ps_klist, kn);
	splx(s);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
void
filt_procdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct process *pr = kn->kn_ptr.p_process;
	int s, status;

	mtx_enter(&kq->kq_lock);
	status = kn->kn_status;
	mtx_leave(&kq->kq_lock);

	if (status & KN_DETACHED)
		return;

	s = splhigh();
	klist_remove_locked(&pr->ps_klist, kn);
	splx(s);
}

int
filt_proc(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_kq;
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished and remove it
	 * from the process's klist
	 */
	if (event == NOTE_EXIT) {
		struct process *pr = kn->kn_ptr.p_process;
		int s;

		mtx_enter(&kq->kq_lock);
		kn->kn_status |= KN_DETACHED;
		mtx_leave(&kq->kq_lock);

		s = splhigh();
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_data = W_EXITCODE(pr->ps_xexit, pr->ps_xsig);
		klist_remove_locked(&pr->ps_klist, kn);
		splx(s);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		memset(&kev, 0, sizeof(kev));
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_udata;		/* preserve udata */
		error = kqueue_register(kq, &kev, 0, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timer_timeout_add(struct knote *kn)
{
	struct timeval tv;
	struct timeout *to = kn->kn_hook;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);
	/* Remove extra tick from tvtohz() if timeout has fired before. */
	if (timeout_triggered(to))
		tticks--;
	timeout_add(to, (tticks > 0) ? tticks : 1);
}
void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	kn->kn_data++;
	mtx_enter(&kq->kq_lock);
	knote_activate(kn);
	mtx_leave(&kq->kq_lock);

	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timer_timeout_add(kn);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
int
filt_timerattach(struct knote *kn)
{
	struct timeout *to;

	if (kq_ntimeouts > kq_timeoutmax)
		return (ENOMEM);
	kq_ntimeouts++;

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	to = malloc(sizeof(*to), M_KEVENT, M_WAITOK);
	timeout_set(to, filt_timerexpire, kn);
	kn->kn_hook = to;
	filt_timer_timeout_add(kn);

	return (0);
}

void
filt_timerdetach(struct knote *kn)
{
	struct timeout *to;

	to = (struct timeout *)kn->kn_hook;
	timeout_del_barrier(to);
	free(to, M_KEVENT, sizeof(*to));
	kq_ntimeouts--;
}

int
filt_timermodify(struct kevent *kev, struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct timeout *to = kn->kn_hook;

	/* Reset the timer. Any pending events are discarded. */

	timeout_del_barrier(to);

	mtx_enter(&kq->kq_lock);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	kn->kn_status &= ~KN_ACTIVE;
	mtx_leave(&kq->kq_lock);

	kn->kn_data = 0;
	knote_assign(kev, kn);
	/* Reinit timeout to invoke tick adjustment again. */
	timeout_set(to, filt_timerexpire, kn);
	filt_timer_timeout_add(kn);

	return (0);
}

int
filt_timerprocess(struct knote *kn, struct kevent *kev)
{
	int active, s;

	s = splsoftclock();
	active = (kn->kn_data != 0);
	if (active)
		knote_submit(kn, kev);
	splx(s);

	return (active);
}
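/*
 * Example (a minimal userland sketch, not used by the kernel itself):
 * arming EVFILT_TIMER, whose data field is the period in milliseconds as
 * noted above.  The identifier 1 is an arbitrary caller-chosen value and
 * the timer then fires roughly every 5000 ms.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 5000, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */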
/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

int
filt_seltruemodify(struct kevent *kev, struct knote *kn)
{
	knote_assign(kev, kn);
	return (kn->kn_fop->f_event(kn, 0));
}

int
filt_seltrueprocess(struct knote *kn, struct kevent *kev)
{
	int active;

	active = kn->kn_fop->f_event(kn, 0);
	if (active)
		knote_submit(kn, kev);
	return (active);
}

/*
 * This provides a full kqfilter entry for device switch tables, which
 * has the same effect as a filter using filt_seltrue() as its filter
 * method.
 */
void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_seltruedetach,
	.f_event	= filt_seltrue,
	.f_modify	= filt_seltruemodify,
	.f_process	= filt_seltrueprocess,
};

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}

static int
filt_dead(struct knote *kn, long hint)
{
	if (kn->kn_filter == EVFILT_EXCEPT) {
		/*
		 * Do not deliver event because there is no out-of-band data.
		 * However, let HUP condition pass for poll(2).
		 */
		if ((kn->kn_flags & __EV_POLL) == 0) {
			kn->kn_flags |= EV_DISABLE;
			return (0);
		}
	}

	kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	if (kn->kn_flags & __EV_POLL)
		kn->kn_flags |= __EV_HUP;
	kn->kn_data = 0;
	return (1);
}

static void
filt_deaddetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops dead_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_deaddetach,
	.f_event	= filt_dead,
	.f_modify	= filt_seltruemodify,
	.f_process	= filt_seltrueprocess,
};

static int
filt_badfd(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_ERROR | EV_ONESHOT);
	kn->kn_data = EBADF;
	return (1);
}

/* For use with kqpoll. */
const struct filterops badfd_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_deaddetach,
	.f_event	= filt_badfd,
	.f_modify	= filt_seltruemodify,
	.f_process	= filt_seltrueprocess,
};

static int
filter_attach(struct knote *kn)
{
	int error;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		error = kn->kn_fop->f_attach(kn);
	} else {
		KERNEL_LOCK();
		error = kn->kn_fop->f_attach(kn);
		KERNEL_UNLOCK();
	}
	return (error);
}

static void
filter_detach(struct knote *kn)
{
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		KERNEL_LOCK();
		kn->kn_fop->f_detach(kn);
		KERNEL_UNLOCK();
	}
}

static int
filter_event(struct knote *kn, long hint)
{
	if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
		KERNEL_ASSERT_LOCKED();

	return (kn->kn_fop->f_event(kn, hint));
}

static int
filter_modify(struct kevent *kev, struct knote *kn)
{
	int active, s;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		active = kn->kn_fop->f_modify(kev, kn);
	} else {
		KERNEL_LOCK();
		if (kn->kn_fop->f_modify != NULL) {
			active = kn->kn_fop->f_modify(kev, kn);
		} else {
			s = splhigh();
			active = knote_modify(kev, kn);
			splx(s);
		}
		KERNEL_UNLOCK();
	}
	return (active);
}

static int
filter_process(struct knote *kn, struct kevent *kev)
{
	int active, s;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		active = kn->kn_fop->f_process(kn, kev);
	} else {
		KERNEL_LOCK();
		if (kn->kn_fop->f_process != NULL) {
			active = kn->kn_fop->f_process(kn, kev);
		} else {
			s = splhigh();
			active = knote_process(kn, kev);
			splx(s);
		}
		KERNEL_UNLOCK();
	}
	return (active);
}

/*
 * Initialize the current thread for poll/select system call.
 * num indicates the number of serials that the system call may utilize.
 * After this function, the valid range of serials is
 * p_kq_serial <= x < p_kq_serial + num.
 */
void
kqpoll_init(unsigned int num)
{
	struct proc *p = curproc;
	struct filedesc *fdp;

	if (p->p_kq == NULL) {
		p->p_kq = kqueue_alloc(p->p_fd);
		p->p_kq_serial = arc4random();
		fdp = p->p_fd;
		fdplock(fdp);
		LIST_INSERT_HEAD(&fdp->fd_kqlist, p->p_kq, kq_next);
		fdpunlock(fdp);
	}

	if (p->p_kq_serial + num < p->p_kq_serial) {
		/* Serial is about to wrap. Clear all attached knotes. */
		kqueue_purge(p, p->p_kq);
		p->p_kq_serial = 0;
	}
}

/*
 * Finish poll/select system call.
 * num must have the same value that was used with kqpoll_init().
 */
void
kqpoll_done(unsigned int num)
{
	struct proc *p = curproc;
	struct kqueue *kq = p->p_kq;

	KASSERT(p->p_kq != NULL);
	KASSERT(p->p_kq_serial + num >= p->p_kq_serial);

	p->p_kq_serial += num;

	/*
	 * Because of kn_pollid key, a thread can in principle allocate
	 * up to O(maxfiles^2) knotes by calling poll(2) repeatedly
	 * with suitably varying pollfd arrays.
	 * Prevent such a large allocation by clearing knotes eagerly
	 * if there are too many of them.
	 *
	 * A small multiple of kq_knlistsize should give enough margin
	 * that eager clearing is infrequent, or does not happen at all,
	 * with normal programs.
	 * A single pollfd entry can use up to three knotes.
	 * Typically there is no significant overlap of fd and events
	 * between different entries in the pollfd array.
	 */
	if (kq->kq_nknotes > 4 * kq->kq_knlistsize)
		kqueue_purge(p, kq);
}
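/*
 * Rough usage sketch of the kqpoll interface above, as a poll/select
 * style caller might drive it (the surrounding names are illustrative):
 *
 *	kqpoll_init(nfds);
 *	... register per-pollfd knotes on curproc->p_kq with
 *	    kqueue_register(), encoding p_kq_serial in udata so that
 *	    stale knotes expire on later scans ...
 *	nevents = kqueue_scan(&scan, ...);
 *	kqpoll_done(nfds);
 */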
void
kqpoll_exit(void)
{
	struct proc *p = curproc;

	if (p->p_kq == NULL)
		return;

	kqueue_purge(p, p->p_kq);
	kqueue_terminate(p, p->p_kq);
	KASSERT(p->p_kq->kq_refcnt.r_refs == 1);
	KQRELE(p->p_kq);
	p->p_kq = NULL;
}

struct kqueue *
kqueue_alloc(struct filedesc *fdp)
{
	struct kqueue *kq;

	kq = pool_get(&kqueue_pool, PR_WAITOK | PR_ZERO);
	refcnt_init(&kq->kq_refcnt);
	kq->kq_fdp = fdp;
	TAILQ_INIT(&kq->kq_head);
	mtx_init(&kq->kq_lock, IPL_HIGH);
	task_set(&kq->kq_task, kqueue_task, kq);
	klist_init_mutex(&kq->kq_sel.si_note, &kqueue_klist_lock);

	return (kq);
}

int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	kq = kqueue_alloc(fdp);

	fdplock(fdp);
	error = falloc(p, &fp, &fd);
	if (error)
		goto out;
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	*retval = fd;
	LIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_next);
	kq = NULL;
	fdinsert(fdp, fd, 0, fp);
	FRELE(fp, p);
out:
	fdpunlock(fdp);
	if (kq != NULL)
		pool_put(&kqueue_pool, kq);
	return (error);
}
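/*
 * Userland view of the two system calls implemented here, as a minimal
 * sketch: create a queue, register a read filter on fd, then wait for
 * one event.  Error handling is elided.
 *
 *	struct kevent change, event;
 *	int kq = kqueue();
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &event, 1, NULL);
 */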
int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
	struct kqueue_scan_state scan;
	struct filedesc *fdp = p->p_fd;
	struct sys_kevent_args /* {
		syscallarg(int)	fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(int)	nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(int)	nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	struct timespec *tsp = NULL;
	int i, n, nerrors, error;
	int ready, total;
	struct kevent kev[KQ_NEVENTS];

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		error = EBADF;
		goto done;
	}

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (ts.tv_sec < 0 || !timespecisvalid(&ts)) {
			error = EINVAL;
			goto done;
		}
		tsp = &ts;
	}

	kq = fp->f_data;
	nerrors = 0;

	while ((n = SCARG(uap, nchanges)) > 0) {
		if (n > nitems(kev))
			n = nitems(kev);
		error = copyin(SCARG(uap, changelist), kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrevent(p, kev, n);
#endif
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, 0, p);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					copyout(kevp, SCARG(uap, eventlist),
					    sizeof(*kevp));
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	kqueue_scan_setup(&scan, kq);
	FRELE(fp, p);
	/*
	 * Collect as many events as we can.  The timeout on successive
	 * loops is disabled (kqueue_scan() becomes non-blocking).
	 */
	total = 0;
	error = 0;
	while ((n = SCARG(uap, nevents) - total) > 0) {
		if (n > nitems(kev))
			n = nitems(kev);
		ready = kqueue_scan(&scan, n, kev, tsp, p, &error);
		if (ready == 0)
			break;
		error = copyout(kev, SCARG(uap, eventlist) + total,
		    sizeof(struct kevent) * ready);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrevent(p, kev, ready);
#endif
		total += ready;
		if (error || ready < n)
			break;
	}
	kqueue_scan_finish(&scan);
	*retval = total;
	return (error);

done:
	FRELE(fp, p);
	return (error);
}

#ifdef KQUEUE_DEBUG
void
kqueue_do_check(struct kqueue *kq, const char *func, int line)
{
	struct knote *kn;
	int count = 0, nmarker = 0;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if (kn->kn_filter == EVFILT_MARKER) {
			if ((kn->kn_status & KN_QUEUED) != 0)
				panic("%s:%d: kq=%p kn=%p marker QUEUED",
				    func, line, kq, kn);
			nmarker++;
		} else {
			if ((kn->kn_status & KN_ACTIVE) == 0)
				panic("%s:%d: kq=%p kn=%p knote !ACTIVE",
				    func, line, kq, kn);
			if ((kn->kn_status & KN_QUEUED) == 0)
				panic("%s:%d: kq=%p kn=%p knote !QUEUED",
				    func, line, kq, kn);
			if (kn->kn_kq != kq)
				panic("%s:%d: kq=%p kn=%p kn_kq=%p != kq",
				    func, line, kq, kn, kn->kn_kq);
			count++;
			if (count > kq->kq_count)
				goto bad;
		}
	}
	if (count != kq->kq_count) {
bad:
		panic("%s:%d: kq=%p kq_count=%d count=%d nmarker=%d",
		    func, line, kq, kq->kq_count, count, nmarker);
	}
}
#endif

int
kqueue_register(struct kqueue *kq, struct kevent *kev, unsigned int pollid,
    struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	const struct filterops *fops = NULL;
	struct file *fp = NULL;
	struct knote *kn = NULL, *newkn = NULL;
	struct knlist *list = NULL;
	int active, error = 0;

	KASSERT(pollid == 0 || (p != NULL && p->p_kq == kq));

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	}

	if (fops == NULL) {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (kev->ident > INT_MAX)
			return (EBADF);
	}

	if (kev->flags & EV_ADD)
		newkn = pool_get(&knote_pool, PR_WAITOK | PR_ZERO);

again:
	if (fops->f_flags & FILTEROP_ISFD) {
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL) {
			error = EBADF;
			goto done;
		}
		mtx_enter(&kq->kq_lock);
		if (kev->flags & EV_ADD)
			kqueue_expand_list(kq, kev->ident);
		if (kev->ident < kq->kq_knlistsize)
			list = &kq->kq_knlist[kev->ident];
	} else {
		mtx_enter(&kq->kq_lock);
		if (kev->flags & EV_ADD)
			kqueue_expand_hash(kq);
		if (kq->kq_knhashmask != 0) {
			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
		}
	}
	if (list != NULL) {
		SLIST_FOREACH(kn, list, kn_link) {
			if (kev->filter == kn->kn_filter &&
			    kev->ident == kn->kn_id &&
			    pollid == kn->kn_pollid) {
				if (!knote_acquire(kn, NULL, 0)) {
					/* knote_acquire() has released
					 * kq_lock. */
					if (fp != NULL) {
						FRELE(fp, p);
						fp = NULL;
					}
					goto again;
				}
				break;
			}
		}
	}
	KASSERT(kn == NULL || (kn->kn_status & KN_PROCESSING) != 0);

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		mtx_leave(&kq->kq_lock);
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match.
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = newkn;
			newkn = NULL;
			kn->kn_status = KN_PROCESSING;
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_pollid = pollid;

			knote_attach(kn);
			mtx_leave(&kq->kq_lock);

			error = filter_attach(kn);
			if (error != 0) {
				knote_drop(kn, p);
				goto done;
			}

			/*
			 * If this is a file descriptor filter, check if
			 * fd was closed while the knote was being added.
			 * knote_fdclose() has missed kn if the function
			 * ran before kn appeared in kq_knlist.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    fd_checkclosed(fdp, kev->ident, kn->kn_fp)) {
				/*
				 * Drop the knote silently without error
				 * because another thread might already have
				 * seen it. This corresponds to the insert
				 * happening in full before the close.
				 */
				filter_detach(kn);
				knote_drop(kn, p);
				goto done;
			}

			/* Check if there is a pending event. */
			active = filter_process(kn, NULL);
			mtx_enter(&kq->kq_lock);
			if (active)
				knote_activate(kn);
		} else if (kn->kn_fop == &badfd_filtops) {
			/*
			 * Nothing expects this badfd knote any longer.
			 * Drop it to make room for the new knote and retry.
			 */
			KASSERT(kq == p->p_kq);
			mtx_leave(&kq->kq_lock);
			filter_detach(kn);
			knote_drop(kn, p);

			KASSERT(fp != NULL);
			FRELE(fp, p);
			fp = NULL;

			goto again;
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			mtx_leave(&kq->kq_lock);
			active = filter_modify(kev, kn);
			mtx_enter(&kq->kq_lock);
			if (active)
				knote_activate(kn);
			if (kev->flags & EV_ERROR) {
				error = kev->data;
				goto release;
			}
		}
	} else if (kev->flags & EV_DELETE) {
		mtx_leave(&kq->kq_lock);
		filter_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) && ((kn->kn_status & KN_DISABLED) == 0))
		kn->kn_status |= KN_DISABLED;

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		mtx_leave(&kq->kq_lock);
		/* Check if there is a pending event. */
		active = filter_process(kn, NULL);
		mtx_enter(&kq->kq_lock);
		if (active)
			knote_activate(kn);
	}

release:
	knote_release(kn);
	mtx_leave(&kq->kq_lock);
done:
	if (fp != NULL)
		FRELE(fp, p);
	if (newkn != NULL)
		pool_put(&knote_pool, newkn);
	return (error);
}

int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	struct timespec elapsed, start, stop;
	uint64_t nsecs;
	int error;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	if (tsp != NULL) {
		getnanouptime(&start);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	} else
		nsecs = INFSLP;
	error = msleep_nsec(kq, &kq->kq_lock, PSOCK | PCATCH | PNORELOCK,
	    "kqread", nsecs);
	if (tsp != NULL) {
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(tsp, &elapsed, tsp);
		if (tsp->tv_sec < 0)
			timespecclear(tsp);
	}

	return (error);
}

/*
 * Scan the kqueue, blocking if necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
int
kqueue_scan(struct kqueue_scan_state *scan, int maxevents,
    struct kevent *kevp, struct timespec *tsp, struct proc *p, int *errorp)
{
	struct kqueue *kq = scan->kqs_kq;
	struct knote *kn;
	int error = 0, nkev = 0;

	if (maxevents == 0)
		goto done;
retry:
	KASSERT(nkev == 0);

	error = 0;

	/* msleep() with PCATCH requires kernel lock. */
	KERNEL_LOCK();

	mtx_enter(&kq->kq_lock);

	if (kq->kq_state & KQ_DYING) {
		mtx_leave(&kq->kq_lock);
		KERNEL_UNLOCK();
		error = EBADF;
		goto done;
	}

	if (kq->kq_count == 0) {
		/*
		 * Successive loops are only necessary if there are more
		 * ready events to gather, so they don't need to block.
		 */
		if ((tsp != NULL && !timespecisset(tsp)) ||
		    scan->kqs_nevent != 0) {
			mtx_leave(&kq->kq_lock);
			KERNEL_UNLOCK();
			error = 0;
			goto done;
		}
		kq->kq_state |= KQ_SLEEP;
		error = kqueue_sleep(kq, tsp);
		/* kqueue_sleep() has released kq_lock. */
		KERNEL_UNLOCK();
		if (error == 0 || error == EWOULDBLOCK)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		goto done;
	}

	/* The actual scan does not sleep on kq, so unlock the kernel. */
	KERNEL_UNLOCK();

	/*
	 * Put the end marker in the queue to limit the scan to the events
	 * that are currently active.  This prevents events from being
	 * recollected if they reactivate during scan.
1340 * 1341 * If a partial scan has been performed already but no events have 1342 * been collected, reposition the end marker to make any new events 1343 * reachable. 1344 */ 1345 if (!scan->kqs_queued) { 1346 TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe); 1347 scan->kqs_queued = 1; 1348 } else if (scan->kqs_nevent == 0) { 1349 TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe); 1350 TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe); 1351 } 1352 1353 TAILQ_INSERT_HEAD(&kq->kq_head, &scan->kqs_start, kn_tqe); 1354 while (nkev < maxevents) { 1355 kn = TAILQ_NEXT(&scan->kqs_start, kn_tqe); 1356 if (kn->kn_filter == EVFILT_MARKER) { 1357 if (kn == &scan->kqs_end) 1358 break; 1359 1360 /* Move start marker past another thread's marker. */ 1361 TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe); 1362 TAILQ_INSERT_AFTER(&kq->kq_head, kn, &scan->kqs_start, 1363 kn_tqe); 1364 continue; 1365 } 1366 1367 if (!knote_acquire(kn, NULL, 0)) { 1368 /* knote_acquire() has released kq_lock. */ 1369 mtx_enter(&kq->kq_lock); 1370 continue; 1371 } 1372 1373 kqueue_check(kq); 1374 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1375 kn->kn_status &= ~KN_QUEUED; 1376 kq->kq_count--; 1377 kqueue_check(kq); 1378 1379 if (kn->kn_status & KN_DISABLED) { 1380 knote_release(kn); 1381 continue; 1382 } 1383 1384 mtx_leave(&kq->kq_lock); 1385 1386 /* Drop expired kqpoll knotes. */ 1387 if (p->p_kq == kq && 1388 p->p_kq_serial > (unsigned long)kn->kn_udata) { 1389 filter_detach(kn); 1390 knote_drop(kn, p); 1391 mtx_enter(&kq->kq_lock); 1392 continue; 1393 } 1394 1395 /* 1396 * Invalidate knotes whose vnodes have been revoked. 1397 * This is a workaround; it is tricky to clear existing 1398 * knotes and prevent new ones from being registered 1399 * with the current revocation mechanism. 1400 */ 1401 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) && 1402 kn->kn_fp != NULL && 1403 kn->kn_fp->f_type == DTYPE_VNODE) { 1404 struct vnode *vp = kn->kn_fp->f_data; 1405 1406 if (__predict_false(vp->v_op == &dead_vops && 1407 kn->kn_fop != &dead_filtops)) { 1408 filter_detach(kn); 1409 kn->kn_fop = &dead_filtops; 1410 1411 /* 1412 * Check if the event should be delivered. 1413 * Use f_event directly because this is 1414 * a special situation. 
				 */
				if (kn->kn_fop->f_event(kn, 0) == 0) {
					filter_detach(kn);
					knote_drop(kn, p);
					mtx_enter(&kq->kq_lock);
					continue;
				}
			}
		}

		memset(kevp, 0, sizeof(*kevp));
		if (filter_process(kn, kevp) == 0) {
			mtx_enter(&kq->kq_lock);
			if ((kn->kn_status & KN_QUEUED) == 0)
				kn->kn_status &= ~KN_ACTIVE;
			knote_release(kn);
			kqueue_check(kq);
			continue;
		}

		/*
		 * Post-event action on the note
		 */
		if (kevp->flags & EV_ONESHOT) {
			filter_detach(kn);
			knote_drop(kn, p);
			mtx_enter(&kq->kq_lock);
		} else if (kevp->flags & (EV_CLEAR | EV_DISPATCH)) {
			mtx_enter(&kq->kq_lock);
			if (kevp->flags & EV_DISPATCH)
				kn->kn_status |= KN_DISABLED;
			if ((kn->kn_status & KN_QUEUED) == 0)
				kn->kn_status &= ~KN_ACTIVE;
			knote_release(kn);
		} else {
			mtx_enter(&kq->kq_lock);
			if ((kn->kn_status & KN_QUEUED) == 0) {
				kqueue_check(kq);
				kq->kq_count++;
				kn->kn_status |= KN_QUEUED;
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
			}
			knote_release(kn);
		}
		kqueue_check(kq);

		kevp++;
		nkev++;
		scan->kqs_nevent++;
	}
	TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
	mtx_leave(&kq->kq_lock);
	if (scan->kqs_nevent == 0)
		goto retry;
done:
	*errorp = error;
	return (nkev);
}

void
kqueue_scan_setup(struct kqueue_scan_state *scan, struct kqueue *kq)
{
	memset(scan, 0, sizeof(*scan));

	KQREF(kq);
	scan->kqs_kq = kq;
	scan->kqs_start.kn_filter = EVFILT_MARKER;
	scan->kqs_start.kn_status = KN_PROCESSING;
	scan->kqs_end.kn_filter = EVFILT_MARKER;
	scan->kqs_end.kn_status = KN_PROCESSING;
}

void
kqueue_scan_finish(struct kqueue_scan_state *scan)
{
	struct kqueue *kq = scan->kqs_kq;

	KASSERT(scan->kqs_start.kn_filter == EVFILT_MARKER);
	KASSERT(scan->kqs_start.kn_status == KN_PROCESSING);
	KASSERT(scan->kqs_end.kn_filter == EVFILT_MARKER);
	KASSERT(scan->kqs_end.kn_status == KN_PROCESSING);

	if (scan->kqs_queued) {
		scan->kqs_queued = 0;
		mtx_enter(&kq->kq_lock);
		TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
		mtx_leave(&kq->kq_lock);
	}
	KQRELE(kq);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
int
kqueue_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
kqueue_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
kqueue_poll(struct file *fp, int events, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&kq->kq_lock);
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
		mtx_leave(&kq->kq_lock);
	}
	return (revents);
}

int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;	/* unlocked read */
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

void
kqueue_purge(struct proc *p, struct kqueue *kq)
{
	int i;

	mtx_enter(&kq->kq_lock);
	for (i = 0; i < kq->kq_knlistsize; i++)
		knote_remove(p, kq, &kq->kq_knlist, i, 1);
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i < kq->kq_knhashmask + 1; i++)
			knote_remove(p, kq, &kq->kq_knhash, i, 1);
	}
	mtx_leave(&kq->kq_lock);
}

void
kqueue_terminate(struct proc *p, struct kqueue *kq)
{
	struct knote *kn;

	mtx_enter(&kq->kq_lock);

	/*
	 * Any remaining entries should be scan markers.
	 * They are removed when the ongoing scans finish.
	 */
	KASSERT(kq->kq_count == 0);
	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe)
		KASSERT(kn->kn_filter == EVFILT_MARKER);

	kq->kq_state |= KQ_DYING;
	kqueue_wakeup(kq);
	mtx_leave(&kq->kq_lock);

	KASSERT(klist_empty(&kq->kq_sel.si_note));
	task_del(systqmp, &kq->kq_task);
}

int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = fp->f_data;

	fp->f_data = NULL;

	kqueue_purge(p, kq);
	kqueue_terminate(p, kq);

	KQRELE(kq);

	return (0);
}

static void
kqueue_task(void *arg)
{
	struct kqueue *kq = arg;

	mtx_enter(&kqueue_klist_lock);
	KNOTE(&kq->kq_sel.si_note, 0);
	mtx_leave(&kqueue_klist_lock);
	KQRELE(kq);
}

void
kqueue_wakeup(struct kqueue *kq)
{
	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (!klist_empty(&kq->kq_sel.si_note)) {
		/* Defer activation to avoid recursion. */
		KQREF(kq);
		if (!task_add(systqmp, &kq->kq_task))
			KQRELE(kq);
	}
}

static void
kqueue_expand_hash(struct kqueue *kq)
{
	struct knlist *hash;
	u_long hashmask;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	if (kq->kq_knhashmask == 0) {
		mtx_leave(&kq->kq_lock);
		hash = hashinit(KN_HASHSIZE, M_KEVENT, M_WAITOK, &hashmask);
		mtx_enter(&kq->kq_lock);
		if (kq->kq_knhashmask == 0) {
			kq->kq_knhash = hash;
			kq->kq_knhashmask = hashmask;
		} else {
			/* Another thread has allocated the hash. */
			mtx_leave(&kq->kq_lock);
			hashfree(hash, KN_HASHSIZE, M_KEVENT);
			mtx_enter(&kq->kq_lock);
		}
	}
}

static void
kqueue_expand_list(struct kqueue *kq, int fd)
{
	struct knlist *list, *olist;
	int size, osize;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	if (kq->kq_knlistsize <= fd) {
		size = kq->kq_knlistsize;
		mtx_leave(&kq->kq_lock);
		while (size <= fd)
			size += KQEXTENT;
		list = mallocarray(size, sizeof(*list), M_KEVENT, M_WAITOK);
		mtx_enter(&kq->kq_lock);
		if (kq->kq_knlistsize <= fd) {
			memcpy(list, kq->kq_knlist,
			    kq->kq_knlistsize * sizeof(*list));
			memset(&list[kq->kq_knlistsize], 0,
			    (size - kq->kq_knlistsize) * sizeof(*list));
			olist = kq->kq_knlist;
			osize = kq->kq_knlistsize;
			kq->kq_knlist = list;
			kq->kq_knlistsize = size;
			mtx_leave(&kq->kq_lock);
			free(olist, M_KEVENT, osize * sizeof(*list));
			mtx_enter(&kq->kq_lock);
		} else {
			/* Another thread has expanded the list. */
			mtx_leave(&kq->kq_lock);
			free(list, M_KEVENT, size * sizeof(*list));
			mtx_enter(&kq->kq_lock);
		}
	}
}

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * If we are about to sleep and klist is non-NULL, the list is unlocked
 * before sleep and remains unlocked on return.
 */
int
knote_acquire(struct knote *kn, struct klist *klist, int ls)
{
	struct kqueue *kq = kn->kn_kq;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);
	KASSERT(kn->kn_filter != EVFILT_MARKER);

	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING;
		if (klist != NULL) {
			mtx_leave(&kq->kq_lock);
			klist_unlock(klist, ls);
			/* XXX Timeout resolves potential loss of wakeup. */
			tsleep_nsec(kn, 0, "kqepts", SEC_TO_NSEC(1));
		} else {
			msleep_nsec(kn, &kq->kq_lock, PNORELOCK, "kqepts",
			    SEC_TO_NSEC(1));
		}
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING.
 */
void
knote_release(struct knote *kn)
{
	MUTEX_ASSERT_LOCKED(&kn->kn_kq->kq_lock);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT(kn->kn_status & KN_PROCESSING);

	if (kn->kn_status & KN_WAITING) {
		kn->kn_status &= ~KN_WAITING;
		wakeup(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
}

/*
 * activate one knote.
 */
void
knote_activate(struct knote *kn)
{
	MUTEX_ASSERT_LOCKED(&kn->kn_kq->kq_lock);

	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)
		knote_enqueue(kn);
}
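/*
 * Producer-side sketch: a subsystem that owns a klist typically calls
 * KNOTE() (which resolves to knote() below) from its event path while
 * holding the lock backing that klist.  The softc fields here are
 * illustrative only.
 *
 *	mtx_enter(&sc->sc_mtx);
 *	sc->sc_events++;
 *	KNOTE(&sc->sc_klist, 0);
 *	mtx_leave(&sc->sc_mtx);
 */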
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *kn0;
	struct kqueue *kq;

	KLIST_ASSERT_LOCKED(list);

	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, kn0) {
		if (filter_event(kn, hint)) {
			kq = kn->kn_kq;
			mtx_enter(&kq->kq_lock);
			knote_activate(kn);
			mtx_leave(&kq->kq_lock);
		}
	}
}

/*
 * remove all knotes from a specified knlist
 */
void
knote_remove(struct proc *p, struct kqueue *kq, struct knlist **plist, int idx,
    int purge)
{
	struct knote *kn;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);

	/* Always fetch array pointer as another thread can resize kq_knlist. */
	while ((kn = SLIST_FIRST(*plist + idx)) != NULL) {
		KASSERT(kn->kn_kq == kq);

		if (!purge) {
			/* Skip pending badfd knotes. */
			while (kn->kn_fop == &badfd_filtops) {
				kn = SLIST_NEXT(kn, kn_link);
				if (kn == NULL)
					return;
				KASSERT(kn->kn_kq == kq);
			}
		}

		if (!knote_acquire(kn, NULL, 0)) {
			/* knote_acquire() has released kq_lock. */
			mtx_enter(&kq->kq_lock);
			continue;
		}
		mtx_leave(&kq->kq_lock);
		filter_detach(kn);

		/*
		 * Notify poll(2) and select(2) when a monitored
		 * file descriptor is closed.
		 *
		 * This reuses the original knote for delivering the
		 * notification so as to avoid allocating memory.
		 */
		if (!purge && (kn->kn_flags & (__EV_POLL | __EV_SELECT)) &&
		    !(p->p_kq == kq &&
		      p->p_kq_serial > (unsigned long)kn->kn_udata) &&
		    kn->kn_fop != &badfd_filtops) {
			KASSERT(kn->kn_fop->f_flags & FILTEROP_ISFD);
			FRELE(kn->kn_fp, p);
			kn->kn_fp = NULL;

			kn->kn_fop = &badfd_filtops;
			filter_event(kn, 0);
			mtx_enter(&kq->kq_lock);
			knote_activate(kn);
			knote_release(kn);
			continue;
		}

		knote_drop(kn, p);
		mtx_enter(&kq->kq_lock);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_p->ps_fd;
	struct kqueue *kq;

	/*
	 * fdplock can be ignored if the file descriptor table is being freed
	 * because no other thread can access the fdp.
	 */
	if (fdp->fd_refcnt != 0)
		fdpassertlocked(fdp);

	LIST_FOREACH(kq, &fdp->fd_kqlist, kq_next) {
		mtx_enter(&kq->kq_lock);
		if (fd < kq->kq_knlistsize)
			knote_remove(p, kq, &kq->kq_knlist, fd, 0);
		mtx_leave(&kq->kq_lock);
	}
}

/*
 * handle a process exiting, including the triggering of NOTE_EXIT notes
 * XXX this could be more efficient, doing a single pass down the klist
 */
void
knote_processexit(struct process *pr)
{
	KERNEL_ASSERT_LOCKED();

	KNOTE(&pr->ps_klist, NOTE_EXIT);

	/* remove other knotes hanging off the process */
	klist_invalidate(&pr->ps_klist);
}

void
knote_attach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct knlist *list;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);
	KASSERT(kn->kn_status & KN_PROCESSING);

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KASSERT(kq->kq_knlistsize > kn->kn_id);
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		KASSERT(kq->kq_knhashmask != 0);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kq->kq_nknotes++;
}

void
knote_detach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct knlist *list;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);
	KASSERT(kn->kn_status & KN_PROCESSING);

	kq->kq_nknotes--;
	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	SLIST_REMOVE(list, kn, knote, kn_link);
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling FRELE and pool_put.
 */
void
knote_drop(struct knote *kn, struct proc *p)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_filter != EVFILT_MARKER);

	mtx_enter(&kq->kq_lock);
	knote_detach(kn);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_status & KN_WAITING) {
		kn->kn_status &= ~KN_WAITING;
		wakeup(kn);
	}
	mtx_leave(&kq->kq_lock);

	if ((kn->kn_fop->f_flags & FILTEROP_ISFD) && kn->kn_fp != NULL)
		FRELE(kn->kn_fp, p);
	pool_put(&knote_pool, kn);
}

void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	kqueue_check(kq);
	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_check(kq);
	kqueue_wakeup(kq);
}

void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	MUTEX_ASSERT_LOCKED(&kq->kq_lock);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT(kn->kn_status & KN_QUEUED);

	kqueue_check(kq);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	kqueue_check(kq);
}

/*
 * Assign parameters to the knote.
 *
 * The knote's object lock must be held.
 */
void
knote_assign(const struct kevent *kev, struct knote *kn)
{
	if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
		KERNEL_ASSERT_LOCKED();

	kn->kn_sfflags = kev->fflags;
	kn->kn_sdata = kev->data;
	kn->kn_udata = kev->udata;
}

/*
 * Submit the knote's event for delivery.
 *
 * The knote's object lock must be held.
 */
void
knote_submit(struct knote *kn, struct kevent *kev)
{
	if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
		KERNEL_ASSERT_LOCKED();

	if (kev != NULL) {
		*kev = kn->kn_kevent;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_fflags = 0;
			kn->kn_data = 0;
		}
	}
}

void
klist_init(struct klist *klist, const struct klistops *ops, void *arg)
{
	SLIST_INIT(&klist->kl_list);
	klist->kl_ops = ops;
	klist->kl_arg = arg;
}

void
klist_free(struct klist *klist)
{
	KASSERT(SLIST_EMPTY(&klist->kl_list));
}

void
klist_insert(struct klist *klist, struct knote *kn)
{
	int ls;

	ls = klist_lock(klist);
	SLIST_INSERT_HEAD(&klist->kl_list, kn, kn_selnext);
	klist_unlock(klist, ls);
}

void
klist_insert_locked(struct klist *klist, struct knote *kn)
{
	KLIST_ASSERT_LOCKED(klist);

	SLIST_INSERT_HEAD(&klist->kl_list, kn, kn_selnext);
}

void
klist_remove(struct klist *klist, struct knote *kn)
{
	int ls;

	ls = klist_lock(klist);
	SLIST_REMOVE(&klist->kl_list, kn, knote, kn_selnext);
	klist_unlock(klist, ls);
}

void
klist_remove_locked(struct klist *klist, struct knote *kn)
{
	KLIST_ASSERT_LOCKED(klist);

	SLIST_REMOVE(&klist->kl_list, kn, knote, kn_selnext);
}

/*
 * Detach all knotes from klist. The knotes are rewired to indicate EOF.
 *
 * The caller of this function must not hold any locks that can block
 * filterops callbacks that run with KN_PROCESSING.
 * Otherwise this function might deadlock.
 */
void
klist_invalidate(struct klist *list)
{
	struct knote *kn;
	struct kqueue *kq;
	struct proc *p = curproc;
	int ls;

	NET_ASSERT_UNLOCKED();

	ls = klist_lock(list);
	while ((kn = SLIST_FIRST(&list->kl_list)) != NULL) {
		kq = kn->kn_kq;
		mtx_enter(&kq->kq_lock);
		if (!knote_acquire(kn, list, ls)) {
			/* knote_acquire() has released kq_lock
			 * and klist lock. */
			ls = klist_lock(list);
			continue;
		}
		mtx_leave(&kq->kq_lock);
		klist_unlock(list, ls);
		filter_detach(kn);
		if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
			kn->kn_fop = &dead_filtops;
			filter_event(kn, 0);
			mtx_enter(&kq->kq_lock);
			knote_activate(kn);
			knote_release(kn);
			mtx_leave(&kq->kq_lock);
		} else {
			knote_drop(kn, p);
		}
		ls = klist_lock(list);
	}
	klist_unlock(list, ls);
}

static int
klist_lock(struct klist *list)
{
	int ls = 0;

	if (list->kl_ops != NULL) {
		ls = list->kl_ops->klo_lock(list->kl_arg);
	} else {
		KERNEL_LOCK();
		ls = splhigh();
	}
	return ls;
}

static void
klist_unlock(struct klist *list, int ls)
{
	if (list->kl_ops != NULL) {
		list->kl_ops->klo_unlock(list->kl_arg, ls);
	} else {
		splx(ls);
		KERNEL_UNLOCK();
	}
}

static void
klist_mutex_assertlk(void *arg)
{
	struct mutex *mtx = arg;

	(void)mtx;

	MUTEX_ASSERT_LOCKED(mtx);
}

static int
klist_mutex_lock(void *arg)
{
	struct mutex *mtx = arg;

	mtx_enter(mtx);
	return 0;
}

static void
klist_mutex_unlock(void *arg, int s)
{
	struct mutex *mtx = arg;

	mtx_leave(mtx);
}

static const struct klistops mutex_klistops = {
	.klo_assertlk	= klist_mutex_assertlk,
	.klo_lock	= klist_mutex_lock,
	.klo_unlock	= klist_mutex_unlock,
};

void
klist_init_mutex(struct klist *klist, struct mutex *mtx)
{
	klist_init(klist, &mutex_klistops, mtx);
}

static void
klist_rwlock_assertlk(void *arg)
{
	struct rwlock *rwl = arg;

	(void)rwl;

	rw_assert_wrlock(rwl);
}

static int
klist_rwlock_lock(void *arg)
{
	struct rwlock *rwl = arg;

	rw_enter_write(rwl);
	return 0;
}

static void
klist_rwlock_unlock(void *arg, int s)
{
	struct rwlock *rwl = arg;

	rw_exit_write(rwl);
}

static const struct klistops rwlock_klistops = {
	.klo_assertlk	= klist_rwlock_assertlk,
	.klo_lock	= klist_rwlock_lock,
	.klo_unlock	= klist_rwlock_unlock,
};

void
klist_init_rwlock(struct klist *klist, struct rwlock *rwl)
{
	klist_init(klist, &rwlock_klistops, rwl);
}
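/*
 * Minimal sketch of how a subsystem might set up a mutex-backed klist
 * with the helpers above (the softc layout is illustrative only):
 *
 *	struct example_softc {
 *		struct mutex	sc_mtx;
 *		struct klist	sc_klist;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, IPL_MPFLOOR);
 *	klist_init_mutex(&sc->sc_klist, &sc->sc_mtx);
 *
 *	klist_insert(&sc->sc_klist, kn);	(from a filter attach hook)
 */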