/*	$OpenBSD: kern_event.c,v 1.143 2020/10/11 07:11:59 mpi Exp $	*/

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.22 2001/02/23 20:32:42 jlemon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/pledge.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/poll.h>
#include <sys/syscallargs.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/wait.h>

void	kqueue_terminate(struct proc *p, struct kqueue *);
void	kqueue_free(struct kqueue *);
void	kqueue_init(void);
void	KQREF(struct kqueue *);
void	KQRELE(struct kqueue *);

int	kqueue_sleep(struct kqueue *, struct timespec *);

int	kqueue_read(struct file *, struct uio *, int);
int	kqueue_write(struct file *, struct uio *, int);
int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
int	kqueue_poll(struct file *fp, int events, struct proc *p);
int	kqueue_kqfilter(struct file *fp, struct knote *kn);
int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
int	kqueue_close(struct file *fp, struct proc *p);
void	kqueue_wakeup(struct kqueue *kq);

static void	kqueue_expand_hash(struct kqueue *kq);
static void	kqueue_expand_list(struct kqueue *kq, int fd);
static void	kqueue_task(void *);

const struct fileops kqueueops = {
	.fo_read	= kqueue_read,
	.fo_write	= kqueue_write,
	.fo_ioctl	= kqueue_ioctl,
	.fo_poll	= kqueue_poll,
	.fo_kqfilter	= kqueue_kqfilter,
	.fo_stat	= kqueue_stat,
	.fo_close	= kqueue_close
};

void	knote_attach(struct knote *kn);
void	knote_drop(struct knote *kn, struct proc *p);
void	knote_enqueue(struct knote *kn);
void	knote_dequeue(struct knote *kn);
int	knote_acquire(struct knote *kn);
void	knote_release(struct knote *kn);

void	filt_kqdetach(struct knote *kn);
int	filt_kqueue(struct knote *kn, long hint);
int	filt_procattach(struct knote *kn);
void	filt_procdetach(struct knote *kn);
int	filt_proc(struct knote *kn, long hint);
int	filt_fileattach(struct knote *kn);
void	filt_timerexpire(void *knx);
int	filt_timerattach(struct knote *kn);
void	filt_timerdetach(struct knote *kn);
int	filt_timer(struct knote *kn, long hint);
void	filt_seltruedetach(struct knote *kn);

const struct filterops kqread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_kqdetach,
	.f_event	= filt_kqueue,
};

const struct filterops proc_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_procattach,
	.f_detach	= filt_procdetach,
	.f_event	= filt_proc,
};

const struct filterops file_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= filt_fileattach,
	.f_detach	= NULL,
	.f_event	= NULL,
};

const struct filterops timer_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_timerattach,
	.f_detach	= filt_timerdetach,
	.f_event	= filt_timer,
};

struct	pool knote_pool;
struct	pool kqueue_pool;
int kq_ntimeouts = 0;
int kq_timeoutmax = (4 * 1024);

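/* Mix higher ident bits into the low bits before masking to a bucket. */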
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

/*
 * Table for all system-defined filters.
 */
const struct filterops *const sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	NULL, /*&aio_filtops,*/		/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_DEVICE */
	&file_filtops,			/* EVFILT_EXCEPT */
};

void
KQREF(struct kqueue *kq)
{
	atomic_inc_int(&kq->kq_refs);
}

void
KQRELE(struct kqueue *kq)
{
	struct filedesc *fdp;

	if (atomic_dec_int_nv(&kq->kq_refs) > 0)
		return;

	fdp = kq->kq_fdp;
	if (rw_status(&fdp->fd_lock) == RW_WRITE) {
		LIST_REMOVE(kq, kq_next);
	} else {
		fdplock(fdp);
		LIST_REMOVE(kq, kq_next);
		fdpunlock(fdp);
	}

	kqueue_free(kq);
}

void
kqueue_free(struct kqueue *kq)
{
	free(kq->kq_knlist, M_KEVENT, kq->kq_knlistsize *
	    sizeof(struct knlist));
	hashfree(kq->kq_knhash, KN_HASHSIZE, M_KEVENT);
	pool_put(&kqueue_pool, kq);
}

void
kqueue_init(void)
{
	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, IPL_MPFLOOR,
	    PR_WAITOK, "kqueuepl", NULL);
	pool_init(&knote_pool, sizeof(struct knote), 0, IPL_MPFLOOR,
	    PR_WAITOK, "knotepl", NULL);
}

int
filt_fileattach(struct knote *kn)
{
	struct file *fp = kn->kn_fp;

	return fp->f_ops->fo_kqfilter(fp, kn);
}

int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_fop = &kqread_filtops;
	klist_insert(&kq->kq_sel.si_note, kn);
	return (0);
}

void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	klist_remove(&kq->kq_sel.si_note, kn);
}

int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

int
filt_procattach(struct knote *kn)
{
	struct process *pr;
	int s;

	if ((curproc->p_p->ps_flags & PS_PLEDGE) &&
	    (curproc->p_p->ps_pledge & PLEDGE_PROC) == 0)
		return pledge_fail(curproc, EPERM, PLEDGE_PROC);

	if (kn->kn_id > PID_MAX)
		return ESRCH;

	pr = prfind(kn->kn_id);
	if (pr == NULL)
		return (ESRCH);

	/* exiting processes can't be specified */
	if (pr->ps_flags & PS_EXITING)
		return (ESRCH);

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	s = splhigh();
	klist_insert(&pr->ps_klist, kn);
	splx(s);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, this routine is itself
 * called as part of knote deletion, so it checks for the DETACHED state
 * to avoid detaching from a process that no longer exists.
 */
void
filt_procdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;
	int s;

	if (kn->kn_status & KN_DETACHED)
		return;

	s = splhigh();
	klist_remove(&pr->ps_klist, kn);
	splx(s);
}

int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished and remove it
	 * from the process's klist
	 */
	if (event == NOTE_EXIT) {
		struct process *pr = kn->kn_ptr.p_process;
		int s;

		s = splhigh();
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_data = W_EXITCODE(pr->ps_xexit, pr->ps_xsig);
		klist_remove(&pr->ps_klist, kn);
		splx(s);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		memset(&kev, 0, sizeof(kev));
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
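
/*
 * Userland sketch (not kernel code) of the interface this filter
 * implements; kq and pid are assumed to come from kqueue() and
 * fork() respectively:
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */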

static void
filt_timer_timeout_add(struct knote *kn)
{
	struct timeval tv;
	struct timeout *to = kn->kn_hook;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);
	/* Remove extra tick from tvtohz() if timeout has fired before. */
	if (timeout_triggered(to))
		tticks--;
	timeout_add(to, (tticks > 0) ? tticks : 1);
}

void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;

	kn->kn_data++;
	knote_activate(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timer_timeout_add(kn);
}


/*
 * data contains amount of time to sleep, in milliseconds
 */
int
filt_timerattach(struct knote *kn)
{
	struct timeout *to;

	if (kq_ntimeouts > kq_timeoutmax)
		return (ENOMEM);
	kq_ntimeouts++;

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	to = malloc(sizeof(*to), M_KEVENT, M_WAITOK);
	timeout_set(to, filt_timerexpire, kn);
	kn->kn_hook = to;
	filt_timer_timeout_add(kn);

	return (0);
}
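
/*
 * Userland sketch (not kernel code): a periodic 5-second timer,
 * assuming kq from kqueue().  The data field (milliseconds) ends up
 * in kn_sdata above.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0, 5000, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */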

void
filt_timerdetach(struct knote *kn)
{
	struct timeout *to;

	to = (struct timeout *)kn->kn_hook;
	timeout_del(to);
	free(to, M_KEVENT, sizeof(*to));
	kq_ntimeouts--;
}

int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}


/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, with
 * the same effect as a filter that uses filt_seltrue() as its event
 * method.
 */
void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

const struct filterops seltrue_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_seltruedetach,
	.f_event	= filt_seltrue,
};

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (EINVAL);
	}

	/* Nothing more to do */
	return (0);
}
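
/*
 * A driver with no real backpressure can point its d_kqfilter at
 * seltrue_kqfilter directly; a hypothetical sketch (mydev is not a
 * real driver):
 *
 *	int
 *	mydevkqfilter(dev_t dev, struct knote *kn)
 *	{
 *		return (seltrue_kqfilter(dev, kn));
 *	}
 */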

static int
filt_dead(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	if (kn->kn_flags & __EV_POLL)
		kn->kn_flags |= __EV_HUP;
	kn->kn_data = 0;
	return (1);
}

static void
filt_deaddetach(struct knote *kn)
{
	/* Nothing to do */
}

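/*
 * klist_invalidate() switches fd-backed knotes to these ops so that a
 * later scan sees EV_EOF instead of touching a vanished object.
 */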
const struct filterops dead_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_deaddetach,
	.f_event	= filt_dead,
};

struct kqueue *
kqueue_alloc(struct filedesc *fdp)
{
	struct kqueue *kq;

	kq = pool_get(&kqueue_pool, PR_WAITOK | PR_ZERO);
	kq->kq_refs = 1;
	kq->kq_fdp = fdp;
	TAILQ_INIT(&kq->kq_head);
	task_set(&kq->kq_task, kqueue_task, kq);

	return (kq);
}

int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	kq = kqueue_alloc(fdp);

	fdplock(fdp);
	error = falloc(p, &fp, &fd);
	if (error)
		goto out;
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	*retval = fd;
	LIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_next);
	kq = NULL;
	fdinsert(fdp, fd, 0, fp);
	FRELE(fp, p);
out:
	fdpunlock(fdp);
	if (kq != NULL)
		pool_put(&kqueue_pool, kq);
	return (error);
}

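/*
 * Userland sketch (not kernel code) of how the two syscalls above and
 * below are reached: register interest in a readable fd, then wait
 * for one event.
 *
 *	struct kevent ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *	n = kevent(kq, NULL, 0, &ev, 1, NULL);
 */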
int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
	struct kqueue_scan_state scan;
	struct filedesc *fdp = p->p_fd;
	struct sys_kevent_args /* {
		syscallarg(int)	fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(int)	nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(int)	nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	struct timespec *tsp = NULL;
	int i, n, nerrors, error;
	struct kevent kev[KQ_NEVENTS];

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		error = EBADF;
		goto done;
	}

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
		if (ts.tv_sec < 0 || !timespecisvalid(&ts)) {
			error = EINVAL;
			goto done;
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		tsp = &ts;
	}

	kq = fp->f_data;
	nerrors = 0;

	while (SCARG(uap, nchanges) > 0) {
		n = SCARG(uap, nchanges) > KQ_NEVENTS ?
		    KQ_NEVENTS : SCARG(uap, nchanges);
		error = copyin(SCARG(uap, changelist), kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrevent(p, kev, n);
#endif
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					copyout(kevp, SCARG(uap, eventlist),
					    sizeof(*kevp));
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	kqueue_scan_setup(&scan, kq);
	FRELE(fp, p);
	error = kqueue_scan(&scan, SCARG(uap, nevents), SCARG(uap, eventlist),
	    tsp, kev, p, &n);
	kqueue_scan_finish(&scan);

	*retval = n;
	return (error);

 done:
	FRELE(fp, p);
	return (error);
}

#ifdef KQUEUE_DEBUG
void
kqueue_do_check(struct kqueue *kq, const char *func, int line)
{
	struct knote *kn;
	int count = 0, nmarker = 0;

	KERNEL_ASSERT_LOCKED();
	splassert(IPL_HIGH);

	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
		if (kn->kn_filter == EVFILT_MARKER) {
			if ((kn->kn_status & KN_QUEUED) != 0)
				panic("%s:%d: kq=%p kn=%p marker QUEUED",
				    func, line, kq, kn);
			nmarker++;
		} else {
			if ((kn->kn_status & KN_ACTIVE) == 0)
				panic("%s:%d: kq=%p kn=%p knote !ACTIVE",
				    func, line, kq, kn);
			if ((kn->kn_status & KN_QUEUED) == 0)
				panic("%s:%d: kq=%p kn=%p knote !QUEUED",
				    func, line, kq, kn);
			if (kn->kn_kq != kq)
				panic("%s:%d: kq=%p kn=%p kn_kq=%p != kq",
				    func, line, kq, kn, kn->kn_kq);
			count++;
			if (count > kq->kq_count)
				goto bad;
		}
	}
	if (count != kq->kq_count) {
bad:
		panic("%s:%d: kq=%p kq_count=%d count=%d nmarker=%d",
		    func, line, kq, kq->kq_count, count, nmarker);
	}
}
#define kqueue_check(kq)	kqueue_do_check((kq), __func__, __LINE__)
#else
#define kqueue_check(kq)	do {} while (0)
#endif

int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	const struct filterops *fops = NULL;
	struct file *fp = NULL;
	struct knote *kn = NULL, *newkn = NULL;
	struct knlist *list = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
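		/* e.g. ~EVFILT_READ == ~(-1) == 0, the first table slot */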
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	}

	if (fops == NULL) {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (kev->ident > INT_MAX)
			return (EBADF);
	}

	if (kev->flags & EV_ADD)
		newkn = pool_get(&knote_pool, PR_WAITOK | PR_ZERO);

again:
	if (fops->f_flags & FILTEROP_ISFD) {
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL) {
			error = EBADF;
			goto done;
		}
		if (kev->flags & EV_ADD)
			kqueue_expand_list(kq, kev->ident);
		if (kev->ident < kq->kq_knlistsize)
			list = &kq->kq_knlist[kev->ident];
	} else {
		if (kev->flags & EV_ADD)
			kqueue_expand_hash(kq);
		if (kq->kq_knhashmask != 0) {
			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
		}
	}
	if (list != NULL) {
		SLIST_FOREACH(kn, list, kn_link) {
			if (kev->filter == kn->kn_filter &&
			    kev->ident == kn->kn_id) {
				s = splhigh();
				if (!knote_acquire(kn)) {
					splx(s);
					if (fp != NULL) {
						FRELE(fp, p);
						fp = NULL;
					}
					goto again;
				}
				splx(s);
				break;
			}
		}
	}
	KASSERT(kn == NULL || (kn->kn_status & KN_PROCESSING) != 0);

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match.
	 * If adding a new knote, sleeping is not allowed until the knote
	 * has been inserted.
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = newkn;
			newkn = NULL;
			kn->kn_status = KN_PROCESSING;
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}

			/*
			 * If this is a file descriptor filter, check if the
			 * fd was closed while the knote was being added.
			 * knote_fdclose() will have missed kn if it ran
			 * before kn appeared in kq_knlist.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    fd_checkclosed(fdp, kev->ident, kn->kn_fp)) {
				/*
				 * Drop the knote silently without error
				 * because another thread might already have
				 * seen it. This corresponds to the insert
				 * happening in full before the close.
				 */
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			knote_activate(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if (kn->kn_fop->f_event(kn, 0))
			kn->kn_status |= KN_ACTIVE;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

	s = splhigh();
	knote_release(kn);
	splx(s);
done:
	if (fp != NULL)
		FRELE(fp, p);
	if (newkn != NULL)
		pool_put(&knote_pool, newkn);
	return (error);
}

int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	struct timespec elapsed, start, stop;
	uint64_t nsecs;
	int error;

	splassert(IPL_HIGH);

	if (tsp != NULL) {
		getnanouptime(&start);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	} else
		nsecs = INFSLP;
	error = tsleep_nsec(kq, PSOCK | PCATCH, "kqread", nsecs);
	if (tsp != NULL) {
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(tsp, &elapsed, tsp);
		if (tsp->tv_sec < 0)
			timespecclear(tsp);
	}

	return (error);
}

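/*
 * Scan the kqueue for pending events, copying them out to ulistp.
 * The kqs_start and kqs_end markers bracket this thread's position in
 * kq_head so that concurrent scans and newly enqueued knotes do not
 * disturb the traversal; other threads' markers are simply stepped
 * over.
 */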
int
kqueue_scan(struct kqueue_scan_state *scan, int maxevents,
    struct kevent *ulistp, struct timespec *tsp, struct kevent *kev,
    struct proc *p, int *retval)
{
	struct kqueue *kq = scan->kqs_kq;
	struct kevent *kevp;
	struct knote *kn;
	int s, count, nkev, error = 0;

	nkev = 0;
	kevp = kev;

	count = maxevents;
	if (count == 0)
		goto done;

retry:
	KASSERT(count == maxevents);
	KASSERT(nkev == 0);

	if (kq->kq_state & KQ_DYING) {
		error = EBADF;
		goto done;
	}

	s = splhigh();
	if (kq->kq_count == 0) {
		if (tsp != NULL && !timespecisset(tsp)) {
			splx(s);
			error = 0;
			goto done;
		}
		kq->kq_state |= KQ_SLEEP;
		error = kqueue_sleep(kq, tsp);
		splx(s);
		if (error == 0 || error == EWOULDBLOCK)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe);
	TAILQ_INSERT_HEAD(&kq->kq_head, &scan->kqs_start, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&scan->kqs_start, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			if (kn == &scan->kqs_end) {
				TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end,
				    kn_tqe);
				TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start,
				    kn_tqe);
				splx(s);
				if (count == maxevents)
					goto retry;
				goto done;
			}

			/* Move start marker past another thread's marker. */
			TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
			TAILQ_INSERT_AFTER(&kq->kq_head, kn, &scan->kqs_start,
			    kn_tqe);
			continue;
		}

		if (!knote_acquire(kn))
			continue;

		kqueue_check(kq);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		kn->kn_status &= ~KN_QUEUED;
		kq->kq_count--;
		kqueue_check(kq);

		if (kn->kn_status & KN_DISABLED) {
			knote_release(kn);
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			if ((kn->kn_status & KN_QUEUED) == 0)
				kn->kn_status &= ~KN_ACTIVE;
			knote_release(kn);
			kqueue_check(kq);
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
			if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
			}
			if (kn->kn_flags & EV_DISPATCH)
				kn->kn_status |= KN_DISABLED;
			if ((kn->kn_status & KN_QUEUED) == 0)
				kn->kn_status &= ~KN_ACTIVE;
			knote_release(kn);
		} else {
			if ((kn->kn_status & KN_QUEUED) == 0) {
				kqueue_check(kq);
				kq->kq_count++;
				kn->kn_status |= KN_QUEUED;
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
			}
			knote_release(kn);
		}
		kqueue_check(kq);
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
#ifdef KTRACE
			if (KTRPOINT(p, KTR_STRUCT))
				ktrevent(p, kev, nkev);
#endif
			error = copyout(kev, ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
	TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
	splx(s);
done:
	if (nkev != 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrevent(p, kev, nkev);
#endif
		error = copyout(kev, ulistp,
		    sizeof(struct kevent) * nkev);
	}
	*retval = maxevents - count;
	return (error);
}

void
kqueue_scan_setup(struct kqueue_scan_state *scan, struct kqueue *kq)
{
	memset(scan, 0, sizeof(*scan));

	KQREF(kq);
	scan->kqs_kq = kq;
	scan->kqs_start.kn_filter = EVFILT_MARKER;
	scan->kqs_start.kn_status = KN_PROCESSING;
	scan->kqs_end.kn_filter = EVFILT_MARKER;
	scan->kqs_end.kn_status = KN_PROCESSING;
}

void
kqueue_scan_finish(struct kqueue_scan_state *scan)
{
	struct kqueue *kq = scan->kqs_kq;

	KASSERT(scan->kqs_start.kn_filter == EVFILT_MARKER);
	KASSERT(scan->kqs_start.kn_status == KN_PROCESSING);
	KASSERT(scan->kqs_end.kn_filter == EVFILT_MARKER);
	KASSERT(scan->kqs_end.kn_status == KN_PROCESSING);

	KQRELE(kq);
}


/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
int
kqueue_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
kqueue_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
kqueue_poll(struct file *fp, int events, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splhigh();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

void
kqueue_terminate(struct proc *p, struct kqueue *kq)
{
	int i;

	KERNEL_ASSERT_LOCKED();

	for (i = 0; i < kq->kq_knlistsize; i++)
		knote_remove(p, &kq->kq_knlist[i]);
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i < kq->kq_knhashmask + 1; i++)
			knote_remove(p, &kq->kq_knhash[i]);
	}
	kq->kq_state |= KQ_DYING;
	kqueue_wakeup(kq);

	KASSERT(klist_empty(&kq->kq_sel.si_note));
	task_del(systq, &kq->kq_task);
}

int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = fp->f_data;

	KERNEL_LOCK();
	kqueue_terminate(p, kq);
	fp->f_data = NULL;

	KQRELE(kq);

	KERNEL_UNLOCK();

	return (0);
}

static void
kqueue_task(void *arg)
{
	struct kqueue *kq = arg;

	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	} else {
		KNOTE(&kq->kq_sel.si_note, 0);
	}
	KQRELE(kq);
}

void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) || !klist_empty(&kq->kq_sel.si_note)) {
		/* Defer activation to avoid recursion. */
		KQREF(kq);
		if (!task_add(systq, &kq->kq_task))
			KQRELE(kq);
	}
}

static void
kqueue_expand_hash(struct kqueue *kq)
{
	struct knlist *hash;
	u_long hashmask;

	if (kq->kq_knhashmask == 0) {
		hash = hashinit(KN_HASHSIZE, M_KEVENT, M_WAITOK, &hashmask);
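		/* hashinit() may sleep, so check for a lost race. */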
		if (kq->kq_knhashmask == 0) {
			kq->kq_knhash = hash;
			kq->kq_knhashmask = hashmask;
		} else {
			/* Another thread has allocated the hash. */
			hashfree(hash, KN_HASHSIZE, M_KEVENT);
		}
	}
}

static void
kqueue_expand_list(struct kqueue *kq, int fd)
{
	struct knlist *list;
	int size;

	if (kq->kq_knlistsize <= fd) {
		size = kq->kq_knlistsize;
		while (size <= fd)
			size += KQEXTENT;
		list = mallocarray(size, sizeof(*list), M_KEVENT, M_WAITOK);
		if (kq->kq_knlistsize <= fd) {
			memcpy(list, kq->kq_knlist,
			    kq->kq_knlistsize * sizeof(*list));
			memset(&list[kq->kq_knlistsize], 0,
			    (size - kq->kq_knlistsize) * sizeof(*list));
			free(kq->kq_knlist, M_KEVENT,
			    kq->kq_knlistsize * sizeof(*list));
			kq->kq_knlist = list;
			kq->kq_knlistsize = size;
		} else {
			/* Another thread has expanded the list. */
			free(list, M_KEVENT, size * sizeof(*list));
		}
	}
}

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
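 *
 * The canonical caller pattern is a retry loop (sketch; "again" is
 * the caller's own restart label):
 *
 *	s = splhigh();
 *	if (!knote_acquire(kn)) {
 *		splx(s);
 *		goto again;
 *	}
 *	splx(s);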
 */
int
knote_acquire(struct knote *kn)
{
	splassert(IPL_HIGH);
	KASSERT(kn->kn_filter != EVFILT_MARKER);

	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING;
		tsleep_nsec(kn, 0, "kqepts", SEC_TO_NSEC(1));
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING.
 */
void
knote_release(struct knote *kn)
{
	splassert(IPL_HIGH);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT(kn->kn_status & KN_PROCESSING);

	if (kn->kn_status & KN_WAITING) {
		kn->kn_status &= ~KN_WAITING;
		wakeup(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
}

/*
 * activate one knote.
 */
void
knote_activate(struct knote *kn)
{
	int s;

	s = splhigh();
	kn->kn_status |= KN_ACTIVE;
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)
		knote_enqueue(kn);
	splx(s);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn, *kn0;

	SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, kn0)
		if (kn->kn_fop->f_event(kn, hint))
			knote_activate(kn);
}

/*
 * remove all knotes from a specified knlist
 */
void
knote_remove(struct proc *p, struct knlist *list)
{
	struct knote *kn;
	int s;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		s = splhigh();
		if (!knote_acquire(kn)) {
			splx(s);
			continue;
		}
		splx(s);
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_p->ps_fd;
	struct kqueue *kq;
	struct knlist *list;

	/*
	 * fdplock can be ignored if the file descriptor table is being freed
	 * because no other thread can access the fdp.
	 */
	if (fdp->fd_refcnt != 0)
		fdpassertlocked(fdp);

	if (LIST_EMPTY(&fdp->fd_kqlist))
		return;

	KERNEL_LOCK();
	LIST_FOREACH(kq, &fdp->fd_kqlist, kq_next) {
		if (fd >= kq->kq_knlistsize)
			continue;

		list = &kq->kq_knlist[fd];
		knote_remove(p, list);
	}
	KERNEL_UNLOCK();
}

/*
 * handle a process exiting, including the triggering of NOTE_EXIT notes
 * XXX this could be more efficient, doing a single pass down the klist
 */
void
knote_processexit(struct proc *p)
{
	struct process *pr = p->p_p;

	KASSERT(p == curproc);

	KNOTE(&pr->ps_klist, NOTE_EXIT);

	/* remove other knotes hanging off the process */
	klist_invalidate(&pr->ps_klist);
}

void
knote_attach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct knlist *list;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KASSERT(kq->kq_knlistsize > kn->kn_id);
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		KASSERT(kq->kq_knhashmask != 0);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling FRELE and pool_put.
 */
void
knote_drop(struct knote *kn, struct proc *p)
{
	struct kqueue *kq = kn->kn_kq;
	struct knlist *list;
	int s;

	KASSERT(kn->kn_filter != EVFILT_MARKER);

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	s = splhigh();
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_status & KN_WAITING) {
		kn->kn_status &= ~KN_WAITING;
		wakeup(kn);
	}
	splx(s);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		FRELE(kn->kn_fp, p);
	pool_put(&knote_pool, kn);
}


void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	splassert(IPL_HIGH);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	kqueue_check(kq);
	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_check(kq);
	kqueue_wakeup(kq);
}

void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	splassert(IPL_HIGH);
	KASSERT(kn->kn_filter != EVFILT_MARKER);
	KASSERT(kn->kn_status & KN_QUEUED);

	kqueue_check(kq);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	kqueue_check(kq);
}

void
klist_insert(struct klist *klist, struct knote *kn)
{
	SLIST_INSERT_HEAD(&klist->kl_list, kn, kn_selnext);
}

void
klist_remove(struct klist *klist, struct knote *kn)
{
	SLIST_REMOVE(&klist->kl_list, kn, knote, kn_selnext);
}

int
klist_empty(struct klist *klist)
{
	return (SLIST_EMPTY(&klist->kl_list));
}

void
klist_invalidate(struct klist *list)
{
	struct knote *kn;
	struct proc *p = curproc;
	int s;

	/*
	 * NET_LOCK() must not be held because it can block another thread
	 * in f_event with a knote acquired.
	 */
	NET_ASSERT_UNLOCKED();

	s = splhigh();
	while ((kn = SLIST_FIRST(&list->kl_list)) != NULL) {
		if (!knote_acquire(kn))
			continue;
		splx(s);
		kn->kn_fop->f_detach(kn);
		if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
			kn->kn_fop = &dead_filtops;
			knote_activate(kn);
			s = splhigh();
			knote_release(kn);
		} else {
			knote_drop(kn, p);
			s = splhigh();
		}
	}
	splx(s);
}