/*	$OpenBSD: kern_event.c,v 1.28 2006/12/01 07:17:25 camield Exp $	*/

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.22 2001/02/23 20:32:42 jlemon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/pool.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/poll.h>
#include <sys/syscallargs.h>

int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p, int *retval);

int	kqueue_read(struct file *fp, off_t *poff, struct uio *uio,
		    struct ucred *cred);
int	kqueue_write(struct file *fp, off_t *poff, struct uio *uio,
		    struct ucred *cred);
int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
int	kqueue_poll(struct file *fp, int events, struct proc *p);
int	kqueue_kqfilter(struct file *fp, struct knote *kn);
int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
int	kqueue_close(struct file *fp, struct proc *p);
void	kqueue_wakeup(struct kqueue *kq);

struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_kqfilter,
	kqueue_stat,
	kqueue_close
};

void	knote_attach(struct knote *kn, struct filedesc *fdp);
void	knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp);
void	knote_enqueue(struct knote *kn);
void	knote_dequeue(struct knote *kn);
#define knote_alloc() ((struct knote *)pool_get(&knote_pool, PR_WAITOK))
#define knote_free(kn) pool_put(&knote_pool, (kn))

void	filt_kqdetach(struct knote *kn);
int	filt_kqueue(struct knote *kn, long hint);
int	filt_procattach(struct knote *kn);
void	filt_procdetach(struct knote *kn);
int	filt_proc(struct knote *kn, long hint);
int	filt_fileattach(struct knote *kn);

struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

struct	pool knote_pool;
struct	pool kqueue_pool;

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops sig_filtops;
#ifdef notyet
extern struct filterops aio_filtops;
#endif

/*
 * Table for all system-defined filters.
 */
struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	NULL, /*&aio_filtops,*/		/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};

void kqueue_init(void);

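/*
 * Initialize the backing pools for kqueue and knote allocations;
 * called once during kernel startup.
 */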
void
kqueue_init(void)
{

	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
	    &pool_allocator_nointr);
	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
	    &pool_allocator_nointr);
}

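/*
 * Generic attach routine for fd-based filters: hand the knote to the
 * backing file's own kqfilter method, which selects the real filterops.
 */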
int
filt_fileattach(struct knote *kn)
{
	struct file *fp = kn->kn_fp;

	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
}

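/*
 * A kqueue can itself be monitored with EVFILT_READ: the knote is hung
 * off the kqueue's selinfo and filt_kqueue() reports the number of
 * pending events.
 */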
int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

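/*
 * Attach an EVFILT_PROC knote to the target process, subject to
 * credential checks.
 */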
int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
	    (p->p_flag & P_SUGID)) && suser(curproc, 0) != 0)
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

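/*
 * Event routine for EVFILT_PROC: record any requested NOTE_* bits,
 * finish the knote on process exit, and optionally follow forks when
 * NOTE_TRACK was requested.
 */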
int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * well as select/poll can do.
	 */
	kn->kn_data = 0;
	return (1);
}

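/*
 * The kqueue(2) system call: allocate a descriptor backed by a fresh,
 * empty kqueue.  A minimal userland usage sketch (see kqueue(2) and
 * kevent(2); "fd" here stands for any descriptor of interest):
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	register interest
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	wait for one event
 */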
int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = pool_get(&kqueue_pool, PR_WAITOK);
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	FILE_SET_MATURE(fp);
	return (0);
}

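/*
 * The kevent(2) system call: copy in and register the changelist in
 * KQ_NEVENTS-sized batches, then scan for triggered events.
 */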
int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct sys_kevent_args /* {
		syscallarg(int)	fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(int)	nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(int)	nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	FREF(fp);

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
		SCARG(uap, timeout) = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (SCARG(uap, nchanges) > 0) {
		n = SCARG(uap, nchanges) > KQ_NEVENTS
			? KQ_NEVENTS : SCARG(uap, nchanges);
		error = copyin(SCARG(uap, changelist), kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)SCARG(uap, eventlist),
					    sizeof(*kevp));
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
			    SCARG(uap, timeout), p, &n);
	*retval = n;
 done:
	FRELE(fp);
	return (error);
}

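/*
 * Register a single kevent change on a kqueue: locate an existing
 * matching knote (by ident/filter) or allocate one for EV_ADD, then
 * apply EV_DELETE/EV_DISABLE/EV_ENABLE as requested.
 */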
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops = NULL;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	}

	if (fops == NULL) {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
			return (EBADF);
		FREF(fp);
		fp->f_count++;

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			if (fp != NULL)
				FRELE(fp);
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p, fdp);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, p->p_fd);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		closef(fp, p);
	return (error);
}

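/*
 * Harvest up to maxevents triggered events, sleeping until the timeout
 * expires if none are pending.  A marker knote is queued at the tail so
 * the scan terminates even as new events keep arriving behind it.
 */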
int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct proc *p, int *retval)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
			/* No timeout, just poll */
			timeout = -1;
			goto start;
		}
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}

		timeout = atv.tv_sec > 24 * 60 * 60 ?
			24 * 60 * 60 * hz : tvtohz(&atv);

		getmicrouptime(&rtv);
		timeradd(&atv, &rtv, &atv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timercmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timersub(&ttv, &rtv, &ttv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
			24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p, p->p_fd);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	*retval = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
int
kqueue_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

/*ARGSUSED*/
int
kqueue_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

/*ARGSUSED*/
int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

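/*
 * poll(2) on a kqueue reports it readable when events are pending;
 * otherwise the caller is recorded for selwakeup().
 */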
/*ARGSUSED*/
int
kqueue_poll(struct file *fp, int events, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

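/*
 * Tear down a kqueue: walk both the fd-indexed knote lists and the
 * hash of non-fd knotes, detaching and freeing every knote that
 * belongs to this kqueue, then release the kqueue itself.
 */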
/*ARGSUSED*/
int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				FREF(kn->kn_fp);
				kn->kn_fop->f_detach(kn);
				closef(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
		/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	pool_put(&kqueue_pool, kq);
	fp->f_data = NULL;

	return (0);
}

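/*
 * Wake up anyone sleeping in kqueue_scan() or in select/poll on this
 * kqueue, and notify any kqueues monitoring this one.
 */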
void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, p->p_fd);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

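/*
 * Link a knote into the owning filedesc: fd-based knotes go into the
 * per-fd list (grown in KQEXTENT increments as needed), others into
 * the kn_id hash.
 */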
void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    M_WAITOK, &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		list = malloc(size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			free(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling closef and free.
 */
void
knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp)
{
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd) {
		FREF(kn->kn_fp);
		closef(kn->kn_fp, p);
	}
	knote_free(kn);
}

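/*
 * Put a knote on its kqueue's pending-event queue and wake up any
 * waiters; knote_dequeue() below is the inverse.
 */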
void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED);

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

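/*
 * Mark every knote on the list as detached and finished (EOF/oneshot),
 * so they are returned once and then collected when the backing object
 * goes away.
 */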
void
klist_invalidate(struct klist *list)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
	}
}