/*	$OpenBSD: kern_event.c,v 1.16 2002/02/08 13:53:28 art Exp $	*/

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.22 2001/02/23 20:32:42 jlemon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/pool.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p, int *retval);

int	kqueue_read(struct file *fp, off_t *poff, struct uio *uio,
		    struct ucred *cred);
int	kqueue_write(struct file *fp, off_t *poff, struct uio *uio,
		    struct ucred *cred);
int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
int	kqueue_select(struct file *fp, int which, struct proc *p);
int	kqueue_kqfilter(struct file *fp, struct knote *kn);
int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
int	kqueue_close(struct file *fp, struct proc *p);
void	kqueue_wakeup(struct kqueue *kq);

struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_select,
	kqueue_kqfilter,
	kqueue_stat,
	kqueue_close
};

void	knote_attach(struct knote *kn, struct filedesc *fdp);
void	knote_drop(struct knote *kn, struct proc *p);
void	knote_enqueue(struct knote *kn);
void	knote_dequeue(struct knote *kn);
#define knote_alloc() ((struct knote *)pool_get(&knote_pool, PR_WAITOK))
#define knote_free(kn) (pool_put(&knote_pool, kn))

void	filt_kqdetach(struct knote *kn);
int	filt_kqueue(struct knote *kn, long hint);
int	filt_procattach(struct knote *kn);
void	filt_procdetach(struct knote *kn);
int	filt_proc(struct knote *kn, long hint);
int	filt_fileattach(struct knote *kn);

struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

struct	pool knote_pool;
struct	pool kqueue_pool;

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
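/*
 * Worked example (illustrative only): for a non-fd identifier such as a
 * pid of 0x1234 and the default mask of KN_HASHSIZE - 1 == 63,
 * KN_HASH(0x1234, 63) == ((0x1234 ^ 0x12) & 0x3f) == (0x1226 & 0x3f) == 0x26,
 * so the knote lands in bucket 38 of fdp->fd_knhash.
 */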

extern struct filterops sig_filtops;
#ifdef notyet
extern struct filterops aio_filtops;
#endif

/*
 * Table for all system-defined filters.
 */
struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	NULL, /*&aio_filtops,*/		/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};
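/*
 * Note on indexing (illustrative): the EVFILT_* constants are small negative
 * numbers, so kqueue_register() below uses the one's complement of the
 * filter to index this table.  For example, if EVFILT_READ is -1, then
 * sysfilt_ops[~EVFILT_READ] is sysfilt_ops[0], the file_filtops entry above.
 */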

/* XXX - call this on startup instead. */
void kqueue_init(void);

int kqueue_initialized;

void
kqueue_init(void)
{
	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
	    &pool_allocator_nointr);
	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
	    &pool_allocator_nointr);

	kqueue_initialized = 1;
}

int
filt_fileattach(struct knote *kn)
{
	struct file *fp = kn->kn_fp;

	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
}

int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
	        (p->p_flag & P_SUGID)) &&
	    suser(curproc->p_ucred, &curproc->p_acflag) != 0)
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
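/*
 * Userland usage sketch (illustrative, not part of this file): tracking a
 * process with EVFILT_PROC and NOTE_TRACK.  When the target forks, the
 * kernel path above re-enters kqueue_register() with EV_FLAG1 set and
 * delivers a NOTE_CHILD event for the new pid.  pid and kq are assumed,
 * pre-existing values; error handling is abbreviated.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */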

int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	if (!kqueue_initialized)
		kqueue_init();

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = pool_get(&kqueue_pool, PR_WAITOK);
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	FILE_SET_MATURE(fp);
	return (0);
}

int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct sys_kevent_args /* {
		syscallarg(int)	fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(int)	nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(int)	nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	FREF(fp);

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
		SCARG(uap, timeout) = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (SCARG(uap, nchanges) > 0) {
		n = SCARG(uap, nchanges) > KQ_NEVENTS
			? KQ_NEVENTS : SCARG(uap, nchanges);
		error = copyin(SCARG(uap, changelist), kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)SCARG(uap, eventlist),
					    sizeof(*kevp));
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
			    SCARG(uap, timeout), p, &n);
	*retval = n;
 done:
	FRELE(fp);
	return (error);
}
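/*
 * Userland usage sketch (illustrative, not part of this file): the two
 * syscalls above are normally used together, e.g. to wait for a socket to
 * become readable.  sock_fd is an assumed, pre-existing descriptor and
 * error handling is abbreviated.
 *
 *	struct kevent ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&ev, sock_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	n = kevent(kq, &ev, 1, &ev, 1, NULL);
 *	if (n > 0)
 *		printf("%ld bytes readable\n", (long)ev.data);
 */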

int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops = NULL;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	}

	if (fops == NULL) {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
			return (EBADF);
		FREF(fp);
		fp->f_count++;

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			FRELE(fp);
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		closef(fp, p);
	return (error);
}
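/*
 * Usage note (illustrative): the same entry point handles removal.  A
 * userland caller deletes a previously added event with EV_DELETE; if no
 * matching knote exists, the ENOENT path above is taken.  kq and fd are
 * assumed, pre-existing values.
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */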

int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct proc *p, int *retval)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
			/* No timeout, just poll */
			timeout = -1;
			goto start;
		}
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}

		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		timeout = hzto(&atv);
		if (timeout <= 0)
			goto done;
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	*retval = maxevents - count;
	return (error);
}
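/*
 * Timeout semantics recap (illustrative): a NULL tsp blocks until an event
 * arrives (a timeout of 0 passed to tsleep means "no timeout"), a zero
 * timespec polls once without sleeping (timeout -1 above yields EWOULDBLOCK,
 * which is reported as success with zero events), and any other value
 * sleeps for at most that long.  For example, a caller wanting a pure poll
 * from userland passes:
 *
 *	struct timespec ts = { 0, 0 };
 *	n = kevent(kq, NULL, 0, evlist, nevents, &ts);
 */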

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
int
kqueue_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

/*ARGSUSED*/
int
kqueue_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

/*ARGSUSED*/
int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
int
kqueue_select(struct file *fp, int which, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int res = 0;
	int s = splnet();

	if (which == FREAD) {
		if (kq->kq_count) {
			res = 1;
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (res);
}

/*ARGSUSED*/
int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*ARGSUSED*/
int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				FREF(kn->kn_fp);
				kn->kn_fop->f_detach(kn);
				closef(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
		/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	pool_put(&kqueue_pool, kq);
	fp->f_data = NULL;

	return (0);
}

void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
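/*
 * Subsystem usage sketch (illustrative): producers normally go through the
 * KNOTE() macro from <sys/event.h> (a thin wrapper around this function)
 * rather than calling knote() directly.  A driver or socket layer typically
 * keeps a klist in its selinfo and, whenever state changes, does something
 * like:
 *
 *	KNOTE(&sc->sc_rsel.si_note, 0);
 *
 * sc and sc_rsel are assumed names; the hint argument is filter-specific
 * (e.g. the NOTE_* value handed to the proc filter above).
 */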

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    M_WAITOK, &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		list = malloc(size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			free(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling closef and free.
 */
void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd) {
		FREF(kn->kn_fp);
		closef(kn->kn_fp, p);
	}
	knote_free(kn);
}


void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED);

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}
909