1 /*	$NetBSD: sys_generic.c,v 1.48 2000/05/27 00:40:47 sommerfeld Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
41  */
42 
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/filedesc.h>
48 #include <sys/ioctl.h>
49 #include <sys/file.h>
50 #include <sys/proc.h>
51 #include <sys/socketvar.h>
52 #include <sys/signalvar.h>
53 #include <sys/uio.h>
54 #include <sys/kernel.h>
55 #include <sys/stat.h>
56 #include <sys/malloc.h>
57 #include <sys/poll.h>
58 #ifdef KTRACE
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64 
65 int selscan __P((struct proc *, fd_mask *, fd_mask *, int, register_t *));
66 int pollscan __P((struct proc *, struct pollfd *, int, register_t *));
67 
68 /*
69  * Read system call.
70  */
71 /* ARGSUSED */
72 int
73 sys_read(p, v, retval)
74 	struct proc *p;
75 	void *v;
76 	register_t *retval;
77 {
78 	struct sys_read_args /* {
79 		syscallarg(int) fd;
80 		syscallarg(void *) buf;
81 		syscallarg(size_t) nbyte;
82 	} */ *uap = v;
83 	int fd = SCARG(uap, fd);
84 	struct file *fp;
85 	struct filedesc *fdp = p->p_fd;
86 
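	/*
	 * Validate the descriptor: it must be within range, refer to an
	 * open file that is not in the middle of being closed, and have
	 * been opened for reading.
	 */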
87 	if ((u_int)fd >= fdp->fd_nfiles ||
88 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
89 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
90 	    (fp->f_flag & FREAD) == 0)
91 		return (EBADF);
92 
93 	FILE_USE(fp);
94 
95 	/* dofileread() will unuse the descriptor for us */
96 	return (dofileread(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
97 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
98 }
99 
100 int
101 dofileread(p, fd, fp, buf, nbyte, offset, flags, retval)
102 	struct proc *p;
103 	int fd;
104 	struct file *fp;
105 	void *buf;
106 	size_t nbyte;
107 	off_t *offset;
108 	int flags;
109 	register_t *retval;
110 {
111 	struct uio auio;
112 	struct iovec aiov;
113 	long cnt, error = 0;
114 #ifdef KTRACE
115 	struct iovec ktriov;
116 #endif
117 
118 	aiov.iov_base = (caddr_t)buf;
119 	aiov.iov_len = nbyte;
120 	auio.uio_iov = &aiov;
121 	auio.uio_iovcnt = 1;
122 	auio.uio_resid = nbyte;
123 	auio.uio_rw = UIO_READ;
124 	auio.uio_segflg = UIO_USERSPACE;
125 	auio.uio_procp = p;
126 
127 	/*
128 	 * Reads return ssize_t because -1 is returned on error.  Therefore
129 	 * we must restrict the length to SSIZE_MAX to avoid garbage return
130 	 * values.
131 	 */
132 	if (auio.uio_resid > SSIZE_MAX) {
133 		error = EINVAL;
134 		goto out;
135 	}
136 
137 #ifdef KTRACE
138 	/*
139 	 * if tracing, save a copy of iovec
140 	 */
141 	if (KTRPOINT(p, KTR_GENIO))
142 		ktriov = aiov;
143 #endif
144 	cnt = auio.uio_resid;
145 	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
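	/*
	 * If the transfer was interrupted (or would have blocked) after
	 * some data had already been moved, discard the error and report
	 * the partial count instead.
	 */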
146 	if (error)
147 		if (auio.uio_resid != cnt && (error == ERESTART ||
148 		    error == EINTR || error == EWOULDBLOCK))
149 			error = 0;
150 	cnt -= auio.uio_resid;
151 #ifdef KTRACE
152 	if (KTRPOINT(p, KTR_GENIO) && error == 0)
153 		ktrgenio(p, fd, UIO_READ, &ktriov, cnt, error);
154 #endif
155 	*retval = cnt;
156  out:
157 	FILE_UNUSE(fp, p);
158 	return (error);
159 }
160 
161 /*
162  * Scatter read system call.
163  */
164 int
165 sys_readv(p, v, retval)
166 	struct proc *p;
167 	void *v;
168 	register_t *retval;
169 {
170 	struct sys_readv_args /* {
171 		syscallarg(int) fd;
172 		syscallarg(const struct iovec *) iovp;
173 		syscallarg(int) iovcnt;
174 	} */ *uap = v;
175 	int fd = SCARG(uap, fd);
176 	struct file *fp;
177 	struct filedesc *fdp = p->p_fd;
178 
179 	if ((u_int)fd >= fdp->fd_nfiles ||
180 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
181 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
182 	    (fp->f_flag & FREAD) == 0)
183 		return (EBADF);
184 
185 	FILE_USE(fp);
186 
187 	/* dofilereadv() will unuse the descriptor for us */
188 	return (dofilereadv(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
189 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
190 }
191 
192 int
193 dofilereadv(p, fd, fp, iovp, iovcnt, offset, flags, retval)
194 	struct proc *p;
195 	int fd;
196 	struct file *fp;
197 	const struct iovec *iovp;
198 	int iovcnt;
199 	off_t *offset;
200 	int flags;
201 	register_t *retval;
202 {
203 	struct uio auio;
204 	struct iovec *iov;
205 	struct iovec *needfree;
206 	struct iovec aiov[UIO_SMALLIOV];
207 	long i, cnt, error = 0;
208 	u_int iovlen;
209 #ifdef KTRACE
210 	struct iovec *ktriov = NULL;
211 #endif
212 
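	/*
	 * iovec arrays of up to UIO_SMALLIOV entries are built on the
	 * stack; larger ones (up to IOV_MAX) are allocated from M_IOV
	 * and released at "done" below.
	 */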
213 	/* note: can't use iovlen until iovcnt is validated */
214 	iovlen = iovcnt * sizeof(struct iovec);
215 	if ((u_int)iovcnt > UIO_SMALLIOV) {
216 		if ((u_int)iovcnt > IOV_MAX) {
217 			error = EINVAL;
218 			goto out;
219 		}
220 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
221 		needfree = iov;
222 	} else if ((u_int)iovcnt > 0) {
223 		iov = aiov;
224 		needfree = NULL;
225 	} else {
226 		error = EINVAL;
227 		goto out;
228 	}
229 
230 	auio.uio_iov = iov;
231 	auio.uio_iovcnt = iovcnt;
232 	auio.uio_rw = UIO_READ;
233 	auio.uio_segflg = UIO_USERSPACE;
234 	auio.uio_procp = p;
235 	error = copyin(iovp, iov, iovlen);
236 	if (error)
237 		goto done;
238 	auio.uio_resid = 0;
239 	for (i = 0; i < iovcnt; i++) {
240 		auio.uio_resid += iov->iov_len;
241 		/*
242 		 * Reads return ssize_t because -1 is returned on error.
243 		 * Therefore we must restrict the length to SSIZE_MAX to
244 		 * avoid garbage return values.
245 		 */
246 		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
247 			error = EINVAL;
248 			goto done;
249 		}
250 		iov++;
251 	}
252 #ifdef KTRACE
253 	/*
254 	 * if tracing, save a copy of iovec
255 	 */
256 	if (KTRPOINT(p, KTR_GENIO))  {
257 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
258 		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
259 	}
260 #endif
261 	cnt = auio.uio_resid;
262 	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
263 	if (error)
264 		if (auio.uio_resid != cnt && (error == ERESTART ||
265 		    error == EINTR || error == EWOULDBLOCK))
266 			error = 0;
267 	cnt -= auio.uio_resid;
268 #ifdef KTRACE
269 	if (ktriov != NULL) {
270 		if (error == 0)
271 			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
272 		FREE(ktriov, M_TEMP);
273 	}
274 #endif
275 	*retval = cnt;
276  done:
277 	if (needfree)
278 		FREE(needfree, M_IOV);
279  out:
280 	FILE_UNUSE(fp, p);
281 	return (error);
282 }
283 
284 /*
285  * Write system call
286  */
287 int
288 sys_write(p, v, retval)
289 	struct proc *p;
290 	void *v;
291 	register_t *retval;
292 {
293 	struct sys_write_args /* {
294 		syscallarg(int) fd;
295 		syscallarg(const void *) buf;
296 		syscallarg(size_t) nbyte;
297 	} */ *uap = v;
298 	int fd = SCARG(uap, fd);
299 	struct file *fp;
300 	struct filedesc *fdp = p->p_fd;
301 
302 	if ((u_int)fd >= fdp->fd_nfiles ||
303 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
304 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
305 	    (fp->f_flag & FWRITE) == 0)
306 		return (EBADF);
307 
308 	FILE_USE(fp);
309 
310 	/* dofilewrite() will unuse the descriptor for us */
311 	return (dofilewrite(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
312 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
313 }
314 
315 int
316 dofilewrite(p, fd, fp, buf, nbyte, offset, flags, retval)
317 	struct proc *p;
318 	int fd;
319 	struct file *fp;
320 	const void *buf;
321 	size_t nbyte;
322 	off_t *offset;
323 	int flags;
324 	register_t *retval;
325 {
326 	struct uio auio;
327 	struct iovec aiov;
328 	long cnt, error = 0;
329 #ifdef KTRACE
330 	struct iovec ktriov;
331 #endif
332 
333 	aiov.iov_base = (caddr_t)buf;		/* XXX kills const */
334 	aiov.iov_len = nbyte;
335 	auio.uio_iov = &aiov;
336 	auio.uio_iovcnt = 1;
337 	auio.uio_resid = nbyte;
338 	auio.uio_rw = UIO_WRITE;
339 	auio.uio_segflg = UIO_USERSPACE;
340 	auio.uio_procp = p;
341 
342 	/*
343 	 * Writes return ssize_t because -1 is returned on error.  Therefore
344 	 * we must restrict the length to SSIZE_MAX to avoid garbage return
345 	 * values.
346 	 */
347 	if (auio.uio_resid > SSIZE_MAX) {
348 		error = EINVAL;
349 		goto out;
350 	}
351 
352 #ifdef KTRACE
353 	/*
354 	 * if tracing, save a copy of iovec
355 	 */
356 	if (KTRPOINT(p, KTR_GENIO))
357 		ktriov = aiov;
358 #endif
359 	cnt = auio.uio_resid;
360 	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
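	/*
	 * As with reads, a write interrupted after some data has moved is
	 * reported as a short transfer rather than an error; EPIPE also
	 * delivers SIGPIPE, preserving the traditional pipe semantics.
	 */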
361 	if (error) {
362 		if (auio.uio_resid != cnt && (error == ERESTART ||
363 		    error == EINTR || error == EWOULDBLOCK))
364 			error = 0;
365 		if (error == EPIPE)
366 			psignal(p, SIGPIPE);
367 	}
368 	cnt -= auio.uio_resid;
369 #ifdef KTRACE
370 	if (KTRPOINT(p, KTR_GENIO) && error == 0)
371 		ktrgenio(p, fd, UIO_WRITE, &ktriov, cnt, error);
372 #endif
373 	*retval = cnt;
374  out:
375 	FILE_UNUSE(fp, p);
376 	return (error);
377 }
378 
379 /*
380  * Gather write system call
381  */
382 int
383 sys_writev(p, v, retval)
384 	struct proc *p;
385 	void *v;
386 	register_t *retval;
387 {
388 	struct sys_writev_args /* {
389 		syscallarg(int) fd;
390 		syscallarg(const struct iovec *) iovp;
391 		syscallarg(int) iovcnt;
392 	} */ *uap = v;
393 	int fd = SCARG(uap, fd);
394 	struct file *fp;
395 	struct filedesc *fdp = p->p_fd;
396 
397 	if ((u_int)fd >= fdp->fd_nfiles ||
398 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
399 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
400 	    (fp->f_flag & FWRITE) == 0)
401 		return (EBADF);
402 
403 	FILE_USE(fp);
404 
405 	/* dofilewritev() will unuse the descriptor for us */
406 	return (dofilewritev(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
407 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
408 }
409 
410 int
411 dofilewritev(p, fd, fp, iovp, iovcnt, offset, flags, retval)
412 	struct proc *p;
413 	int fd;
414 	struct file *fp;
415 	const struct iovec *iovp;
416 	int iovcnt;
417 	off_t *offset;
418 	int flags;
419 	register_t *retval;
420 {
421 	struct uio auio;
422 	struct iovec *iov;
423 	struct iovec *needfree;
424 	struct iovec aiov[UIO_SMALLIOV];
425 	long i, cnt, error = 0;
426 	u_int iovlen;
427 #ifdef KTRACE
428 	struct iovec *ktriov = NULL;
429 #endif
430 
431 	/* note: can't use iovlen until iovcnt is validated */
432 	iovlen = iovcnt * sizeof(struct iovec);
433 	if ((u_int)iovcnt > UIO_SMALLIOV) {
434 		if ((u_int)iovcnt > IOV_MAX) {
435 			error = EINVAL;
			goto out;
		}
436 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
437 		needfree = iov;
438 	} else if ((u_int)iovcnt > 0) {
439 		iov = aiov;
440 		needfree = NULL;
441 	} else {
442 		error = EINVAL;
443 		goto out;
444 	}
445 
446 	auio.uio_iov = iov;
447 	auio.uio_iovcnt = iovcnt;
448 	auio.uio_rw = UIO_WRITE;
449 	auio.uio_segflg = UIO_USERSPACE;
450 	auio.uio_procp = p;
451 	error = copyin(iovp, iov, iovlen);
452 	if (error)
453 		goto done;
454 	auio.uio_resid = 0;
455 	for (i = 0; i < iovcnt; i++) {
456 		auio.uio_resid += iov->iov_len;
457 		/*
458 		 * Writes return ssize_t because -1 is returned on error.
459 		 * Therefore we must restrict the length to SSIZE_MAX to
460 		 * avoid garbage return values.
461 		 */
462 		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
463 			error = EINVAL;
464 			goto done;
465 		}
466 		iov++;
467 	}
468 #ifdef KTRACE
469 	/*
470 	 * if tracing, save a copy of iovec
471 	 */
472 	if (KTRPOINT(p, KTR_GENIO))  {
473 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
474 		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
475 	}
476 #endif
477 	cnt = auio.uio_resid;
478 	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
479 	if (error) {
480 		if (auio.uio_resid != cnt && (error == ERESTART ||
481 		    error == EINTR || error == EWOULDBLOCK))
482 			error = 0;
483 		if (error == EPIPE)
484 			psignal(p, SIGPIPE);
485 	}
486 	cnt -= auio.uio_resid;
487 #ifdef KTRACE
488 	if (ktriov != NULL) {
489 		if (error == 0)
490 			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
491 		FREE(ktriov, M_TEMP);
492 	}
493 #endif
494 	*retval = cnt;
495  done:
496 	if (needfree)
497 		FREE(needfree, M_IOV);
498  out:
499 	FILE_UNUSE(fp, p);
500 	return (error);
501 }
502 
503 /*
504  * Ioctl system call
505  */
506 /* ARGSUSED */
507 int
508 sys_ioctl(p, v, retval)
509 	struct proc *p;
510 	void *v;
511 	register_t *retval;
512 {
513 	struct sys_ioctl_args /* {
514 		syscallarg(int) fd;
515 		syscallarg(u_long) com;
516 		syscallarg(caddr_t) data;
517 	} */ *uap = v;
518 	struct file *fp;
519 	struct filedesc *fdp;
520 	u_long com;
521 	int error = 0;
522 	u_int size;
523 	caddr_t data, memp;
524 	int tmp;
525 #define STK_PARAMS	128
526 	u_long stkbuf[STK_PARAMS/sizeof(u_long)];
527 
528 	fdp = p->p_fd;
529 	if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
530 	    (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL ||
531 	    (fp->f_iflags & FIF_WANTCLOSE) != 0)
532 		return (EBADF);
533 
534 	FILE_USE(fp);
535 
536 	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
537 		error = EBADF;
538 		goto out;
539 	}
540 
541 	switch (com = SCARG(uap, com)) {
542 	case FIONCLEX:
543 		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
544 		goto out;
545 
546 	case FIOCLEX:
547 		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
548 		goto out;
549 	}
550 
551 	/*
552 	 * Interpret high order word to find amount of data to be
553 	 * copied to/from the user's address space.
554 	 */
555 	size = IOCPARM_LEN(com);
556 	if (size > IOCPARM_MAX) {
557 		error = ENOTTY;
558 		goto out;
559 	}
560 	memp = NULL;
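	/*
	 * Argument data that fits in stkbuf is staged on the stack;
	 * anything larger (at most IOCPARM_MAX bytes) is allocated from
	 * M_IOCTLOPS and freed before returning.
	 */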
561 	if (size > sizeof(stkbuf)) {
562 		memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
563 		data = memp;
564 	} else
565 		data = (caddr_t)stkbuf;
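	/*
	 * Decode the direction encoded in the command: IOC_IN arguments
	 * are copied in from user space (a zero size passes the user
	 * pointer itself), and IOC_VOID passes the raw argument through.
	 */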
566 	if (com&IOC_IN) {
567 		if (size) {
568 			error = copyin(SCARG(uap, data), data, size);
569 			if (error) {
570 				if (memp)
571 					free(memp, M_IOCTLOPS);
572 				goto out;
573 			}
574 		} else
575 			*(caddr_t *)data = SCARG(uap, data);
576 	} else if ((com&IOC_OUT) && size)
577 		/*
578 		 * Zero the buffer so the user always
579 		 * gets back something deterministic.
580 		 */
581 		memset(data, 0, size);
582 	else if (com&IOC_VOID)
583 		*(caddr_t *)data = SCARG(uap, data);
584 
585 	switch (com) {
586 
587 	case FIONBIO:
588 		if ((tmp = *(int *)data) != 0)
589 			fp->f_flag |= FNONBLOCK;
590 		else
591 			fp->f_flag &= ~FNONBLOCK;
592 		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p);
593 		break;
594 
595 	case FIOASYNC:
596 		if ((tmp = *(int *)data) != 0)
597 			fp->f_flag |= FASYNC;
598 		else
599 			fp->f_flag &= ~FASYNC;
600 		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p);
601 		break;
602 
603 	case FIOSETOWN:
604 		tmp = *(int *)data;
605 		if (fp->f_type == DTYPE_SOCKET) {
606 			((struct socket *)fp->f_data)->so_pgid = tmp;
607 			error = 0;
608 			break;
609 		}
610 		if (tmp <= 0) {
611 			tmp = -tmp;
612 		} else {
613 			struct proc *p1 = pfind(tmp);
614 			if (p1 == 0) {
615 				error = ESRCH;
616 				break;
617 			}
618 			tmp = p1->p_pgrp->pg_id;
619 		}
620 		error = (*fp->f_ops->fo_ioctl)
621 			(fp, TIOCSPGRP, (caddr_t)&tmp, p);
622 		break;
623 
624 	case FIOGETOWN:
625 		if (fp->f_type == DTYPE_SOCKET) {
626 			error = 0;
627 			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
628 			break;
629 		}
630 		error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p);
631 		*(int *)data = -*(int *)data;
632 		break;
633 
634 	default:
635 		error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);
636 		/*
637 		 * Copy any data to user, size was
638 		 * already set and checked above.
639 		 */
640 		if (error == 0 && (com&IOC_OUT) && size)
641 			error = copyout(data, SCARG(uap, data), size);
642 		break;
643 	}
644 	if (memp)
645 		free(memp, M_IOCTLOPS);
646  out:
647 	FILE_UNUSE(fp, p);
648 	return (error);
649 }
650 
651 int	selwait, nselcoll;
652 
653 /*
654  * Select system call.
655  */
656 int
657 sys_select(p, v, retval)
658 	struct proc *p;
659 	void *v;
660 	register_t *retval;
661 {
662 	struct sys_select_args /* {
663 		syscallarg(int) nd;
664 		syscallarg(fd_set *) in;
665 		syscallarg(fd_set *) ou;
666 		syscallarg(fd_set *) ex;
667 		syscallarg(struct timeval *) tv;
668 	} */ *uap = v;
669 	caddr_t bits;
670 	char smallbits[howmany(FD_SETSIZE, NFDBITS) * sizeof(fd_mask) * 6];
671 	struct timeval atv;
672 	int s, ncoll, error = 0, timo;
673 	size_t ni;
674 
675 	if (SCARG(uap, nd) < 0)
676 		return (EINVAL);
677 	if (SCARG(uap, nd) > p->p_fd->fd_nfiles) {
678 		/* forgiving; slightly wrong */
679 		SCARG(uap, nd) = p->p_fd->fd_nfiles;
680 	}
681 	ni = howmany(SCARG(uap, nd), NFDBITS) * sizeof(fd_mask);
682 	if (ni * 6 > sizeof(smallbits))
683 		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
684 	else
685 		bits = smallbits;
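	/*
	 * The bits buffer holds six regions of ni bytes each: the input
	 * sets (in, ou, ex) in slots 0-2 and the corresponding result
	 * sets in slots 3-5, copied back to the user on success.
	 */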
686 
687 #define	getbits(name, x) \
688 	if (SCARG(uap, name)) { \
689 		error = copyin(SCARG(uap, name), bits + ni * x, ni); \
690 		if (error) \
691 			goto done; \
692 	} else \
693 		memset(bits + ni * x, 0, ni);
694 	getbits(in, 0);
695 	getbits(ou, 1);
696 	getbits(ex, 2);
697 #undef	getbits
698 
699 	if (SCARG(uap, tv)) {
700 		error = copyin(SCARG(uap, tv), (caddr_t)&atv,
701 			sizeof(atv));
702 		if (error)
703 			goto done;
704 		if (itimerfix(&atv)) {
705 			error = EINVAL;
706 			goto done;
707 		}
708 		s = splclock();
709 		timeradd(&atv, &time, &atv);
710 		timo = hzto(&atv);
711 		/*
712 		 * Avoid inadvertently sleeping forever.
713 		 */
714 		if (timo == 0)
715 			timo = 1;
716 		splx(s);
717 	} else
718 		timo = 0;
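	/*
	 * Scan the descriptors; if none are ready, sleep on selwait until
	 * a selwakeup() or the timeout.  P_SELECT and the nselcoll
	 * generation count detect wakeups and collisions that race with
	 * the scan, in which case the scan is simply retried.
	 */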
719 retry:
720 	ncoll = nselcoll;
721 	p->p_flag |= P_SELECT;
722 	error = selscan(p, (fd_mask *)(bits + ni * 0),
723 			   (fd_mask *)(bits + ni * 3), SCARG(uap, nd), retval);
724 	if (error || *retval)
725 		goto done;
726 	s = splhigh();
727 	if (timo && timercmp(&time, &atv, >=)) {
728 		splx(s);
729 		goto done;
730 	}
731 	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
732 		splx(s);
733 		goto retry;
734 	}
735 	p->p_flag &= ~P_SELECT;
736 	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
737 	splx(s);
738 	if (error == 0)
739 		goto retry;
740 done:
741 	p->p_flag &= ~P_SELECT;
742 	/* select is not restarted after signals... */
743 	if (error == ERESTART)
744 		error = EINTR;
745 	if (error == EWOULDBLOCK)
746 		error = 0;
747 	if (error == 0) {
748 #define	putbits(name, x) \
749 		if (SCARG(uap, name)) { \
750 			error = copyout(bits + ni * x, SCARG(uap, name), ni); \
751 			if (error) \
752 				goto out; \
753 		}
754 		putbits(in, 3);
755 		putbits(ou, 4);
756 		putbits(ex, 5);
757 #undef putbits
758 	}
759 out:
760 	if (ni * 6 > sizeof(smallbits))
761 		free(bits, M_TEMP);
762 	return (error);
763 }
764 
765 int
766 selscan(p, ibitp, obitp, nfd, retval)
767 	struct proc *p;
768 	fd_mask *ibitp, *obitp;
769 	int nfd;
770 	register_t *retval;
771 {
772 	struct filedesc *fdp = p->p_fd;
773 	int msk, i, j, fd;
774 	fd_mask ibits, obits;
775 	struct file *fp;
776 	int n = 0;
777 	static int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
778 			       POLLWRNORM | POLLHUP | POLLERR,
779 			       POLLRDBAND };
780 
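	/*
	 * Poll each of the three descriptor sets with the matching event
	 * mask from flag[]; a descriptor whose fo_poll reports any of
	 * those events gets its bit set in the output set.
	 */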
781 	for (msk = 0; msk < 3; msk++) {
782 		for (i = 0; i < nfd; i += NFDBITS) {
783 			ibits = *ibitp++;
784 			obits = 0;
785 			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
786 				ibits &= ~(1 << j);
787 				fp = fdp->fd_ofiles[fd];
788 				if (fp == NULL ||
789 				    (fp->f_iflags & FIF_WANTCLOSE) != 0)
790 					return (EBADF);
791 				FILE_USE(fp);
792 				if ((*fp->f_ops->fo_poll)(fp, flag[msk], p)) {
793 					obits |= (1 << j);
794 					n++;
795 				}
796 				FILE_UNUSE(fp, p);
797 			}
798 			*obitp++ = obits;
799 		}
800 	}
801 	*retval = n;
802 	return (0);
803 }
804 
805 /*
806  * Poll system call.
807  */
808 int
809 sys_poll(p, v, retval)
810 	struct proc *p;
811 	void *v;
812 	register_t *retval;
813 {
814 	struct sys_poll_args /* {
815 		syscallarg(struct pollfd *) fds;
816 		syscallarg(u_int) nfds;
817 		syscallarg(int) timeout;
818 	} */ *uap = v;
819 	caddr_t bits;
820 	char smallbits[32 * sizeof(struct pollfd)];
821 	struct timeval atv;
822 	int s, ncoll, error = 0, timo;
823 	size_t ni;
824 
825 	if (SCARG(uap, nfds) > p->p_fd->fd_nfiles) {
826 		/* forgiving; slightly wrong */
827 		SCARG(uap, nfds) = p->p_fd->fd_nfiles;
828 	}
829 	ni = SCARG(uap, nfds) * sizeof(struct pollfd);
830 	if (ni > sizeof(smallbits))
831 		bits = malloc(ni, M_TEMP, M_WAITOK);
832 	else
833 		bits = smallbits;
834 
835 	error = copyin(SCARG(uap, fds), bits, ni);
836 	if (error)
837 		goto done;
838 
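	/*
	 * A timeout of INFTIM leaves timo at 0, so the tsleep() below
	 * blocks until a selwakeup(); otherwise the millisecond timeout
	 * is converted to an absolute time and then to clock ticks.
	 */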
839 	if (SCARG(uap, timeout) != INFTIM) {
840 		atv.tv_sec = SCARG(uap, timeout) / 1000;
841 		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
842 		if (itimerfix(&atv)) {
843 			error = EINVAL;
844 			goto done;
845 		}
846 		s = splclock();
847 		timeradd(&atv, &time, &atv);
848 		timo = hzto(&atv);
849 		/*
850 		 * Avoid inadvertently sleeping forever.
851 		 */
852 		if (timo == 0)
853 			timo = 1;
854 		splx(s);
855 	} else
856 		timo = 0;
857 retry:
858 	ncoll = nselcoll;
859 	p->p_flag |= P_SELECT;
860 	error = pollscan(p, (struct pollfd *)bits, SCARG(uap, nfds), retval);
861 	if (error || *retval)
862 		goto done;
863 	s = splhigh();
864 	if (timo && timercmp(&time, &atv, >=)) {
865 		splx(s);
866 		goto done;
867 	}
868 	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
869 		splx(s);
870 		goto retry;
871 	}
872 	p->p_flag &= ~P_SELECT;
873 	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
874 	splx(s);
875 	if (error == 0)
876 		goto retry;
877 done:
878 	p->p_flag &= ~P_SELECT;
879 	/* poll is not restarted after signals... */
880 	if (error == ERESTART)
881 		error = EINTR;
882 	if (error == EWOULDBLOCK)
883 		error = 0;
884 	if (error == 0) {
885 		error = copyout(bits, SCARG(uap, fds), ni);
886 		if (error)
887 			goto out;
888 	}
889 out:
890 	if (ni > sizeof(smallbits))
891 		free(bits, M_TEMP);
892 	return (error);
893 }
894 
895 int
896 pollscan(p, fds, nfd, retval)
897 	struct proc *p;
898 	struct pollfd *fds;
899 	int nfd;
900 	register_t *retval;
901 {
902 	struct filedesc *fdp = p->p_fd;
903 	int i;
904 	struct file *fp;
905 	int n = 0;
906 
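	/*
	 * Descriptors that are out of range or not open are marked
	 * POLLNVAL; otherwise fo_poll reports the ready events, with
	 * POLLERR and POLLHUP always included in the query.
	 */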
907 	for (i = 0; i < nfd; i++, fds++) {
908 		if ((u_int)fds->fd >= fdp->fd_nfiles) {
909 			fds->revents = POLLNVAL;
910 			n++;
911 		} else {
912 			fp = fdp->fd_ofiles[fds->fd];
913 			if (fp == NULL ||
914 			    (fp->f_iflags & FIF_WANTCLOSE) != 0) {
915 				fds->revents = POLLNVAL;
916 				n++;
917 			} else {
918 				FILE_USE(fp);
919 				fds->revents = (*fp->f_ops->fo_poll)(fp,
920 				    fds->events | POLLERR | POLLHUP, p);
921 				if (fds->revents != 0)
922 					n++;
923 				FILE_UNUSE(fp, p);
924 			}
925 		}
926 	}
927 	*retval = n;
928 	return (0);
929 }
930 
931 /*ARGSUSED*/
932 int
933 seltrue(dev, events, p)
934 	dev_t dev;
935 	int events;
936 	struct proc *p;
937 {
938 
939 	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
940 }
941 
942 /*
943  * Record a select request.
944  */
945 void
946 selrecord(selector, sip)
947 	struct proc *selector;
948 	struct selinfo *sip;
949 {
950 	struct proc *p;
951 	pid_t mypid;
952 
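	/*
	 * Record the selecting process in the selinfo.  If a different
	 * process is already recorded and is sleeping on selwait, note a
	 * collision so selwakeup() wakes every selector.
	 */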
953 	mypid = selector->p_pid;
954 	if (sip->si_pid == mypid)
955 		return;
956 	if (sip->si_pid && (p = pfind(sip->si_pid)) &&
957 	    p->p_wchan == (caddr_t)&selwait)
958 		sip->si_flags |= SI_COLL;
959 	else
960 		sip->si_pid = mypid;
961 }
962 
963 /*
964  * Do a wakeup when a selectable event occurs.
965  */
966 void
967 selwakeup(sip)
968 	struct selinfo *sip;
969 {
970 	struct proc *p;
971 	int s;
972 
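	/*
	 * If a collision was recorded, everyone sleeping in select()/poll()
	 * is woken (they share the selwait channel) and nselcoll is bumped
	 * so interrupted scans retry.  The recorded process itself is then
	 * made runnable, or has its P_SELECT flag cleared if it has not
	 * yet gone to sleep.
	 */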
973 	if (sip->si_pid == 0)
974 		return;
975 	if (sip->si_flags & SI_COLL) {
976 		nselcoll++;
977 		sip->si_flags &= ~SI_COLL;
978 		wakeup((caddr_t)&selwait);
979 	}
980 	p = pfind(sip->si_pid);
981 	sip->si_pid = 0;
982 	if (p != NULL) {
983 		s = splhigh();
984 		if (p->p_wchan == (caddr_t)&selwait) {
985 			if (p->p_stat == SSLEEP)
986 				setrunnable(p);
987 			else
988 				unsleep(p);
989 		} else if (p->p_flag & P_SELECT)
990 			p->p_flag &= ~P_SELECT;
991 		splx(s);
992 	}
993 }
994