xref: /netbsd-src/sys/kern/sys_generic.c (revision 481fca6e59249d8ffcf24fef7cfbe7b131bfb080)
1 /*	$NetBSD: sys_generic.c,v 1.49 2000/07/13 01:32:33 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
41  */
42 
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/filedesc.h>
48 #include <sys/ioctl.h>
49 #include <sys/file.h>
50 #include <sys/proc.h>
51 #include <sys/socketvar.h>
52 #include <sys/signalvar.h>
53 #include <sys/uio.h>
54 #include <sys/kernel.h>
55 #include <sys/stat.h>
56 #include <sys/malloc.h>
57 #include <sys/poll.h>
58 #ifdef KTRACE
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64 
65 int selscan __P((struct proc *, fd_mask *, fd_mask *, int, register_t *));
66 int pollscan __P((struct proc *, struct pollfd *, int, register_t *));
67 
68 /*
69  * Read system call.
70  */
71 /* ARGSUSED */
72 int
73 sys_read(p, v, retval)
74 	struct proc *p;
75 	void *v;
76 	register_t *retval;
77 {
78 	struct sys_read_args /* {
79 		syscallarg(int) fd;
80 		syscallarg(void *) buf;
81 		syscallarg(size_t) nbyte;
82 	} */ *uap = v;
83 	int fd = SCARG(uap, fd);
84 	struct file *fp;
85 	struct filedesc *fdp = p->p_fd;
86 
87 	if ((u_int)fd >= fdp->fd_nfiles ||
88 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
89 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
90 	    (fp->f_flag & FREAD) == 0)
91 		return (EBADF);
92 
93 	FILE_USE(fp);
94 
95 	/* dofileread() will unuse the descriptor for us */
96 	return (dofileread(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
97 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
98 }
99 
/*
 * Common code for read(2): perform a single-segment read through
 * fp's fo_read op.  The number of bytes transferred is returned via
 * retval.  Consumes the caller's FILE_USE() reference on fp in all
 * cases, including errors.
 */
int
dofileread(p, fd, fp, buf, nbyte, offset, flags, retval)
	struct proc *p;		/* calling process */
	int fd;			/* descriptor number (used only for ktrace) */
	struct file *fp;	/* file, already FILE_USE()d by caller */
	void *buf;		/* user-space destination buffer */
	size_t nbyte;		/* requested transfer length */
	off_t *offset;		/* position to read from (may be updated) */
	int flags;		/* FOF_* flags, e.g. FOF_UPDATE_OFFSET */
	register_t *retval;	/* out: bytes actually transferred */
{
	struct uio auio;
	struct iovec aiov;
	long cnt, error = 0;
#ifdef KTRACE
	struct iovec ktriov;
#endif

	/* Build a one-element uio describing the user buffer. */
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Reads return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		/* A partial transfer cut short by a signal still succeeds. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;	/* bytes actually moved */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_READ, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
160 
161 /*
162  * Scatter read system call.
163  */
164 int
165 sys_readv(p, v, retval)
166 	struct proc *p;
167 	void *v;
168 	register_t *retval;
169 {
170 	struct sys_readv_args /* {
171 		syscallarg(int) fd;
172 		syscallarg(const struct iovec *) iovp;
173 		syscallarg(int) iovcnt;
174 	} */ *uap = v;
175 	int fd = SCARG(uap, fd);
176 	struct file *fp;
177 	struct filedesc *fdp = p->p_fd;
178 
179 	if ((u_int)fd >= fdp->fd_nfiles ||
180 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
181 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
182 	    (fp->f_flag & FREAD) == 0)
183 		return (EBADF);
184 
185 	FILE_USE(fp);
186 
187 	/* dofilereadv() will unuse the descriptor for us */
188 	return (dofilereadv(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
189 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
190 }
191 
192 int
193 dofilereadv(p, fd, fp, iovp, iovcnt, offset, flags, retval)
194 	struct proc *p;
195 	int fd;
196 	struct file *fp;
197 	const struct iovec *iovp;
198 	int iovcnt;
199 	off_t *offset;
200 	int flags;
201 	register_t *retval;
202 {
203 	struct uio auio;
204 	struct iovec *iov;
205 	struct iovec *needfree;
206 	struct iovec aiov[UIO_SMALLIOV];
207 	long i, cnt, error = 0;
208 	u_int iovlen;
209 #ifdef KTRACE
210 	struct iovec *ktriov = NULL;
211 #endif
212 
213 	/* note: can't use iovlen until iovcnt is validated */
214 	iovlen = iovcnt * sizeof(struct iovec);
215 	if ((u_int)iovcnt > UIO_SMALLIOV) {
216 		if ((u_int)iovcnt > IOV_MAX) {
217 			error = EINVAL;
218 			goto out;
219 		}
220 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
221 		needfree = iov;
222 	} else if ((u_int)iovcnt > 0) {
223 		iov = aiov;
224 		needfree = NULL;
225 	} else {
226 		error = EINVAL;
227 		goto out;
228 	}
229 
230 	auio.uio_iov = iov;
231 	auio.uio_iovcnt = iovcnt;
232 	auio.uio_rw = UIO_READ;
233 	auio.uio_segflg = UIO_USERSPACE;
234 	auio.uio_procp = p;
235 	error = copyin(iovp, iov, iovlen);
236 	if (error)
237 		goto done;
238 	auio.uio_resid = 0;
239 	for (i = 0; i < iovcnt; i++) {
240 		auio.uio_resid += iov->iov_len;
241 		/*
242 		 * Reads return ssize_t because -1 is returned on error.
243 		 * Therefore we must restrict the length to SSIZE_MAX to
244 		 * avoid garbage return values.
245 		 */
246 		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
247 			error = EINVAL;
248 			goto done;
249 		}
250 		iov++;
251 	}
252 #ifdef KTRACE
253 	/*
254 	 * if tracing, save a copy of iovec
255 	 */
256 	if (KTRPOINT(p, KTR_GENIO))  {
257 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
258 		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
259 	}
260 #endif
261 	cnt = auio.uio_resid;
262 	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
263 	if (error)
264 		if (auio.uio_resid != cnt && (error == ERESTART ||
265 		    error == EINTR || error == EWOULDBLOCK))
266 			error = 0;
267 	cnt -= auio.uio_resid;
268 #ifdef KTRACE
269 	if (KTRPOINT(p, KTR_GENIO))
270 		if (error == 0) {
271 			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
272 		FREE(ktriov, M_TEMP);
273 	}
274 #endif
275 	*retval = cnt;
276  done:
277 	if (needfree)
278 		FREE(needfree, M_IOV);
279  out:
280 	FILE_UNUSE(fp, p);
281 	return (error);
282 }
283 
284 /*
285  * Write system call
286  */
287 int
288 sys_write(p, v, retval)
289 	struct proc *p;
290 	void *v;
291 	register_t *retval;
292 {
293 	struct sys_write_args /* {
294 		syscallarg(int) fd;
295 		syscallarg(const void *) buf;
296 		syscallarg(size_t) nbyte;
297 	} */ *uap = v;
298 	int fd = SCARG(uap, fd);
299 	struct file *fp;
300 	struct filedesc *fdp = p->p_fd;
301 
302 	if ((u_int)fd >= fdp->fd_nfiles ||
303 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
304 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
305 	    (fp->f_flag & FWRITE) == 0)
306 		return (EBADF);
307 
308 	FILE_USE(fp);
309 
310 	/* dofilewrite() will unuse the descriptor for us */
311 	return (dofilewrite(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
312 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
313 }
314 
/*
 * Common code for write(2): perform a single-segment write through
 * fp's fo_write op.  The number of bytes transferred is returned via
 * retval.  Posts SIGPIPE on EPIPE.  Consumes the caller's FILE_USE()
 * reference on fp in all cases.
 */
int
dofilewrite(p, fd, fp, buf, nbyte, offset, flags, retval)
	struct proc *p;		/* calling process */
	int fd;			/* descriptor number (used only for ktrace) */
	struct file *fp;	/* file, already FILE_USE()d by caller */
	const void *buf;	/* user-space source buffer */
	size_t nbyte;		/* requested transfer length */
	off_t *offset;		/* position to write at (may be updated) */
	int flags;		/* FOF_* flags, e.g. FOF_UPDATE_OFFSET */
	register_t *retval;	/* out: bytes actually transferred */
{
	struct uio auio;
	struct iovec aiov;
	long cnt, error = 0;
#ifdef KTRACE
	struct iovec ktriov;
#endif

	/* Build a one-element uio describing the user buffer. */
	aiov.iov_base = (caddr_t)buf;		/* XXX kills const */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Writes return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		/* A partial transfer cut short by a signal still succeeds. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Writing to a broken pipe also raises SIGPIPE. */
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;	/* bytes actually moved */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_WRITE, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
378 
379 /*
380  * Gather write system call
381  */
382 int
383 sys_writev(p, v, retval)
384 	struct proc *p;
385 	void *v;
386 	register_t *retval;
387 {
388 	struct sys_writev_args /* {
389 		syscallarg(int) fd;
390 		syscallarg(const struct iovec *) iovp;
391 		syscallarg(int) iovcnt;
392 	} */ *uap = v;
393 	int fd = SCARG(uap, fd);
394 	struct file *fp;
395 	struct filedesc *fdp = p->p_fd;
396 
397 	if ((u_int)fd >= fdp->fd_nfiles ||
398 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
399 	    (fp->f_iflags & FIF_WANTCLOSE) != 0 ||
400 	    (fp->f_flag & FWRITE) == 0)
401 		return (EBADF);
402 
403 	FILE_USE(fp);
404 
405 	/* dofilewritev() will unuse the descriptor for us */
406 	return (dofilewritev(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
407 	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
408 }
409 
410 int
411 dofilewritev(p, fd, fp, iovp, iovcnt, offset, flags, retval)
412 	struct proc *p;
413 	int fd;
414 	struct file *fp;
415 	const struct iovec *iovp;
416 	int iovcnt;
417 	off_t *offset;
418 	int flags;
419 	register_t *retval;
420 {
421 	struct uio auio;
422 	struct iovec *iov;
423 	struct iovec *needfree;
424 	struct iovec aiov[UIO_SMALLIOV];
425 	long i, cnt, error = 0;
426 	u_int iovlen;
427 #ifdef KTRACE
428 	struct iovec *ktriov = NULL;
429 #endif
430 
431 	/* note: can't use iovlen until iovcnt is validated */
432 	iovlen = iovcnt * sizeof(struct iovec);
433 	if ((u_int)iovcnt > UIO_SMALLIOV) {
434 		if ((u_int)iovcnt > IOV_MAX)
435 			return (EINVAL);
436 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
437 		needfree = iov;
438 	} else if ((u_int)iovcnt > 0) {
439 		iov = aiov;
440 		needfree = NULL;
441 	} else {
442 		error = EINVAL;
443 		goto out;
444 	}
445 
446 	auio.uio_iov = iov;
447 	auio.uio_iovcnt = iovcnt;
448 	auio.uio_rw = UIO_WRITE;
449 	auio.uio_segflg = UIO_USERSPACE;
450 	auio.uio_procp = p;
451 	error = copyin(iovp, iov, iovlen);
452 	if (error)
453 		goto done;
454 	auio.uio_resid = 0;
455 	for (i = 0; i < iovcnt; i++) {
456 		auio.uio_resid += iov->iov_len;
457 		/*
458 		 * Writes return ssize_t because -1 is returned on error.
459 		 * Therefore we must restrict the length to SSIZE_MAX to
460 		 * avoid garbage return values.
461 		 */
462 		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
463 			error = EINVAL;
464 			goto done;
465 		}
466 		iov++;
467 	}
468 #ifdef KTRACE
469 	/*
470 	 * if tracing, save a copy of iovec
471 	 */
472 	if (KTRPOINT(p, KTR_GENIO))  {
473 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
474 		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
475 	}
476 #endif
477 	cnt = auio.uio_resid;
478 	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
479 	if (error) {
480 		if (auio.uio_resid != cnt && (error == ERESTART ||
481 		    error == EINTR || error == EWOULDBLOCK))
482 			error = 0;
483 		if (error == EPIPE)
484 			psignal(p, SIGPIPE);
485 	}
486 	cnt -= auio.uio_resid;
487 #ifdef KTRACE
488 	if (KTRPOINT(p, KTR_GENIO))
489 		if (error == 0) {
490 			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
491 		FREE(ktriov, M_TEMP);
492 	}
493 #endif
494 	*retval = cnt;
495  done:
496 	if (needfree)
497 		FREE(needfree, M_IOV);
498  out:
499 	FILE_UNUSE(fp, p);
500 	return (error);
501 }
502 
503 /*
504  * Ioctl system call
505  */
506 /* ARGSUSED */
/*
 * Ioctl system call: dispatch an I/O control request on a descriptor.
 * Generic FIO* commands are handled here; everything else is forwarded
 * to the file's fo_ioctl op, with argument data copied in/out of user
 * space as directed by the IOC_IN/IOC_OUT/IOC_VOID bits encoded in
 * the command word.
 */
/* ARGSUSED */
int
sys_ioctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_ioctl_args /* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(caddr_t) data;
	} */ *uap = v;
	struct file *fp;
	struct filedesc *fdp;
	u_long com;		/* the ioctl command word */
	int error = 0;
	u_int size;		/* size of in/out argument data */
	caddr_t data, memp;	/* argument buffer; memp != NULL if malloc'd */
	int tmp;
#define STK_PARAMS	128
	u_long stkbuf[STK_PARAMS/sizeof(u_long)];

	/* Validate the descriptor and take a use reference on the file. */
	fdp = p->p_fd;
	if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL ||
	    (fp->f_iflags & FIF_WANTCLOSE) != 0)
		return (EBADF);

	FILE_USE(fp);

	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/* Close-on-exec acts on the descriptor, not on the file itself. */
	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		goto out;

	case FIOCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	/* Use the on-stack buffer when it is big enough, else malloc. */
	if (size > sizeof(stkbuf)) {
		memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = (caddr_t)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					free(memp, M_IOCTLOPS);
				goto out;
			}
		} else
			/* Zero-length IOC_IN: pass the user pointer itself. */
			*(caddr_t *)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		/* Toggle non-blocking mode and inform the file object. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p);
		break;

	case FIOASYNC:
		/* Toggle async (SIGIO) notification mode. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p);
		break;

	case FIOSETOWN:
		/* Set the SIGIO/SIGURG recipient: pid > 0, or -pgid <= 0. */
		tmp = *(int *)data;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid value directly. */
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			/* Translate the pid to its process group id. */
			tmp = p1->p_pgrp->pg_id;
		}
		error = (*fp->f_ops->fo_ioctl)
			(fp, TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p);
		/* TIOCGPGRP yields a pgrp id; FIOGETOWN negates it. */
		*(int *)data = -*(int *)data;
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, SCARG(uap, data), size);
		break;
	}
	if (memp)
		free(memp, M_IOCTLOPS);
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
650 
/*
 * selwait is the sleep channel shared by select(2)/poll(2); nselcoll
 * counts select collisions (multiple processes selecting on the same
 * selinfo), forcing a full wakeup and rescan.
 */
int	selwait, nselcoll;
652 
653 /*
654  * Select system call.
655  */
/*
 * Select system call.  Copies in up to three descriptor bit masks,
 * polls the referenced files via selscan(), and, when nothing is
 * ready, sleeps on selwait -- recomputing the timeout each pass --
 * until a descriptor becomes ready, the timeout expires, or a signal
 * arrives.  The surviving bits are copied back out on success.
 */
int
sys_select(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_select_args /* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	caddr_t bits;
	/* On-stack room for 6 masks: 3 input sets plus 3 result sets. */
	char smallbits[howmany(FD_SETSIZE, NFDBITS) * sizeof(fd_mask) * 6];
	struct timeval atv;	/* absolute deadline, once computed */
	int s, ncoll, error = 0, timo;
	size_t ni;		/* bytes per single bit mask */

	if (SCARG(uap, nd) < 0)
		return (EINVAL);
	if (SCARG(uap, nd) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nd) = p->p_fd->fd_nfiles;
	}
	ni = howmany(SCARG(uap, nd), NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

/* Copy in mask `name' to slot x, or zero the slot if not supplied. */
#define	getbits(name, x) \
	if (SCARG(uap, name)) { \
		error = copyin(SCARG(uap, name), bits + ni * x, ni); \
		if (error) \
			goto done; \
	} else \
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (caddr_t)&atv,
			sizeof(atv));
		if (error)
			goto done;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		/* Convert the relative timeout to an absolute deadline. */
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	} else
		timo = 0;
retry:
	/* Snapshot the collision counter to detect selwakeup() races. */
	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	error = selscan(p, (fd_mask *)(bits + ni * 0),
			   (fd_mask *)(bits + ni * 3), SCARG(uap, nd), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, tv)) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;
	}
	/* Before sleeping, re-check for a wakeup that raced with the scan. */
	s = splhigh();
	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	p->p_flag &= ~P_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
done:
	p->p_flag &= ~P_SELECT;
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
/* Copy result mask from slot x back out to user mask `name'. */
#define	putbits(name, x) \
		if (SCARG(uap, name)) { \
			error = copyout(bits + ni * x, SCARG(uap, name), ni); \
			if (error) \
				goto out; \
		}
		putbits(in, 3);
		putbits(ou, 4);
		putbits(ex, 5);
#undef putbits
	}
out:
	if (ni * 6 > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
762 
/*
 * Helper for select(): walk the three input bit masks (read, write,
 * except), poll every descriptor whose bit is set through its file's
 * fo_poll op, and build the corresponding output masks.  The number
 * of ready descriptors is returned via retval; EBADF is returned if
 * a set bit names a missing or closing file.
 */
int
selscan(p, ibitp, obitp, nfd, retval)
	struct proc *p;
	fd_mask *ibitp, *obitp;	/* input masks; output masks follow */
	int nfd;		/* highest descriptor number to check */
	register_t *retval;	/* out: count of ready descriptors */
{
	struct filedesc *fdp = p->p_fd;
	int msk, i, j, fd;
	fd_mask ibits, obits;
	struct file *fp;
	int n = 0;
	/* Poll events for the in/ou/ex masks respectively, in order. */
	static int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };

	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			/* Visit each set bit in this word, lowest first. */
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				fp = fdp->fd_ofiles[fd];
				if (fp == NULL ||
				    (fp->f_iflags & FIF_WANTCLOSE) != 0)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], p)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, p);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
802 
803 /*
804  * Poll system call.
805  */
/*
 * Poll system call.  Copies in the pollfd array, scans it with
 * pollscan(), and, when no descriptor reports an event, sleeps on
 * selwait -- recomputing the timeout each pass -- until an event
 * occurs, the timeout expires, or a signal arrives.  The updated
 * pollfd array (revents filled in) is copied back out on success.
 */
int
sys_poll(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_poll_args /* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(int) timeout;
	} */ *uap = v;
	caddr_t bits;
	/* On-stack space for a typical (small) pollfd array. */
	char smallbits[32 * sizeof(struct pollfd)];
	struct timeval atv;	/* absolute deadline, once computed */
	int s, ncoll, error = 0, timo;
	size_t ni;		/* total bytes of pollfd data */

	if (SCARG(uap, nfds) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nfds) = p->p_fd->fd_nfiles;
	}
	ni = SCARG(uap, nfds) * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	error = copyin(SCARG(uap, fds), bits, ni);
	if (error)
		goto done;

	if (SCARG(uap, timeout) != INFTIM) {
		/* Convert the millisecond timeout to an absolute deadline. */
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	} else
		timo = 0;
retry:
	/* Snapshot the collision counter to detect selwakeup() races. */
	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	error = pollscan(p, (struct pollfd *)bits, SCARG(uap, nfds), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, timeout) != INFTIM) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;
	}
	/* Before sleeping, re-check for a wakeup that raced with the scan. */
	s = splhigh();
	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	p->p_flag &= ~P_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
done:
	p->p_flag &= ~P_SELECT;
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = copyout(bits, SCARG(uap, fds), ni);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
890 
891 int
892 pollscan(p, fds, nfd, retval)
893 	struct proc *p;
894 	struct pollfd *fds;
895 	int nfd;
896 	register_t *retval;
897 {
898 	struct filedesc *fdp = p->p_fd;
899 	int i;
900 	struct file *fp;
901 	int n = 0;
902 
903 	for (i = 0; i < nfd; i++, fds++) {
904 		if ((u_int)fds->fd >= fdp->fd_nfiles) {
905 			fds->revents = POLLNVAL;
906 			n++;
907 		} else {
908 			fp = fdp->fd_ofiles[fds->fd];
909 			if (fp == NULL ||
910 			    (fp->f_iflags & FIF_WANTCLOSE) != 0) {
911 				fds->revents = POLLNVAL;
912 				n++;
913 			} else {
914 				FILE_USE(fp);
915 				fds->revents = (*fp->f_ops->fo_poll)(fp,
916 				    fds->events | POLLERR | POLLHUP, p);
917 				if (fds->revents != 0)
918 					n++;
919 				FILE_UNUSE(fp, p);
920 			}
921 		}
922 	}
923 	*retval = n;
924 	return (0);
925 }
926 
927 /*ARGSUSED*/
928 int
929 seltrue(dev, events, p)
930 	dev_t dev;
931 	int events;
932 	struct proc *p;
933 {
934 
935 	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
936 }
937 
938 /*
939  * Record a select request.
940  */
/*
 * Record a select request.  Remembers the selecting process's pid in
 * the selinfo; if a different process is already recorded and still
 * sleeping on selwait, mark a collision instead so that selwakeup()
 * wakes all sleepers.
 */
void
selrecord(selector, sip)
	struct proc *selector;
	struct selinfo *sip;
{
	struct proc *p;
	pid_t mypid;

	mypid = selector->p_pid;
	/* Already recorded for this process: nothing to do. */
	if (sip->si_pid == mypid)
		return;
	/* Another live selector is recorded here: note the collision. */
	if (sip->si_pid && (p = pfind(sip->si_pid)) &&
	    p->p_wchan == (caddr_t)&selwait)
		sip->si_flags |= SI_COLL;
	else
		sip->si_pid = mypid;
}
958 
959 /*
960  * Do a wakeup when a selectable event occurs.
961  */
/*
 * Do a wakeup when a selectable event occurs.
 */
void
selwakeup(sip)
	struct selinfo *sip;
{
	struct proc *p;
	int s;

	/* No recorded selector: nothing to wake. */
	if (sip->si_pid == 0)
		return;
	if (sip->si_flags & SI_COLL) {
		/* Collision: several selectors; wake everyone on selwait. */
		nselcoll++;
		sip->si_flags &= ~SI_COLL;
		wakeup((caddr_t)&selwait);
	}
	p = pfind(sip->si_pid);
	sip->si_pid = 0;
	if (p != NULL) {
		s = splhigh();
		if (p->p_wchan == (caddr_t)&selwait) {
			/* Asleep in select/poll: make it runnable again. */
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(p);
		} else if (p->p_flag & P_SELECT)
			/* Between scan and sleep: cancel the pending sleep. */
			p->p_flag &= ~P_SELECT;
		splx(s);
	}
}
990