/*	$NetBSD: vfs_vnops.c,v 1.124 2006/09/12 08:23:51 elad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.124 2006/09/12 08:23:51 elad Exp $");

#include "fs_union.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook) (struct vnode **, struct file *, struct lwp *);
#endif

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);

const struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
	vn_statfile, vn_closefile, vn_kqfilter
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct mount *mp = NULL;	/* XXX: GCC */
	struct lwp *l = ndp->ni_cnd.cn_lwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
#if NVERIEXEC > 0
	struct veriexec_file_entry *vfe = NULL;
	char pathbuf[MAXPATHLEN];
	size_t pathlen;
	int (*copyfun)(const void *, void *, size_t, size_t *) =
	    ndp->ni_segflg == UIO_SYSSPACE ? copystr : copyinstr;
#endif /* NVERIEXEC > 0 */

#if NVERIEXEC > 0
	error = (*copyfun)(ndp->ni_dirp, pathbuf, sizeof(pathbuf), &pathlen);
	if (error) {
		if (veriexec_verbose >= 1)
			printf("veriexec: Can't copy path. (error=%d)\n",
			    error);

		return (error);
	}
#endif /* NVERIEXEC > 0 */

restart:
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
#if NVERIEXEC > 0
			/* Lockdown mode: Prevent creation of new files. */
			if (veriexec_strict >= VERIEXEC_LOCKDOWN) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);

				printf("Veriexec: vn_open: Preventing "
				       "new file creation in %s.\n",
				       pathbuf);

				vp = ndp->ni_dvp;
				error = EPERM;
				goto bad;
			}
#endif /* NVERIEXEC > 0 */

			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				 va.va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			vn_finished_write(mp, 0);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (ndp->ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
#if NVERIEXEC > 0
		if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
		    &vfe)) != 0)
			goto bad;
#endif /* NVERIEXEC > 0 */

		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
				goto bad;
		}

		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
				goto bad;
#if NVERIEXEC > 0
			if (vfe != NULL) {
				veriexec_report("Write access request.",
				    pathbuf, l, REPORT_ALWAYS|REPORT_ALARM);

				/* IPS mode: Deny writing to monitored files. */
				if (veriexec_strict >= VERIEXEC_IPS) {
					error = EPERM;
					goto bad;
				} else {
					veriexec_purge(vfe);
				}
			}
#endif /* NVERIEXEC > 0 */
		}
	}

	if (fmode & O_TRUNC) {
#if NVERIEXEC > 0
		if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
					     &vfe)) != 0) {
			/*VOP_UNLOCK(vp, 0);*/
			goto bad;
		}

		if (vfe != NULL) {
			veriexec_report("truncate access request.",
					pathbuf, l,
					REPORT_VERBOSE | REPORT_ALARM);

			/* IPS mode: Deny truncating monitored files. */
			if (veriexec_strict >= VERIEXEC_IPS) {
				error = EPERM;
				goto bad;
			} else {
				veriexec_purge(vfe);
			}
		}
#endif /* NVERIEXEC > 0 */

		VOP_UNLOCK(vp, 0);			/* XXX */

		if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0) {
			vrele(vp);
			return (error);
		}
		VOP_LEASE(vp, l, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred, l);
		vn_finished_write(mp, 0);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
		goto bad;
	if (vp->v_type == VREG &&
	    uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
		error = EIO;
		goto bad;
	}
	if (fmode & FWRITE)
		vp->v_writecount++;

	return (0);
bad:
	vput(vp);
	return (error);
}
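
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical in-kernel caller that opens a file by pathname with
 * vn_open().  On success the vnode is returned locked and referenced;
 * vn_close() below expects it unlocked and consumes the reference.
 * The path and the NDINIT() usage are assumptions for illustration,
 * taking the era's NDINIT() signature with the calling lwp.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/path/to/file", l);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return (error);
 *	VOP_UNLOCK(nd.ni_vp, 0);
 *	... use nd.ni_vp, e.g. with vn_rdwr() ...
 *	error = vn_close(nd.ni_vp, FREAD, l->l_cred, l);
 */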

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{
	if ((vp->v_flag & VEXECMAP) == 0) {
		uvmexp.filepages -= vp->v_uobj.uo_npages;
		uvmexp.execpages += vp->v_uobj.uo_npages;
	}
	vp->v_flag |= VEXECMAP;
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if (vp->v_writecount != 0) {
		KASSERT((vp->v_flag & VTEXT) == 0);
		return (ETXTBSY);
	}
	vp->v_flag |= VTEXT;
	vn_markexec(vp);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, l);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp = NULL;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}
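
/*
 * Example (sketch, assuming "vp" is a referenced, unlocked vnode and
 * "l" is the calling lwp): read the first bytes of a file into a
 * kernel buffer with vn_rdwr().  On success, sizeof(buf) - resid
 * bytes are valid in buf.
 *
 *	char buf[128];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *	    0, l->l_cred, &resid, l);
 */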

int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = min(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
		    ncookies);
	fp->f_offset = auio.uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

#if defined(UNION) || defined(LKM)
	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}
#endif /* UNION || LKM */

	if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		fp->f_data = vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag;
	struct lwp *l = curlwp;

	VOP_LEASE(vp, l, cred, LEASE_READ);
	ioflag = IO_ADV_ENCODE(fp->f_advice);
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct mount *mp;
	int count, error, ioflag = IO_UNIT;
	struct lwp *l = curlwp;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fp->f_flag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, l, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND)
			*offset = uio->uio_offset;
		else
			*offset += count - uio->uio_resid;
	}
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, l);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, l->l_cred, l);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_cred, l);
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct proc *p = l->l_proc;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, l->l_cred, l);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    l->l_cred, l);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

#if 0
	KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
	    || (vp->v_flag & VONWORKLST) != 0);
#endif
	KASSERT((flags &
	    ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
	    LK_SETRECURSE|LK_CANRECURSE))
	    == 0);

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			if (flags & LK_NOWAIT) {
				simple_unlock(&vp->v_interlock);
				return EBUSY;
			}
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD | PNORELOCK,
			    "vn_lock", 0, &vp->v_interlock);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp,
			    (flags & ~LK_RETRY) | LK_INTERLOCK);
			if (error == 0 || error == EDEADLK || error == EBUSY)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
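
/*
 * Example (sketch): typical vn_lock() usage around a VOP call.  With
 * LK_RETRY the caller keeps waiting across vnode reclamation; without
 * it, a dying vnode makes vn_lock() fail and the caller must cope.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_GETATTR(vp, &va, cred, l);
 *	VOP_UNLOCK(vp, 0);
 */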

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct lwp *l)
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, l));
}

/*
 * Enable LK_CANRECURSE on lock. Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
	struct lock *lkp = &vp->v_lock;
	u_int retval = lkp->lk_flags & LK_CANRECURSE;

	lkp->lk_flags |= LK_CANRECURSE;
	return retval;
}
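
/*
 * Example (sketch): the save/restore pairing for recursive locking.
 *
 *	u_int flags = vn_setrecurse(vp);
 *	... operations that may re-lock vp ...
 *	vn_restorerecurse(vp, flags);
 */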

/*
 * Called when done with vn_setrecurse(); restores the prior status.
 */
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
	struct lock *lkp = &vp->v_lock;

	lkp->lk_flags &= ~LK_CANRECURSE;
	lkp->lk_flags |= flags;
}

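/*
 * Register a copy-on-write callback on a device vnode.  The handler
 * list is quiesced (no handler running, no other update in progress)
 * before it is modified.
 */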
int
vn_cow_establish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
	    M_DEVBUF, M_WAITOK);
	e->ce_func = func;
	e->ce_cookie = cookie;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return 0;
}

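/*
 * Remove a copy-on-write callback previously installed with
 * vn_cow_establish().  Returns EINVAL if the (func, cookie) pair is
 * not found on the list.
 */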
int
vn_cow_disestablish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
		if (e->ce_func == func && e->ce_cookie == cookie) {
			SLIST_REMOVE(&vp->v_spec_cow_head, e,
			    spec_cow_entry, ce_list);
			FREE(e, M_DEVBUF);
			break;
		}

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return e ? 0 : EINVAL;
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    l);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
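
/*
 * Example (sketch; the attribute name is hypothetical): fetch a named
 * extended attribute into a local buffer.  "vp" is assumed unlocked,
 * so ioflg is 0; on success buflen holds the number of bytes read.
 *
 *	char attr[64];
 *	size_t buflen = sizeof(attr);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, attr, l);
 */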

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp = NULL;	/* XXX: GCC */
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	struct mount *mp = NULL;	/* XXX: GCC */
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

/*
 * Prepare to start a filesystem write operation. If the operation is
 * permitted, bump the count of operations in progress and proceed.
 * If a suspend request is in progress, wait until the suspension is
 * over and then proceed.
 * V_PCATCH    adds PCATCH to the tsleep flags.
 * V_WAIT      waits until suspension is over; otherwise EWOULDBLOCK is
 *             returned.
 * V_SLEEPONLY wait, but do not bump the operations count.
 * V_LOWER     this is a lower-level operation: no further vnodes should
 *             be locked. Otherwise it is an upper-level operation: no
 *             vnodes should be locked.
 */
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error, mask, prio;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		*mpp = vp->v_mount;
	}
	if ((mp = *mpp) == NULL)
		return (0);
	mp = mp->mnt_leaf;
	/*
	 * Check on status of suspension.
	 */
	prio = PUSER - 1;
	if (flags & V_PCATCH)
		prio |= PCATCH;

	if ((flags & V_LOWER) == 0)
		mask = IMNT_SUSPEND;
	else
		mask = IMNT_SUSPENDLOW;

	while ((mp->mnt_iflag & mask) != 0) {
		if ((flags & V_WAIT) == 0)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, prio, "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_SLEEPONLY)
		return (0);
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0)
		mp->mnt_writeopcountupper++;
	else
		mp->mnt_writeopcountlower++;
	simple_unlock(&mp->mnt_slock);
	return (0);
}
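
/*
 * Example (sketch, mirroring what vn_write() above does): the
 * canonical gate a kernel caller puts around a metadata-changing VOP,
 * pairing vn_start_write() with vn_finished_write() below.
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_SETATTR(vp, &va, cred, l);
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp, 0);
 */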

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp, int flags)
{
	if (mp == NULL)
		return;
	mp = mp->mnt_leaf;
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0) {
		mp->mnt_writeopcountupper--;
		if (mp->mnt_writeopcountupper < 0)
			printf("vn_finished_write: neg cnt upper=%d\n",
			       mp->mnt_writeopcountupper);
		if ((mp->mnt_iflag & IMNT_SUSPEND) != 0 &&
		    mp->mnt_writeopcountupper <= 0)
			wakeup(&mp->mnt_writeopcountupper);
	} else {
		mp->mnt_writeopcountlower--;
		if (mp->mnt_writeopcountlower < 0)
			printf("vn_finished_write: neg cnt lower=%d\n",
			       mp->mnt_writeopcountlower);
		if ((mp->mnt_iflag & IMNT_SUSPENDLOW) != 0 &&
		    mp->mnt_writeopcountlower <= 0)
			wakeup(&mp->mnt_writeopcountlower);
	}
	simple_unlock(&mp->mnt_slock);
}

void
vn_ra_allocctx(struct vnode *vp)
{
	struct uvm_ractx *ra = NULL;

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	simple_lock(&vp->v_interlock);
	if (vp->v_ractx == NULL) {
		simple_unlock(&vp->v_interlock);
		ra = uvm_ra_allocctx();
		simple_lock(&vp->v_interlock);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	simple_unlock(&vp->v_interlock);
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}
1071