/*	$NetBSD: vfs_vnops.c,v 1.161 2008/11/12 12:36:16 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.161 2008/11/12 12:36:16 ad Exp $");

#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

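/*
 * Hook set by the union file system so that vn_readdir() can descend
 * into the underlying layer of a union mount.
 */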
int (*vn_union_readdir_hook) (struct vnode **, struct file *, struct lwp *);

#include <sys/verified_exec.h>

static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(file_t *fp);
static int vn_poll(file_t *fp, int events);
static int vn_fcntl(file_t *fp, u_int com, void *data);
static int vn_statfile(file_t *fp, struct stat *sb);
static int vn_ioctl(file_t *fp, u_long com, void *data);

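/* File operations installed for vnode-backed file descriptors. */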
const struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
	vn_statfile, vn_closefile, vn_kqfilter
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
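/*
 * A minimal sketch of a hypothetical in-kernel caller (error handling
 * elided; "path" is an assumed kernel-space pathname).  On success
 * ndp->ni_vp is returned locked and opened:
 *
 *	struct nameidata nd;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path);
 *	if (vn_open(&nd, FREAD, 0) == 0)
 *		VOP_UNLOCK(nd.ni_vp, 0);
 */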
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
	char *path;

	ndp->ni_cnd.cn_flags &= TRYEMULROOT;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags |= LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
	}

	VERIEXEC_PATH_GET(ndp->ni_dirp, ndp->ni_segflg, ndp->ni_dirp, path);

	error = namei(ndp);
	if (error)
		goto out;

	vp = ndp->ni_vp;

#if NVERIEXEC > 0
	error = veriexec_openchk(l, ndp->ni_vp, ndp->ni_dirp, fmode);
	if (error)
		goto bad;
#endif /* NVERIEXEC > 0 */

	if (fmode & O_CREAT) {
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			if (error)
				goto out;
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (ndp->ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
		error = vn_openchk(vp, cred, fmode);
		if (error != 0)
			goto bad;
	}

	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
		goto bad;
	if (fmode & FWRITE) {
		mutex_enter(&vp->v_interlock);
		vp->v_writecount++;
		mutex_exit(&vp->v_interlock);
	}

bad:
	if (error)
		vput(vp);
out:
	VERIEXEC_PATH_PUT(path);
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_iflag & VI_TEXT)
		return (ETXTBSY);
	return (0);
}

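/*
 * Check whether an open of the vnode with the given open flags
 * (FREAD/FWRITE/O_TRUNC) may proceed: writing requires a non-directory
 * that is not busy as a process text, and VOP_ACCESS() must grant the
 * computed permission bits.
 */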
int
vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
{
	int permbits = 0;
	int error;

	if ((fflags & FREAD) != 0) {
		permbits = VREAD;
	}
	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
		permbits |= VWRITE;
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp);
		if (error != 0)
			goto bad;
	}
	error = VOP_ACCESS(vp, permbits, cred);
bad:
	return error;
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{

	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		/* Safe unlocked, as long as caller holds a reference. */
		return;
	}

	mutex_enter(&vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
		vp->v_iflag |= VI_EXECMAP;
	}
	mutex_exit(&vp->v_interlock);
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
		/* Safe unlocked, as long as caller holds a reference. */
		return (0);
	}

	mutex_enter(&vp->v_interlock);
	if (vp->v_writecount != 0) {
		KASSERT((vp->v_iflag & VI_TEXT) == 0);
		mutex_exit(&vp->v_interlock);
		return (ETXTBSY);
	}
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
	mutex_exit(&vp->v_interlock);
	return (0);
}

/*
 * Vnode close call.
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked vnode.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
{
	int error;

	if (flags & FWRITE) {
		mutex_enter(&vp->v_interlock);
		vp->v_writecount--;
		mutex_exit(&vp->v_interlock);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
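/*
 * Hypothetical usage sketch: read the first 512 bytes of an unlocked
 * vnode into a kernel buffer, letting vn_rdwr() do the locking:
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, 512, 0, UIO_SYSSPACE,
 *	    0, cred, &resid, NULL);
 *
 * With UIO_SYSSPACE the lwp argument is unused, so NULL is fine there.
 */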
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}

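/*
 * Read directory entries from the vnode behind fp into bf and update
 * the file offset.  On union mounts, fall through to the lower layer
 * or the covered vnode so that one call returns the merged listing.
 */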
int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size of any kernel buffer used by VOP_READDIR. */
	count = min(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
		    ncookies);
	FILE_LOCK(fp);
	fp->f_offset = auio.uio_offset;
	FILE_UNLOCK(fp);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}

	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		FILE_LOCK(fp);
		fp->f_data = vp;
		fp->f_offset = 0;
		FILE_UNLOCK(fp);
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag, fflag;

	ioflag = IO_ADV_ENCODE(fp->f_advice);
	fflag = fp->f_flag;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag, fflag;

	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
	fflag = fp->f_flag;
	if (vp->v_type == VREG && (fflag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fflag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fflag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND) {
			/*
			 * SUSv3 describes the behaviour for count = 0 as
			 * follows: "Before any action ... is taken, and if
			 * nbyte is zero and the file is a regular file, the
			 * write() function ... in the absence of errors ...
			 * shall return zero and have no other results."
			 */
			if (count)
				*offset = uio->uio_offset;
		} else
			*offset += count - uio->uio_resid;
	}
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(file_t *fp, struct stat *sb)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb);
}

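/*
 * Fill in *sb from the vnode's attributes; the file type bits of
 * st_mode are derived from v_type.
 */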
int
vn_stat(struct vnode *vp, struct stat *sb)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, kauth_cred_get());
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(file_t *fp, u_int com, void *data)
{
	struct vnode *vp = fp->f_data;
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
	struct vnode *vp = fp->f_data, *ovp;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr,
			    kauth_cred_get());
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there are never
			 * any bytes queued in them, nor is there any send
			 * space available in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    kauth_cred_get());
		if (error == 0 && com == TIOCSCTTY) {
			VREF(vp);
			mutex_enter(proc_lock);
			ovp = curproc->p_session->s_ttyvp;
			curproc->p_session->s_ttyvp = vp;
			mutex_exit(proc_lock);
			if (ovp != NULL)
				vrele(ovp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(file_t *fp, int events)
{

	return (VOP_POLL(fp->f_data, events));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(file_t *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_data, kn));
}

682  * Check that the vnode is still valid, and if so
683  * acquire requested lock.
684  */
685 int
686 vn_lock(struct vnode *vp, int flags)
687 {
688 	int error;
689 
690 #if 0
691 	KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
692 	    || (vp->v_iflag & VI_ONWORKLST) != 0);
693 #endif
694 	KASSERT((flags &
695 	    ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
696 	    LK_CANRECURSE))
697 	    == 0);
698 
699 #ifdef DIAGNOSTIC
700 	if (wapbl_vphaswapbl(vp))
701 		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
702 #endif
703 
704 	do {
		/*
		 * XXX PR 37706: forced unmount of file systems is unsafe.
		 * The race between vclean() and this code is the remaining
		 * problem.
		 */
		if (vp->v_iflag & VI_XLOCK) {
			if ((flags & LK_INTERLOCK) == 0) {
				mutex_enter(&vp->v_interlock);
			}
			flags &= ~LK_INTERLOCK;
			if (flags & LK_NOWAIT) {
				mutex_exit(&vp->v_interlock);
				return EBUSY;
			}
			vwait(vp, VI_XLOCK);
			mutex_exit(&vp->v_interlock);
			error = ENOENT;
		} else {
			if ((flags & LK_INTERLOCK) != 0) {
				mutex_exit(&vp->v_interlock);
			}
			flags &= ~LK_INTERLOCK;
			error = VOP_LOCK(vp, (flags & ~LK_RETRY));
			if (error == 0 || error == EDEADLK || error == EBUSY)
				return (error);
		}
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(file_t *fp)
{

	return vn_close(fp->f_data, fp->f_flag, fp->f_cred);
}

/*
 * Enable LK_CANRECURSE on the vnode's lock.  The return value is
 * historical and is always 0; it is only handed back to
 * vn_restorerecurse().
 */
u_int
vn_setrecurse(struct vnode *vp)
{
	struct vnlock *lkp;

	lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
	atomic_inc_uint(&lkp->vl_canrecurse);

	return 0;
}

759  * Called when done with locksetrecurse.
760  */
761 void
762 vn_restorerecurse(struct vnode *vp, u_int flags)
763 {
764 	struct vnlock *lkp;
765 
766 	lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
767 	atomic_dec_uint(&lkp->vl_canrecurse);
768 }
769 
/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
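/*
 * Hypothetical usage sketch: fetch a system-namespace attribute from
 * an unlocked vnode ("myattr" is an assumed attribute name):
 *
 *	len = sizeof(buf);
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", &len, buf, curlwp);
 */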
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

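/*
 * Lazily allocate a read-ahead context for a regular-file vnode.
 * Called with v_interlock held; the interlock is dropped and retaken
 * around the allocation, so v_ractx is re-checked before installing.
 */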
void
vn_ra_allocctx(struct vnode *vp)
{
	struct uvm_ractx *ra = NULL;

	KASSERT(mutex_owned(&vp->v_interlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	mutex_exit(&vp->v_interlock);
	ra = uvm_ra_allocctx();
	mutex_enter(&vp->v_interlock);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}