1 /*	$NetBSD: vfs_vnops.c,v 1.224 2021/10/20 03:08:18 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2009 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1982, 1986, 1989, 1993
34  *	The Regents of the University of California.  All rights reserved.
35  * (c) UNIX System Laboratories, Inc.
36  * All or some portions of this file are derived from material licensed
37  * to the University of California by American Telephone and Telegraph
38  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39  * the permission of UNIX System Laboratories, Inc.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.224 2021/10/20 03:08:18 thorpej Exp $");
70 
71 #include "veriexec.h"
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/file.h>
77 #include <sys/stat.h>
78 #include <sys/buf.h>
79 #include <sys/proc.h>
80 #include <sys/mount.h>
81 #include <sys/namei.h>
82 #include <sys/vnode.h>
83 #include <sys/ioctl.h>
84 #include <sys/tty.h>
85 #include <sys/poll.h>
86 #include <sys/kauth.h>
87 #include <sys/syslog.h>
88 #include <sys/fstrans.h>
89 #include <sys/atomic.h>
90 #include <sys/filedesc.h>
91 #include <sys/wapbl.h>
92 #include <sys/mman.h>
93 
94 #include <miscfs/specfs/specdev.h>
95 #include <miscfs/fifofs/fifo.h>
96 
97 #include <uvm/uvm_extern.h>
98 #include <uvm/uvm_readahead.h>
99 #include <uvm/uvm_device.h>
100 
101 #ifdef UNION
102 #include <fs/union/union.h>
103 #endif
104 
105 #ifndef COMPAT_ZERODEV
106 #define COMPAT_ZERODEV(dev)	(0)
107 #endif
108 
109 int (*vn_union_readdir_hook) (struct vnode **, struct file *, struct lwp *);
110 
111 #include <sys/verified_exec.h>
112 
113 static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
114 	    kauth_cred_t cred, int flags);
115 static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
116 	    kauth_cred_t cred, int flags);
117 static int vn_closefile(file_t *fp);
118 static int vn_poll(file_t *fp, int events);
119 static int vn_fcntl(file_t *fp, u_int com, void *data);
120 static int vn_statfile(file_t *fp, struct stat *sb);
121 static int vn_ioctl(file_t *fp, u_long com, void *data);
122 static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
123 		   struct uvm_object **, int *);
124 static int vn_seek(struct file *, off_t, int, off_t *, int);
125 
126 const struct fileops vnops = {
127 	.fo_name = "vn",
128 	.fo_read = vn_read,
129 	.fo_write = vn_write,
130 	.fo_ioctl = vn_ioctl,
131 	.fo_fcntl = vn_fcntl,
132 	.fo_poll = vn_poll,
133 	.fo_stat = vn_statfile,
134 	.fo_close = vn_closefile,
135 	.fo_kqfilter = vn_kqfilter,
136 	.fo_restart = fnullop_restart,
137 	.fo_mmap = vn_mmap,
138 	.fo_seek = vn_seek,
139 };
140 
141 /*
142  * Common code for vnode open operations.
143  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
144  *
145  * at_dvp is the directory for openat(), if any.
146  * pb is the path.
147  * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
148  * fmode is the open flags, converted from O_* to F*.
149  * cmode is the creation file permissions.
150  *
151  * XXX shouldn't cmode be mode_t?
152  *
153  * On success produces either a vnode in *ret_vp, or if that is NULL,
154  * a file descriptor number in *ret_fd.
155  *
156  * The caller may pass NULL for ret_fd (and ret_domove), in which case
157  * EOPNOTSUPP will be produced in the cases that would otherwise return
158  * a file descriptor.
159  *
160  * Note that callers that want no-follow behavior should pass
161  * O_NOFOLLOW in fmode. Neither FOLLOW nor NOFOLLOW in nmode is
162  * honored.
163  */
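/*
 * Example (minimal sketch of an in-kernel caller that does not care
 * about fd-returning opens; compare vn_bdev_openpath() below):
 *
 *	error = vn_open(NULL, pb, 0, FREAD, 0, &vp, NULL, NULL);
 *	if (error == 0) {
 *		...use the vnode, which is returned locked...
 *		VOP_UNLOCK(vp);
 *		vn_close(vp, FREAD, curlwp->l_cred);
 *	}
 */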
164 int
165 vn_open(struct vnode *at_dvp, struct pathbuf *pb,
166 	int nmode, int fmode, int cmode,
167 	struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
168 {
169 	struct nameidata nd;
170 	struct vnode *vp = NULL;
171 	struct lwp *l = curlwp;
172 	kauth_cred_t cred = l->l_cred;
173 	struct vattr va;
174 	int error;
175 	const char *pathstring;
176 
177 	KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);
178 
179 	KASSERT(ret_vp != NULL);
180 	KASSERT((ret_domove == NULL) == (ret_fd == NULL));
181 
182 	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
183 		return EINVAL;
184 
185 	NDINIT(&nd, LOOKUP, nmode, pb);
186 	if (at_dvp != NULL)
187 		NDAT(&nd, at_dvp);
188 
189 	nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;
190 
191 	if (fmode & O_CREAT) {
192 		nd.ni_cnd.cn_nameiop = CREATE;
193 		nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
194 		if ((fmode & O_EXCL) == 0 &&
195 		    ((fmode & O_NOFOLLOW) == 0))
196 			nd.ni_cnd.cn_flags |= FOLLOW;
197 		if ((fmode & O_EXCL) == 0)
198 			nd.ni_cnd.cn_flags |= NONEXCLHACK;
199 	} else {
200 		nd.ni_cnd.cn_nameiop = LOOKUP;
201 		nd.ni_cnd.cn_flags |= LOCKLEAF;
202 		if ((fmode & O_NOFOLLOW) == 0)
203 			nd.ni_cnd.cn_flags |= FOLLOW;
204 	}
205 
206 	pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
207 	if (pathstring == NULL) {
208 		return ENOMEM;
209 	}
210 
211 	/*
212 	 * When this "interface" was exposed to do_open() it used
213 	 * to initialize l_dupfd to -newfd-1 (thus passing in the
214 	 * new file handle number to use)... but nothing in the
215 	 * kernel uses that value. So just send 0.
216 	 */
217 	l->l_dupfd = 0;
218 
219 	error = namei(&nd);
220 	if (error)
221 		goto out;
222 
223 	vp = nd.ni_vp;
224 
225 #if NVERIEXEC > 0
226 	error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
227 	if (error) {
228 		/* We have to release the locks ourselves */
229 		/*
230 		 * 20210604 dholland passing NONEXCLHACK means we can
231 		 * get ni_dvp == NULL back if ni_vp exists, and we should
232 		 * treat that like the non-O_CREAT case.
233 		 */
234 		if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
235 			if (vp == NULL) {
236 				vput(nd.ni_dvp);
237 			} else {
238 				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
239 				if (nd.ni_dvp == nd.ni_vp)
240 					vrele(nd.ni_dvp);
241 				else
242 					vput(nd.ni_dvp);
243 				nd.ni_dvp = NULL;
244 				vput(vp);
245 			}
246 		} else {
247 			vput(vp);
248 		}
249 		goto out;
250 	}
251 #endif /* NVERIEXEC > 0 */
252 
253 	/*
254 	 * 20210604 dholland ditto
255 	 */
256 	if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
257 		if (nd.ni_vp == NULL) {
258 			vattr_null(&va);
259 			va.va_type = VREG;
260 			va.va_mode = cmode;
261 			if (fmode & O_EXCL)
262 				 va.va_vaflags |= VA_EXCLUSIVE;
263 			error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
264 					   &nd.ni_cnd, &va);
265 			if (error) {
266 				vput(nd.ni_dvp);
267 				goto out;
268 			}
269 			fmode &= ~O_TRUNC;
270 			vp = nd.ni_vp;
271 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
272 			vput(nd.ni_dvp);
273 		} else {
274 			VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
275 			if (nd.ni_dvp == nd.ni_vp)
276 				vrele(nd.ni_dvp);
277 			else
278 				vput(nd.ni_dvp);
279 			nd.ni_dvp = NULL;
280 			vp = nd.ni_vp;
281 			if (fmode & O_EXCL) {
282 				error = EEXIST;
283 				goto bad;
284 			}
285 			fmode &= ~O_CREAT;
286 		}
287 	} else if ((fmode & O_CREAT) != 0) {
288 		/*
289 		 * 20210606 dholland passing NONEXCLHACK means this
290 		 * case exists; it is the same as the following one
291 		 * but also needs to do things in the second (exists)
292 		 * half of the following block. (Besides handling
293 		 * ni_dvp, anyway.)
294 		 */
295 		vp = nd.ni_vp;
296 		KASSERT((fmode & O_EXCL) == 0);
297 		fmode &= ~O_CREAT;
298 	} else {
299 		vp = nd.ni_vp;
300 	}
301 	if (vp->v_type == VSOCK) {
302 		error = EOPNOTSUPP;
303 		goto bad;
304 	}
305 	if (nd.ni_vp->v_type == VLNK) {
306 		error = EFTYPE;
307 		goto bad;
308 	}
309 
310 	if ((fmode & O_CREAT) == 0) {
311 		error = vn_openchk(vp, cred, fmode);
312 		if (error != 0)
313 			goto bad;
314 	}
315 
316 	if (fmode & O_TRUNC) {
317 		vattr_null(&va);
318 		va.va_size = 0;
319 		error = VOP_SETATTR(vp, &va, cred);
320 		if (error != 0)
321 			goto bad;
322 	}
323 	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
324 		goto bad;
325 	if (fmode & FWRITE) {
326 		mutex_enter(vp->v_interlock);
327 		vp->v_writecount++;
328 		mutex_exit(vp->v_interlock);
329 	}
330 
331 bad:
332 	if (error)
333 		vput(vp);
334 out:
335 	pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);
336 
337 	switch (error) {
338 	case EDUPFD:
339 	case EMOVEFD:
340 		/* if the caller isn't prepared to handle fds, fail for them */
341 		if (ret_fd == NULL) {
342 			error = EOPNOTSUPP;
343 			break;
344 		}
345 		*ret_vp = NULL;
346 		*ret_domove = error == EMOVEFD;
347 		*ret_fd = l->l_dupfd;
348 		error = 0;
349 		break;
350 	case 0:
351 		*ret_vp = vp;
352 		break;
353 	}
354 	l->l_dupfd = 0;
355 	return error;
356 }
357 
358 /*
359  * Check for write permissions on the specified vnode.
360  * Prototype text segments cannot be written.
361  */
362 int
363 vn_writechk(struct vnode *vp)
364 {
365 
366 	/*
367 	 * If the vnode is in use as a process's text,
368 	 * we can't allow writing.
369 	 */
370 	if (vp->v_iflag & VI_TEXT)
371 		return (ETXTBSY);
372 	return (0);
373 }
374 
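/*
 * Check whether a vnode may be opened with the given open flags:
 * verify the vnode type against O_DIRECTORY/O_REGULAR and check
 * the required permission bits with VOP_ACCESS.
 */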
375 int
376 vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
377 {
378 	int permbits = 0;
379 	int error;
380 
381 	if (vp->v_type == VNON || vp->v_type == VBAD)
382 		return ENXIO;
383 
384 	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
385 		return ENOTDIR;
386 
387 	if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
388 		return EFTYPE;
389 
390 	if ((fflags & FREAD) != 0) {
391 		permbits = VREAD;
392 	}
393 	if ((fflags & FEXEC) != 0) {
394 		permbits |= VEXEC;
395 	}
396 	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
397 		permbits |= VWRITE;
398 		if (vp->v_type == VDIR) {
399 			error = EISDIR;
400 			goto bad;
401 		}
402 		error = vn_writechk(vp);
403 		if (error != 0)
404 			goto bad;
405 	}
406 	error = VOP_ACCESS(vp, permbits, cred);
407 bad:
408 	return error;
409 }
410 
411 /*
412  * Mark a vnode as having executable mappings.
413  */
414 void
415 vn_markexec(struct vnode *vp)
416 {
417 
418 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
419 		/* Safe unlocked, as long as caller holds a reference. */
420 		return;
421 	}
422 
423 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
424 	mutex_enter(vp->v_interlock);
425 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
426 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
427 		vp->v_iflag |= VI_EXECMAP;
428 	}
429 	mutex_exit(vp->v_interlock);
430 	rw_exit(vp->v_uobj.vmobjlock);
431 }
432 
433 /*
434  * Mark a vnode as being the text of a process.
435  * Fail if the vnode is currently writable.
436  */
437 int
438 vn_marktext(struct vnode *vp)
439 {
440 
441 	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
442 		/* Safe unlocked, as long as caller holds a reference. */
443 		return (0);
444 	}
445 
446 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
447 	mutex_enter(vp->v_interlock);
448 	if (vp->v_writecount != 0) {
449 		KASSERT((vp->v_iflag & VI_TEXT) == 0);
450 		mutex_exit(vp->v_interlock);
451 		rw_exit(vp->v_uobj.vmobjlock);
452 		return (ETXTBSY);
453 	}
454 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
455 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
456 	}
457 	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
458 	mutex_exit(vp->v_interlock);
459 	rw_exit(vp->v_uobj.vmobjlock);
460 	return (0);
461 }
462 
463 /*
464  * Vnode close call
465  *
466  * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
467  */
468 int
469 vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
470 {
471 	int error;
472 
473 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
474 	if (flags & FWRITE) {
475 		mutex_enter(vp->v_interlock);
476 		KASSERT(vp->v_writecount > 0);
477 		vp->v_writecount--;
478 		mutex_exit(vp->v_interlock);
479 	}
480 	error = VOP_CLOSE(vp, flags, cred);
481 	vput(vp);
482 	return (error);
483 }
484 
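/*
 * Post SIGXFSZ and return EFBIG if a write to a regular file would
 * exceed the process's RLIMIT_FSIZE soft limit.
 */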
485 static int
486 enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
487 {
488 	struct lwp *l = curlwp;
489 	off_t testoff;
490 
491 	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
492 		return 0;
493 
494 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
495 	if (ioflag & IO_APPEND)
496 		testoff = vp->v_size;
497 	else
498 		testoff = uio->uio_offset;
499 
500 	if (testoff + uio->uio_resid >
501 	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
502 		mutex_enter(&proc_lock);
503 		psignal(l->l_proc, SIGXFSZ);
504 		mutex_exit(&proc_lock);
505 		return EFBIG;
506 	}
507 
508 	return 0;
509 }
510 
511 /*
512  * Package up an I/O request on a vnode into a uio and do it.
513  */
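/*
 * Example (sketch): read the first 512 bytes of an unlocked vnode
 * into a kernel buffer, letting vn_rdwr() take and drop the lock:
 *
 *	char buf[512];
 *	size_t resid;
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, cred, &resid, curlwp);
 */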
514 int
515 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
516     enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
517     struct lwp *l)
518 {
519 	struct uio auio;
520 	struct iovec aiov;
521 	int error;
522 
523 	if ((ioflg & IO_NODELOCKED) == 0) {
524 		if (rw == UIO_READ) {
525 			vn_lock(vp, LK_SHARED | LK_RETRY);
526 		} else /* UIO_WRITE */ {
527 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
528 		}
529 	}
530 	auio.uio_iov = &aiov;
531 	auio.uio_iovcnt = 1;
532 	aiov.iov_base = base;
533 	aiov.iov_len = len;
534 	auio.uio_resid = len;
535 	auio.uio_offset = offset;
536 	auio.uio_rw = rw;
537 	if (segflg == UIO_SYSSPACE) {
538 		UIO_SETUP_SYSSPACE(&auio);
539 	} else {
540 		auio.uio_vmspace = l->l_proc->p_vmspace;
541 	}
542 
543 	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
544 		goto out;
545 
546 	if (rw == UIO_READ) {
547 		error = VOP_READ(vp, &auio, ioflg, cred);
548 	} else {
549 		error = VOP_WRITE(vp, &auio, ioflg, cred);
550 	}
551 
552 	if (aresid)
553 		*aresid = auio.uio_resid;
554 	else
555 		if (auio.uio_resid && error == 0)
556 			error = EIO;
557 
558  out:
559 	if ((ioflg & IO_NODELOCKED) == 0) {
560 		VOP_UNLOCK(vp);
561 	}
562 	return (error);
563 }
564 
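/*
 * Common code for directory reads.  For union mounts, an empty read
 * is retried on the underlying (covered) vnode.
 */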
565 int
566 vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
567     struct lwp *l, off_t **cookies, int *ncookies)
568 {
569 	struct vnode *vp = fp->f_vnode;
570 	struct iovec aiov;
571 	struct uio auio;
572 	int error, eofflag;
573 
574 	/* Limit the size of any kernel buffers used by VOP_READDIR */
575 	count = uimin(MAXBSIZE, count);
576 
577 unionread:
578 	if (vp->v_type != VDIR)
579 		return (EINVAL);
580 	aiov.iov_base = bf;
581 	aiov.iov_len = count;
582 	auio.uio_iov = &aiov;
583 	auio.uio_iovcnt = 1;
584 	auio.uio_rw = UIO_READ;
585 	if (segflg == UIO_SYSSPACE) {
586 		UIO_SETUP_SYSSPACE(&auio);
587 	} else {
588 		KASSERT(l == curlwp);
589 		auio.uio_vmspace = l->l_proc->p_vmspace;
590 	}
591 	auio.uio_resid = count;
592 	vn_lock(vp, LK_SHARED | LK_RETRY);
593 	auio.uio_offset = fp->f_offset;
594 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
595 		    ncookies);
596 	mutex_enter(&fp->f_lock);
597 	fp->f_offset = auio.uio_offset;
598 	mutex_exit(&fp->f_lock);
599 	VOP_UNLOCK(vp);
600 	if (error)
601 		return (error);
602 
603 	if (count == auio.uio_resid && vn_union_readdir_hook) {
604 		struct vnode *ovp = vp;
605 
606 		error = (*vn_union_readdir_hook)(&vp, fp, l);
607 		if (error)
608 			return (error);
609 		if (vp != ovp)
610 			goto unionread;
611 	}
612 
613 	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
614 	    (vp->v_mount->mnt_flag & MNT_UNION)) {
615 		struct vnode *tvp = vp;
616 		vp = vp->v_mount->mnt_vnodecovered;
617 		vref(vp);
618 		mutex_enter(&fp->f_lock);
619 		fp->f_vnode = vp;
620 		fp->f_offset = 0;
621 		mutex_exit(&fp->f_lock);
622 		vrele(tvp);
623 		goto unionread;
624 	}
625 	*done = count - auio.uio_resid;
626 	return error;
627 }
628 
629 /*
630  * File table vnode read routine.
631  */
632 static int
633 vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
634     int flags)
635 {
636 	struct vnode *vp = fp->f_vnode;
637 	int error, ioflag, fflag;
638 	size_t count;
639 
640 	ioflag = IO_ADV_ENCODE(fp->f_advice);
641 	fflag = fp->f_flag;
642 	if (fflag & FNONBLOCK)
643 		ioflag |= IO_NDELAY;
644 	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
645 		ioflag |= IO_SYNC;
646 	if (fflag & FALTIO)
647 		ioflag |= IO_ALTSEMANTICS;
648 	if (fflag & FDIRECT)
649 		ioflag |= IO_DIRECT;
650 	vn_lock(vp, LK_SHARED | LK_RETRY);
651 	uio->uio_offset = *offset;
652 	count = uio->uio_resid;
653 	error = VOP_READ(vp, uio, ioflag, cred);
654 	if (flags & FOF_UPDATE_OFFSET)
655 		*offset += count - uio->uio_resid;
656 	VOP_UNLOCK(vp);
657 	return (error);
658 }
659 
660 /*
661  * File table vnode write routine.
662  */
663 static int
664 vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
665     int flags)
666 {
667 	struct vnode *vp = fp->f_vnode;
668 	int error, ioflag, fflag;
669 	size_t count;
670 
671 	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
672 	fflag = fp->f_flag;
673 	if (vp->v_type == VREG && (fflag & O_APPEND))
674 		ioflag |= IO_APPEND;
675 	if (fflag & FNONBLOCK)
676 		ioflag |= IO_NDELAY;
677 	if (fflag & FFSYNC ||
678 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
679 		ioflag |= IO_SYNC;
680 	else if (fflag & FDSYNC)
681 		ioflag |= IO_DSYNC;
682 	if (fflag & FALTIO)
683 		ioflag |= IO_ALTSEMANTICS;
684 	if (fflag & FDIRECT)
685 		ioflag |= IO_DIRECT;
686 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
687 	uio->uio_offset = *offset;
688 	count = uio->uio_resid;
689 
690 	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
691 		goto out;
692 
693 	error = VOP_WRITE(vp, uio, ioflag, cred);
694 
695 	if (flags & FOF_UPDATE_OFFSET) {
696 		if (ioflag & IO_APPEND) {
697 			/*
698 			 * SUSv3 describes the behaviour for count = 0 as follows:
699 			 * "Before any action ... is taken, and if nbyte is zero
700 			 * and the file is a regular file, the write() function
701 			 * ... in the absence of errors ... shall return zero
702 			 * and have no other results."
703 			 */
704 			if (count)
705 				*offset = uio->uio_offset;
706 		} else
707 			*offset += count - uio->uio_resid;
708 	}
709 
710  out:
711 	VOP_UNLOCK(vp);
712 	return (error);
713 }
714 
715 /*
716  * File table vnode stat routine.
717  */
718 static int
719 vn_statfile(file_t *fp, struct stat *sb)
720 {
721 	struct vnode *vp = fp->f_vnode;
722 	int error;
723 
724 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
725 	error = vn_stat(vp, sb);
726 	VOP_UNLOCK(vp);
727 	return error;
728 }
729 
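/*
 * Fill in a struct stat from a vnode's attributes.
 */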
730 int
731 vn_stat(struct vnode *vp, struct stat *sb)
732 {
733 	struct vattr va;
734 	int error;
735 	mode_t mode;
736 
737 	memset(&va, 0, sizeof(va));
738 	error = VOP_GETATTR(vp, &va, kauth_cred_get());
739 	if (error)
740 		return (error);
741 	/*
742 	 * Copy from vattr table
743 	 */
744 	memset(sb, 0, sizeof(*sb));
745 	sb->st_dev = va.va_fsid;
746 	sb->st_ino = va.va_fileid;
747 	mode = va.va_mode;
748 	switch (vp->v_type) {
749 	case VREG:
750 		mode |= S_IFREG;
751 		break;
752 	case VDIR:
753 		mode |= S_IFDIR;
754 		break;
755 	case VBLK:
756 		mode |= S_IFBLK;
757 		break;
758 	case VCHR:
759 		mode |= S_IFCHR;
760 		break;
761 	case VLNK:
762 		mode |= S_IFLNK;
763 		break;
764 	case VSOCK:
765 		mode |= S_IFSOCK;
766 		break;
767 	case VFIFO:
768 		mode |= S_IFIFO;
769 		break;
770 	default:
771 		return (EBADF);
772 	}
773 	sb->st_mode = mode;
774 	sb->st_nlink = va.va_nlink;
775 	sb->st_uid = va.va_uid;
776 	sb->st_gid = va.va_gid;
777 	sb->st_rdev = va.va_rdev;
778 	sb->st_size = va.va_size;
779 	sb->st_atimespec = va.va_atime;
780 	sb->st_mtimespec = va.va_mtime;
781 	sb->st_ctimespec = va.va_ctime;
782 	sb->st_birthtimespec = va.va_birthtime;
783 	sb->st_blksize = va.va_blocksize;
784 	sb->st_flags = va.va_flags;
785 	sb->st_gen = 0;
786 	sb->st_blocks = va.va_bytes / S_BLKSIZE;
787 	return (0);
788 }
789 
790 /*
791  * File table vnode fcntl routine.
792  */
793 static int
794 vn_fcntl(file_t *fp, u_int com, void *data)
795 {
796 	struct vnode *vp = fp->f_vnode;
797 	int error;
798 
799 	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
800 	return (error);
801 }
802 
803 /*
804  * File table vnode ioctl routine.
805  */
806 static int
807 vn_ioctl(file_t *fp, u_long com, void *data)
808 {
809 	struct vnode *vp = fp->f_vnode, *ovp;
810 	struct vattr vattr;
811 	int error;
812 
813 	switch (vp->v_type) {
814 
815 	case VREG:
816 	case VDIR:
817 		if (com == FIONREAD) {
818 			vn_lock(vp, LK_SHARED | LK_RETRY);
819 			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
820 			if (error == 0)
821 				*(int *)data = vattr.va_size - fp->f_offset;
822 			VOP_UNLOCK(vp);
823 			if (error)
824 				return (error);
825 			return (0);
826 		}
827 		if ((com == FIONWRITE) || (com == FIONSPACE)) {
828 			/*
829 			 * Files don't have send queues, so there are
830 			 * never any bytes in them, nor is there any
831 			 * open space in them.
832 			 */
833 			*(int *)data = 0;
834 			return (0);
835 		}
836 		if (com == FIOGETBMAP) {
837 			daddr_t *block;
838 
839 			if (*(daddr_t *)data < 0)
840 				return (EINVAL);
841 			block = (daddr_t *)data;
842 			vn_lock(vp, LK_SHARED | LK_RETRY);
843 			error = VOP_BMAP(vp, *block, NULL, block, NULL);
844 			VOP_UNLOCK(vp);
845 			return error;
846 		}
847 		if (com == OFIOGETBMAP) {
848 			daddr_t ibn, obn;
849 
850 			if (*(int32_t *)data < 0)
851 				return (EINVAL);
852 			ibn = (daddr_t)*(int32_t *)data;
853 			vn_lock(vp, LK_SHARED | LK_RETRY);
854 			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
855 			VOP_UNLOCK(vp);
856 			*(int32_t *)data = (int32_t)obn;
857 			return error;
858 		}
859 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
860 			return (0);			/* XXX */
861 		/* FALLTHROUGH */
862 	case VFIFO:
863 	case VCHR:
864 	case VBLK:
865 		error = VOP_IOCTL(vp, com, data, fp->f_flag,
866 		    kauth_cred_get());
867 		if (error == 0 && com == TIOCSCTTY) {
868 			vref(vp);
869 			mutex_enter(&proc_lock);
870 			ovp = curproc->p_session->s_ttyvp;
871 			curproc->p_session->s_ttyvp = vp;
872 			mutex_exit(&proc_lock);
873 			if (ovp != NULL)
874 				vrele(ovp);
875 		}
876 		return (error);
877 
878 	default:
879 		return (EPASSTHROUGH);
880 	}
881 }
882 
883 /*
884  * File table vnode poll routine.
885  */
886 static int
887 vn_poll(file_t *fp, int events)
888 {
889 
890 	return (VOP_POLL(fp->f_vnode, events));
891 }
892 
893 /*
894  * File table vnode kqfilter routine.
895  */
896 int
897 vn_kqfilter(file_t *fp, struct knote *kn)
898 {
899 
900 	return (VOP_KQFILTER(fp->f_vnode, kn));
901 }
902 
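/*
 * File table vnode mmap routine.
 */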
903 static int
904 vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
905 	int *advicep, struct uvm_object **uobjp, int *maxprotp)
906 {
907 	struct uvm_object *uobj;
908 	struct vnode *vp;
909 	struct vattr va;
910 	struct lwp *l;
911 	vm_prot_t maxprot;
912 	off_t off;
913 	int error, flags;
914 	bool needwritemap;
915 
916 	l = curlwp;
917 
918 	off = *offp;
919 	flags = *flagsp;
920 	maxprot = VM_PROT_EXECUTE;
921 
922 	vp = fp->f_vnode;
923 	if (vp->v_type != VREG && vp->v_type != VCHR &&
924 	    vp->v_type != VBLK) {
925 		/* only REG/CHR/BLK support mmap */
926 		return ENODEV;
927 	}
928 	if (vp->v_type != VCHR && off < 0) {
929 		return EINVAL;
930 	}
931 	if (vp->v_type != VCHR && (off_t)(off + size) < off) {
932 		/* no offset wrapping */
933 		return EOVERFLOW;
934 	}
935 
936 	/* special case: catch SunOS style /dev/zero */
937 	if (vp->v_type == VCHR &&
938 	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
939 		*uobjp = NULL;
940 		*maxprotp = VM_PROT_ALL;
941 		return 0;
942 	}
943 
944 	/*
945 	 * Old programs may not select a specific sharing type, so
946 	 * default to an appropriate one.
947 	 *
948 	 * XXX: how does MAP_ANON fit in the picture?
949 	 */
950 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
951 #if defined(DEBUG)
952 		struct proc *p = l->l_proc;
953 		printf("WARNING: defaulted mmap() share type to "
954 		       "%s (pid %d command %s)\n", vp->v_type == VCHR ?
955 		       "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
956 		       p->p_comm);
957 #endif
958 		if (vp->v_type == VCHR)
959 			flags |= MAP_SHARED;	/* for a device */
960 		else
961 			flags |= MAP_PRIVATE;	/* for a file */
962 	}
963 
964 	/*
965 	 * MAP_PRIVATE device mappings don't make sense (and aren't
966 	 * supported anyway).  However, some programs rely on this,
967 	 * so just change it to MAP_SHARED.
968 	 */
969 	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
970 		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
971 	}
972 
973 	/*
974 	 * now check protection
975 	 */
976 
977 	/* check read access */
978 	if (fp->f_flag & FREAD)
979 		maxprot |= VM_PROT_READ;
980 	else if (prot & PROT_READ) {
981 		return EACCES;
982 	}
983 
984 	/* check write access, shared case first */
985 	if (flags & MAP_SHARED) {
986 		/*
987 		 * If the file is writable, only add PROT_WRITE to
988 		 * maxprot if the file is not immutable or append-only;
989 		 * otherwise, if PROT_WRITE was asked for, return
990 		 * EPERM.
991 		 */
992 		if (fp->f_flag & FWRITE) {
993 			vn_lock(vp, LK_SHARED | LK_RETRY);
994 			error = VOP_GETATTR(vp, &va, l->l_cred);
995 			VOP_UNLOCK(vp);
996 			if (error) {
997 				return error;
998 			}
999 			if ((va.va_flags &
1000 			     (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
1001 				maxprot |= VM_PROT_WRITE;
1002 			else if (prot & PROT_WRITE) {
1003 				return EPERM;
1004 			}
1005 		} else if (prot & PROT_WRITE) {
1006 			return EACCES;
1007 		}
1008 	} else {
1009 		/* MAP_PRIVATE mappings can always be written to */
1010 		maxprot |= VM_PROT_WRITE;
1011 	}
1012 
1013 	/*
1014 	 * Don't allow mmap for EXEC if the file system
1015 	 * is mounted NOEXEC.
1016 	 */
1017 	if ((prot & PROT_EXEC) != 0 &&
1018 	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
1019 		return EACCES;
1020 	}
1021 
1022 	if (vp->v_type != VCHR) {
1023 		error = VOP_MMAP(vp, prot, curlwp->l_cred);
1024 		if (error) {
1025 			return error;
1026 		}
1027 		vref(vp);
1028 		uobj = &vp->v_uobj;
1029 
1030 		/*
1031 		 * If the vnode is being mapped with PROT_EXEC,
1032 		 * then mark it as text.
1033 		 */
1034 		if (prot & PROT_EXEC) {
1035 			vn_markexec(vp);
1036 		}
1037 	} else {
1038 		int i = maxprot;
1039 
1040 		/*
1041 		 * XXX Some devices don't like to be mapped with
1042 		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
1043 		 * XXX have a better way of handling this, right now
1044 		 */
1045 		do {
1046 			uobj = udv_attach(vp->v_rdev,
1047 					  (flags & MAP_SHARED) ? i :
1048 					  (i & ~VM_PROT_WRITE), off, size);
1049 			i--;
1050 		} while ((uobj == NULL) && (i > 0));
1051 		if (uobj == NULL) {
1052 			return EINVAL;
1053 		}
1054 		*advicep = UVM_ADV_RANDOM;
1055 	}
1056 
1057 	/*
1058 	 * Set vnode flags to indicate the new kinds of mapping.
1059 	 * We take the vnode lock in exclusive mode here to serialize
1060 	 * with direct I/O.
1061 	 *
1062 	 * Safe to check for these flag values without a lock, as
1063 	 * long as a reference to the vnode is held.
1064 	 */
1065 	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
1066 		(flags & MAP_SHARED) != 0 &&
1067 		(maxprot & VM_PROT_WRITE) != 0;
1068 	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
1069 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1070 		vp->v_vflag |= VV_MAPPED;
1071 		if (needwritemap) {
1072 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1073 			mutex_enter(vp->v_interlock);
1074 			vp->v_iflag |= VI_WRMAP;
1075 			mutex_exit(vp->v_interlock);
1076 			rw_exit(vp->v_uobj.vmobjlock);
1077 		}
1078 		VOP_UNLOCK(vp);
1079 	}
1080 
1081 #if NVERIEXEC > 0
1082 
1083 	/*
1084 	 * Check if the file can be executed indirectly.
1085 	 *
1086 	 * XXX: This gives false warnings about "Incorrect access type"
1087 	 * XXX: if the mapping is not executable. Harmless, but will be
1088 	 * XXX: fixed as part of other changes.
1089 	 */
1090 	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
1091 			    NULL)) {
1092 
1093 		/*
1094 		 * Don't allow executable mappings if we can't
1095 		 * indirectly execute the file.
1096 		 */
1097 		if (prot & VM_PROT_EXECUTE) {
1098 			return EPERM;
1099 		}
1100 
1101 		/*
1102 		 * Strip the executable bit from 'maxprot' to make sure
1103 		 * it can't be made executable later.
1104 		 */
1105 		maxprot &= ~VM_PROT_EXECUTE;
1106 	}
1107 #endif /* NVERIEXEC > 0 */
1108 
1109 	*uobjp = uobj;
1110 	*maxprotp = maxprot;
1111 	*flagsp = flags;
1112 
1113 	return 0;
1114 }
1115 
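/*
 * File table vnode seek routine.
 */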
1116 static int
1117 vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
1118     int flags)
1119 {
1120 	kauth_cred_t cred = fp->f_cred;
1121 	off_t oldoff, newoff;
1122 	struct vnode *vp = fp->f_vnode;
1123 	struct vattr vattr;
1124 	int error;
1125 
1126 	if (vp->v_type == VFIFO)
1127 		return ESPIPE;
1128 
1129 	vn_lock(vp, LK_SHARED | LK_RETRY);
1130 
1131 	/* Compute the old and new offsets.  */
1132 	oldoff = fp->f_offset;
1133 	switch (whence) {
1134 	case SEEK_CUR:
1135 		newoff = oldoff + delta; /* XXX arithmetic overflow */
1136 		break;
1137 	case SEEK_END:
1138 		error = VOP_GETATTR(vp, &vattr, cred);
1139 		if (error)
1140 			goto out;
1141 		newoff = delta + vattr.va_size; /* XXX arithmetic overflow */
1142 		break;
1143 	case SEEK_SET:
1144 		newoff = delta;
1145 		break;
1146 	default:
1147 		error = EINVAL;
1148 		goto out;
1149 	}
1150 
1151 	/* Pass the proposed change to the file system to audit.  */
1152 	error = VOP_SEEK(vp, oldoff, newoff, cred);
1153 	if (error)
1154 		goto out;
1155 
1156 	/* Success!  */
1157 	if (newoffp)
1158 		*newoffp = newoff;
1159 	if (flags & FOF_UPDATE_OFFSET)
1160 		fp->f_offset = newoff;
1161 	error = 0;
1162 
1163 out:	VOP_UNLOCK(vp);
1164 	return error;
1165 }
1166 
1167 /*
1168  * Check that the vnode is still valid, and if so
1169  * acquire the requested lock.
1170  */
1171 int
1172 vn_lock(struct vnode *vp, int flags)
1173 {
1174 	struct lwp *l;
1175 	int error;
1176 
1177 #if 0
1178 	KASSERT(vrefcnt(vp) > 0 || (vp->v_iflag & VI_ONWORKLST) != 0);
1179 #endif
1180 	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
1181 	    LK_UPGRADE|LK_DOWNGRADE)) == 0);
1182 	KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));
1183 
1184 #ifdef DIAGNOSTIC
1185 	if (wapbl_vphaswapbl(vp))
1186 		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
1187 #endif
1188 
1189 	/* Get a more useful report for lockstat. */
1190 	l = curlwp;
1191 	KASSERT(l->l_rwcallsite == 0);
1192 	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);
1193 
1194 	error = VOP_LOCK(vp, flags);
1195 	if ((flags & LK_RETRY) != 0 && error == ENOENT)
1196 		error = VOP_LOCK(vp, flags);
1197 
1198 	l->l_rwcallsite = 0;
1199 
1200 	KASSERT((flags & LK_RETRY) == 0 || (flags & LK_NOWAIT) != 0 ||
1201 	    error == 0);
1202 
1203 	return error;
1204 }
1205 
1206 /*
1207  * File table vnode close routine.
1208  */
1209 static int
1210 vn_closefile(file_t *fp)
1211 {
1212 
1213 	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
1214 }
1215 
1216 /*
1217  * Simplified in-kernel wrapper calls for extended attribute access.
1218  * Both calls pass in a NULL credential, authorizing a "kernel" access.
1219  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1220  */
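/*
 * Example (sketch; "myattr" is a hypothetical attribute name): read a
 * system-namespace attribute of an already-locked vnode:
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "myattr", &len, buf, curlwp);
 */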
1221 int
1222 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1223     const char *attrname, size_t *buflen, void *bf, struct lwp *l)
1224 {
1225 	struct uio auio;
1226 	struct iovec aiov;
1227 	int error;
1228 
1229 	aiov.iov_len = *buflen;
1230 	aiov.iov_base = bf;
1231 
1232 	auio.uio_iov = &aiov;
1233 	auio.uio_iovcnt = 1;
1234 	auio.uio_rw = UIO_READ;
1235 	auio.uio_offset = 0;
1236 	auio.uio_resid = *buflen;
1237 	UIO_SETUP_SYSSPACE(&auio);
1238 
1239 	if ((ioflg & IO_NODELOCKED) == 0)
1240 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1241 
1242 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
1243 	    NOCRED);
1244 
1245 	if ((ioflg & IO_NODELOCKED) == 0)
1246 		VOP_UNLOCK(vp);
1247 
1248 	if (error == 0)
1249 		*buflen = *buflen - auio.uio_resid;
1250 
1251 	return (error);
1252 }
1253 
1254 /*
1255  * XXX Failure mode if partially written?
1256  */
1257 int
1258 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1259     const char *attrname, size_t buflen, const void *bf, struct lwp *l)
1260 {
1261 	struct uio auio;
1262 	struct iovec aiov;
1263 	int error;
1264 
1265 	aiov.iov_len = buflen;
1266 	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */
1267 
1268 	auio.uio_iov = &aiov;
1269 	auio.uio_iovcnt = 1;
1270 	auio.uio_rw = UIO_WRITE;
1271 	auio.uio_offset = 0;
1272 	auio.uio_resid = buflen;
1273 	UIO_SETUP_SYSSPACE(&auio);
1274 
1275 	if ((ioflg & IO_NODELOCKED) == 0) {
1276 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1277 	}
1278 
1279 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);
1280 
1281 	if ((ioflg & IO_NODELOCKED) == 0) {
1282 		VOP_UNLOCK(vp);
1283 	}
1284 
1285 	return (error);
1286 }
1287 
1288 int
1289 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1290     const char *attrname, struct lwp *l)
1291 {
1292 	int error;
1293 
1294 	if ((ioflg & IO_NODELOCKED) == 0) {
1295 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1296 	}
1297 
1298 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
1299 	if (error == EOPNOTSUPP)
1300 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1301 		    NOCRED);
1302 
1303 	if ((ioflg & IO_NODELOCKED) == 0) {
1304 		VOP_UNLOCK(vp);
1305 	}
1306 
1307 	return (error);
1308 }
1309 
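/*
 * Dispatch a vnode operation to the fifofs implementation.
 */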
1310 int
1311 vn_fifo_bypass(void *v)
1312 {
1313 	struct vop_generic_args *ap = v;
1314 
1315 	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
1316 }
1317 
1318 /*
1319  * Open block device by device number
1320  */
1321 int
1322 vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
1323 {
1324 	int     error;
1325 
1326 	if ((error = bdevvp(dev, vpp)) != 0)
1327 		return error;
1328 
1329 	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
1330 		vrele(*vpp);
1331 		return error;
1332 	}
1333 	mutex_enter((*vpp)->v_interlock);
1334 	(*vpp)->v_writecount++;
1335 	mutex_exit((*vpp)->v_interlock);
1336 
1337 	return 0;
1338 }
1339 
1340 /*
1341  * Look up the provided name in the filesystem.  If the file exists,
1342  * is a valid block device, and isn't being used by anyone else,
1343  * set *vpp to the file's vnode.
1344  */
1345 int
1346 vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
1347 {
1348 	struct vnode *vp;
1349 	dev_t dev;
1350 	enum vtype vt;
1351 	int     error;
1352 
1353 	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
1354 	if (error != 0)
1355 		return error;
1356 
1357 	dev = vp->v_rdev;
1358 	vt = vp->v_type;
1359 
1360 	VOP_UNLOCK(vp);
1361 	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
1362 
1363 	if (vt != VBLK)
1364 		return ENOTBLK;
1365 
1366 	return vn_bdev_open(dev, vpp, l);
1367 }
1368 
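/*
 * Map a knote's filter to the set of NOTE_* hints that should
 * trigger it, for the interest mask kept in v_klist_interest.
 */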
1369 static long
1370 vn_knote_to_interest(const struct knote *kn)
1371 {
1372 	switch (kn->kn_filter) {
1373 	case EVFILT_READ:
1374 		/*
1375 		 * Writing to the file or changing its attributes can
1376 		 * set the file size, which impacts the readability
1377 		 * filter.
1378 		 *
1379 		 * (No need to set NOTE_EXTEND here; it's only ever
1380 		 * sent with other hints; see vnode_if.c.)
1381 		 */
1382 		return NOTE_WRITE | NOTE_ATTRIB;
1383 
1384 	case EVFILT_VNODE:
1385 		return kn->kn_sfflags;
1386 
1387 	case EVFILT_WRITE:
1388 	default:
1389 		return 0;
1390 	}
1391 }
1392 
1393 void
1394 vn_knote_attach(struct vnode *vp, struct knote *kn)
1395 {
1396 	long interest = 0;
1397 
1398 	/*
1399 	 * We maintain a bitmask of the kevents that there is interest in,
1400 	 * to minimize the impact of having watchers.  It's silly to have
1401 	 * to traverse v_klist every time a read or write happens simply
1402 	 * because there is someone interested in knowing when the file
1403 	 * is deleted, for example.
1404 	 */
1405 
1406 	mutex_enter(vp->v_interlock);
1407 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
1408 	SLIST_FOREACH(kn, &vp->v_klist, kn_selnext) {
1409 		interest |= vn_knote_to_interest(kn);
1410 	}
1411 	vp->v_klist_interest = interest;
1412 	mutex_exit(vp->v_interlock);
1413 }
1414 
1415 void
1416 vn_knote_detach(struct vnode *vp, struct knote *kn)
1417 {
1418 	int interest = 0;
1419 
1420 	/*
1421 	 * We special-case removing the head of the list, because:
1422 	 *
1423 	 * 1. It's extremely likely that we're detaching the only
1424 	 *    knote.
1425 	 *
1426 	 * 2. We're already traversing the whole list, so we don't
1427 	 *    want to use the generic SLIST_REMOVE() which would
1428 	 *    traverse it *again*.
1429 	 */
1430 
1431 	mutex_enter(vp->v_interlock);
1432 	if (__predict_true(kn == SLIST_FIRST(&vp->v_klist))) {
1433 		SLIST_REMOVE_HEAD(&vp->v_klist, kn_selnext);
1434 		SLIST_FOREACH(kn, &vp->v_klist, kn_selnext) {
1435 			interest |= vn_knote_to_interest(kn);
1436 		}
1437 		vp->v_klist_interest = interest;
1438 	} else {
1439 		struct knote *thiskn, *nextkn, *prevkn = NULL;
1440 
1441 		SLIST_FOREACH_SAFE(thiskn, &vp->v_klist, kn_selnext, nextkn) {
1442 			if (thiskn == kn) {
1443 				KASSERT(kn != NULL);
1444 				KASSERT(prevkn != NULL);
1445 				SLIST_REMOVE_AFTER(prevkn, kn_selnext);
1446 				kn = NULL;
1447 			} else {
1448 				interest |= vn_knote_to_interest(thiskn);
1449 				prevkn = thiskn;
1450 			}
1451 		}
1452 		vp->v_klist_interest = interest;
1453 	}
1454 	mutex_exit(vp->v_interlock);
1455 }
1456