1 /*	$OpenBSD: vfs_subr.c,v 1.285 2019/01/21 18:09:21 anton Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/time.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/conf.h>
53 #include <sys/vnode.h>
54 #include <sys/lock.h>
55 #include <sys/lockf.h>
56 #include <sys/stat.h>
57 #include <sys/acct.h>
58 #include <sys/namei.h>
59 #include <sys/ucred.h>
60 #include <sys/buf.h>
61 #include <sys/errno.h>
62 #include <sys/malloc.h>
63 #include <sys/mbuf.h>
64 #include <sys/syscallargs.h>
65 #include <sys/pool.h>
66 #include <sys/tree.h>
67 #include <sys/specdev.h>
68 #include <sys/atomic.h>
69 
70 #include <netinet/in.h>
71 
72 #include <uvm/uvm_extern.h>
73 #include <uvm/uvm_vnode.h>
74 
75 #include "softraid.h"
76 
77 void sr_quiesce(void);
78 
79 enum vtype iftovt_tab[16] = {
80 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
81 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
82 };
83 
84 int	vttoif_tab[9] = {
85 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
86 	S_IFSOCK, S_IFIFO, S_IFMT,
87 };
88 
89 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
90 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
91 
92 /*
93  * Insq/Remq for the vnode usage lists.
94  */
95 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
96 #define	bufremvn(bp) {							\
97 	LIST_REMOVE(bp, b_vnbufs);					\
98 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
99 }
100 
101 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
102 struct freelst vnode_free_list;	/* vnode free list */
103 
104 struct mntlist mountlist;	/* mounted filesystem list */
105 
106 void	vclean(struct vnode *, int, struct proc *);
107 
108 void insmntque(struct vnode *, struct mount *);
109 int getdevvp(dev_t, struct vnode **, enum vtype);
110 
111 int vfs_hang_addrlist(struct mount *, struct netexport *,
112 				  struct export_args *);
113 int vfs_free_netcred(struct radix_node *, void *, u_int);
114 void vfs_free_addrlist(struct netexport *);
115 void vputonfreelist(struct vnode *);
116 
117 int vflush_vnode(struct vnode *, void *);
118 int maxvnodes;
119 
120 void vfs_unmountall(void);
121 
122 #ifdef DEBUG
123 void printlockedvnodes(void);
124 #endif
125 
126 struct pool vnode_pool;
127 struct pool uvm_vnode_pool;
128 
129 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
130 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
131 
132 static inline int
133 rb_buf_compare(const struct buf *b1, const struct buf *b2)
134 {
135 	if (b1->b_lblkno < b2->b_lblkno)
136 		return (-1);
137 	if (b1->b_lblkno > b2->b_lblkno)
138 		return (1);
139 	return (0);
140 }
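
/*
 * Each vnode keeps its cached buffers in a red-black tree ordered by
 * the comparison above, so looking a buffer up by logical block number
 * is O(log n).  A minimal sketch of such a lookup (hypothetical
 * fragment, not part of this file):
 *
 *	struct buf key, *bp;
 *
 *	key.b_lblkno = lblkno;
 *	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key);
 */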
141 
142 /*
143  * Initialize the vnode management data structures.
144  */
145 void
146 vntblinit(void)
147 {
148 	/* buffer cache may need a vnode for each buffer */
149 	maxvnodes = 2 * initialvnodes;
150 	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
151 	    PR_WAITOK, "vnodes", NULL);
152 	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
153 	    PR_WAITOK, "uvmvnodes", NULL);
154 	TAILQ_INIT(&vnode_hold_list);
155 	TAILQ_INIT(&vnode_free_list);
156 	TAILQ_INIT(&mountlist);
157 	/*
158 	 * Initialize the filesystem syncer.
159 	 */
160 	vn_initialize_syncerd();
161 
162 #ifdef NFSSERVER
163 	rn_init(sizeof(struct sockaddr_in));
164 #endif /* NFSSERVER */
165 }
166 
167 /*
168  * Allocate a mount point.
169  *
170  * The returned mount point is marked as busy.
171  */
172 struct mount *
173 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp)
174 {
175 	struct mount *mp;
176 
177 	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
178 	rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
179 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
180 
181 	LIST_INIT(&mp->mnt_vnodelist);
182 	mp->mnt_vnodecovered = vp;
183 
184 	atomic_inc_int(&vfsp->vfc_refcount);
185 	mp->mnt_vfc = vfsp;
186 	mp->mnt_op = vfsp->vfc_vfsops;
187 	mp->mnt_flag = vfsp->vfc_flags & MNT_VISFLAGMASK;
188 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
189 
190 	return (mp);
191 }
192 
193 /*
194  * Release a mount point.
195  */
196 void
197 vfs_mount_free(struct mount *mp)
198 {
199 	atomic_dec_int(&mp->mnt_vfc->vfc_refcount);
200 	free(mp, M_MOUNT, sizeof(*mp));
201 }
202 
203 /*
204  * Mark a mount point as busy. Used to synchronize access and to delay
205  * unmounting.
206  *
207  * The default behaviour is to attempt getting a READ lock; if an
208  * unmount is in progress, wait for it to finish and then return failure.
209  */
210 int
211 vfs_busy(struct mount *mp, int flags)
212 {
213 	int rwflags = 0;
214 
215 	if (flags & VB_WRITE)
216 		rwflags |= RW_WRITE;
217 	else
218 		rwflags |= RW_READ;
219 
220 	if (flags & VB_WAIT)
221 		rwflags |= RW_SLEEPFAIL;
222 	else
223 		rwflags |= RW_NOSLEEP;
224 
225 #ifdef WITNESS
226 	if (flags & VB_DUPOK)
227 		rwflags |= RW_DUPOK;
228 #endif
229 
230 	if (rw_enter(&mp->mnt_lock, rwflags))
231 		return (EBUSY);
232 
233 	return (0);
234 }
235 
236 /*
237  * Unbusy a file system, releasing the lock taken by vfs_busy().
238  */
239 void
240 vfs_unbusy(struct mount *mp)
241 {
242 	rw_exit(&mp->mnt_lock);
243 }
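
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * bracket work that must not race with an unmount between vfs_busy()
 * and vfs_unbusy().
 *
 *	if (vfs_busy(mp, VB_READ|VB_WAIT) != 0)
 *		return (ENOENT);	(mount is going away)
 *	error = VFS_STATFS(mp, &mp->mnt_stat, p);
 *	vfs_unbusy(mp);
 */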
244 
245 int
246 vfs_isbusy(struct mount *mp)
247 {
248 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
249 		return (1);
250 	else
251 		return (0);
252 }
253 
254 /*
255  * Lookup a filesystem type, and if found allocate and initialize
256  * a mount structure for it.
257  *
258  * Devname is usually updated by mount(8) after booting.
259  */
260 int
261 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
262 {
263 	struct vfsconf *vfsp;
264 	struct mount *mp;
265 
266 	vfsp = vfs_byname(fstypename);
267 	if (vfsp == NULL)
268 		return (ENODEV);
269 	mp = vfs_mount_alloc(NULLVP, vfsp);
270 	mp->mnt_flag |= MNT_RDONLY;
271 	mp->mnt_stat.f_mntonname[0] = '/';
272 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
273 	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
274 	*mpp = mp;
275 	return (0);
276 }
277 
278 /*
279  * Lookup a mount point by filesystem identifier.
280  */
281 struct mount *
282 vfs_getvfs(fsid_t *fsid)
283 {
284 	struct mount *mp;
285 
286 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
287 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
288 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
289 			return (mp);
290 		}
291 	}
292 
293 	return (NULL);
294 }
295 
296 
297 /*
298  * Get a new unique fsid
299  */
300 void
301 vfs_getnewfsid(struct mount *mp)
302 {
303 	static u_short xxxfs_mntid;
304 
305 	fsid_t tfsid;
306 	int mtype;
307 
308 	mtype = mp->mnt_vfc->vfc_typenum;
309 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
310 	mp->mnt_stat.f_fsid.val[1] = mtype;
311 	if (xxxfs_mntid == 0)
312 		++xxxfs_mntid;
313 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
314 	tfsid.val[1] = mtype;
315 	if (!TAILQ_EMPTY(&mountlist)) {
316 		while (vfs_getvfs(&tfsid)) {
317 			tfsid.val[0]++;
318 			xxxfs_mntid++;
319 		}
320 	}
321 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
322 }
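
/*
 * Illustration with assumed numbers: if nblkdev is 50 and the
 * filesystem's vfc_typenum is 1, the first fsid handed out is
 * { makedev(51, 1), 1 }; on a collision with an already mounted
 * filesystem, the val[0] minor part is bumped until the fsid is unique.
 */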
323 
324 /*
325  * Set vnode attributes to VNOVAL
326  */
327 void
328 vattr_null(struct vattr *vap)
329 {
330 
331 	vap->va_type = VNON;
332 	/*
333 	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
334 	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
335 	 * the compiler do its job.
336 	 */
337 	vap->va_mode = VNOVAL;
338 	vap->va_nlink = VNOVAL;
339 	vap->va_uid = VNOVAL;
340 	vap->va_gid = VNOVAL;
341 	vap->va_fsid = VNOVAL;
342 	vap->va_fileid = VNOVAL;
343 	vap->va_size = VNOVAL;
344 	vap->va_blocksize = VNOVAL;
345 	vap->va_atime.tv_sec = VNOVAL;
346 	vap->va_atime.tv_nsec = VNOVAL;
347 	vap->va_mtime.tv_sec = VNOVAL;
348 	vap->va_mtime.tv_nsec = VNOVAL;
349 	vap->va_ctime.tv_sec = VNOVAL;
350 	vap->va_ctime.tv_nsec = VNOVAL;
351 	vap->va_gen = VNOVAL;
352 	vap->va_flags = VNOVAL;
353 	vap->va_rdev = VNOVAL;
354 	vap->va_bytes = VNOVAL;
355 	vap->va_filerev = VNOVAL;
356 	vap->va_vaflags = 0;
357 }
358 
359 /*
360  * Routines having to do with the management of the vnode table.
361  */
362 long numvnodes;
363 
364 /*
365  * Allocate a new vnode, or recycle one from the free lists.
366  */
367 int
368 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
369     struct vnode **vpp)
370 {
371 	struct proc *p = curproc;
372 	struct freelst *listhd;
373 	static int toggle;
374 	struct vnode *vp;
375 	int s;
376 
377 	/*
378 	 * Allow maxvnodes to increase if the buffer cache itself
379 	 * is big enough to justify it.  (We never shrink it.)
380 	 */
381 	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
382 	    : maxvnodes;
383 
384 	/*
385 	 * We must choose whether to allocate a new vnode or recycle an
386 	 * existing one. The criterion for allocating a new one is that
387 	 * the total number of vnodes is less than the number desired or
388 	 * there are no vnodes on either free list. Generally we only
389 	 * want to recycle vnodes that have no buffers associated with
390 	 * them, so we look first on the vnode_free_list. If it is empty,
391 	 * we next consider vnodes with referencing buffers on the
392 	 * vnode_hold_list. The toggle ensures that half the time we
393 	 * will recycle a vnode from the vnode_hold_list, and half the time
394 	 * we will allocate a new one unless the list has grown to twice
395 	 * the desired size. We are reluctant to recycle vnodes from the
396 	 * vnode_hold_list because we will lose the identity of all its
397 	 * referencing buffers.
398 	 */
399 	toggle ^= 1;
400 	if (numvnodes / 2 > maxvnodes)
401 		toggle = 0;
402 
403 	s = splbio();
404 	if ((numvnodes < maxvnodes) ||
405 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
406 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
407 		splx(s);
408 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
409 		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
410 		vp->v_uvm->u_vnode = vp;
411 		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
412 		cache_tree_init(&vp->v_nc_tree);
413 		TAILQ_INIT(&vp->v_cache_dst);
414 		numvnodes++;
415 	} else {
416 		TAILQ_FOREACH(vp, listhd, v_freelist) {
417 			if (VOP_ISLOCKED(vp) == 0)
418 				break;
419 		}
420 		/*
421 		 * Unless this is a bad time of the month, at most
422 		 * the first NCPUS items on the free list are
423 		 * locked, so this is close enough to being empty.
424 		 */
425 		if (vp == NULL) {
426 			splx(s);
427 			tablefull("vnode");
428 			*vpp = 0;
429 			return (ENFILE);
430 		}
431 
432 #ifdef DIAGNOSTIC
433 		if (vp->v_usecount) {
434 			vprint("free vnode", vp);
435 			panic("free vnode isn't");
436 		}
437 #endif
438 
439 		TAILQ_REMOVE(listhd, vp, v_freelist);
440 		vp->v_bioflag &= ~VBIOONFREELIST;
441 		splx(s);
442 
443 		if (vp->v_type != VBAD)
444 			vgonel(vp, p);
445 #ifdef DIAGNOSTIC
446 		if (vp->v_data) {
447 			vprint("cleaned vnode", vp);
448 			panic("cleaned vnode isn't");
449 		}
450 		s = splbio();
451 		if (vp->v_numoutput)
452 			panic("Clean vnode has pending I/O's");
453 		splx(s);
454 #endif
455 		vp->v_flag = 0;
456 		vp->v_socket = 0;
457 	}
458 	cache_purge(vp);
459 	vp->v_type = VNON;
460 	vp->v_tag = tag;
461 	vp->v_op = vops;
462 	insmntque(vp, mp);
463 	*vpp = vp;
464 	vp->v_usecount = 1;
465 	vp->v_data = 0;
466 	return (0);
467 }
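
/*
 * A minimal sketch of a filesystem allocating a fresh vnode, loosely
 * modelled on what ffs_vget() does (hypothetical fragment, not part
 * of this file):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	if ((error = getnewvnode(VT_UFS, mp, &ffs_vops, &vp)) != 0)
 *		return (error);
 *	vp->v_data = ip;	(hang the private inode off the vnode)
 */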
468 
469 /*
470  * Move a vnode from one mount queue to another.
471  */
472 void
473 insmntque(struct vnode *vp, struct mount *mp)
474 {
475 	/*
476 	 * Delete from old mount point vnode list, if on one.
477 	 */
478 	if (vp->v_mount != NULL)
479 		LIST_REMOVE(vp, v_mntvnodes);
480 	/*
481 	 * Insert into list of vnodes for the new mount point, if available.
482 	 */
483 	if ((vp->v_mount = mp) != NULL)
484 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
485 }
486 
487 /*
488  * Create a vnode for a block device.
489  * Used for root filesystem, argdev, and swap areas.
490  * Also used for memory file system special devices.
491  */
492 int
493 bdevvp(dev_t dev, struct vnode **vpp)
494 {
495 	return (getdevvp(dev, vpp, VBLK));
496 }
497 
498 /*
499  * Create a vnode for a character device.
500  * Used for console handling.
501  */
502 int
503 cdevvp(dev_t dev, struct vnode **vpp)
504 {
505 	return (getdevvp(dev, vpp, VCHR));
506 }
507 
508 /*
509  * Create a vnode for a device.
510  * Used by bdevvp (block device) for root file system etc.,
511  * and by cdevvp (character device) for console.
512  */
513 int
514 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
515 {
516 	struct vnode *vp;
517 	struct vnode *nvp;
518 	int error;
519 
520 	if (dev == NODEV) {
521 		*vpp = NULLVP;
522 		return (0);
523 	}
524 	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
525 	if (error) {
526 		*vpp = NULLVP;
527 		return (error);
528 	}
529 	vp = nvp;
530 	vp->v_type = type;
531 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
532 		vput(vp);
533 		vp = nvp;
534 	}
535 	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
536 		vp->v_flag |= VISTTY;
537 	*vpp = vp;
538 	return (0);
539 }
540 
541 /*
542  * Check to see if the new vnode represents a special device
543  * for which we already have a vnode (either because of
544  * bdevvp() or because of a different vnode representing
545  * the same block device). If such an alias exists, deallocate
546  * the existing contents and return the aliased vnode. The
547  * caller is responsible for filling it with its new contents.
548  */
549 struct vnode *
550 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
551 {
552 	struct proc *p = curproc;
553 	struct vnode *vp;
554 	struct vnode **vpp;
555 
556 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
557 		return (NULLVP);
558 
559 	vpp = &speclisth[SPECHASH(nvp_rdev)];
560 loop:
561 	for (vp = *vpp; vp; vp = vp->v_specnext) {
562 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
563 			continue;
564 		}
565 		/*
566 		 * Alias, but not in use, so flush it out.
567 		 */
568 		if (vp->v_usecount == 0) {
569 			vgonel(vp, p);
570 			goto loop;
571 		}
572 		if (vget(vp, LK_EXCLUSIVE)) {
573 			goto loop;
574 		}
575 		break;
576 	}
577 
578 	/*
579 	 * The common case is handled by the if statement below.
580 	 */
581 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
582 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
583 			M_WAITOK);
584 		nvp->v_rdev = nvp_rdev;
585 		nvp->v_hashchain = vpp;
586 		nvp->v_specnext = *vpp;
587 		nvp->v_specmountpoint = NULL;
588 		nvp->v_speclockf = NULL;
589 		nvp->v_specbitmap = NULL;
590 		if (nvp->v_type == VCHR &&
591 		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
592 		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
593 			if (vp != NULLVP)
594 				nvp->v_specbitmap = vp->v_specbitmap;
595 			else
596 				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
597 				    M_VNODE, M_WAITOK | M_ZERO);
598 		}
599 		*vpp = nvp;
600 		if (vp != NULLVP) {
601 			nvp->v_flag |= VALIASED;
602 			vp->v_flag |= VALIASED;
603 			vput(vp);
604 		}
605 		return (NULLVP);
606 	}
607 
608 	/*
609 	 * This code handles the uncommon case: we found an alias
610 	 * whose tag is VT_NON and whose type is VBLK, which means
611 	 * we found a block device that was created
612 	 * using bdevvp.
613 	 * An example of such a vnode is the root partition device vnode
614 	 * created in ffs_mountroot.
615 	 *
616 	 * The vnodes created by bdevvp should not be aliased (why?).
617 	 */
618 
619 	VOP_UNLOCK(vp);
620 	vclean(vp, 0, p);
621 	vp->v_op = nvp->v_op;
622 	vp->v_tag = nvp->v_tag;
623 	nvp->v_type = VNON;
624 	insmntque(vp, mp);
625 	return (vp);
626 }
627 
628 /*
629  * Grab a particular vnode from the free list, increment its
630  * reference count and lock it. If the vnode lock bit is set,
631  * the vnode is being eliminated in vgone. In that case, we
632  * cannot grab it, so the process is awakened when the
633  * transition is completed, and an error code is returned to
634  * indicate that the vnode is no longer usable, possibly
635  * having been changed to a new file system type.
636  */
637 int
638 vget(struct vnode *vp, int flags)
639 {
640 	int error, s, onfreelist;
641 
642 	/*
643 	 * If the vnode is in the process of being cleaned out for
644 	 * another use, we wait for the cleaning to finish and then
645 	 * return failure. Cleaning is determined by checking that
646 	 * the VXLOCK flag is set.
647 	 */
648 
649 	if (vp->v_flag & VXLOCK) {
650 		if (flags & LK_NOWAIT) {
651 			return (EBUSY);
652 		}
653 
654 		vp->v_flag |= VXWANT;
655 		tsleep(vp, PINOD, "vget", 0);
656 		return (ENOENT);
657 	}
658 
659 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
660 	if (vp->v_usecount == 0 && onfreelist) {
661 		s = splbio();
662 		if (vp->v_holdcnt > 0)
663 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
664 		else
665 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
666 		vp->v_bioflag &= ~VBIOONFREELIST;
667 		splx(s);
668 	}
669 
670 	vp->v_usecount++;
671 	if (flags & LK_TYPE_MASK) {
672 		if ((error = vn_lock(vp, flags)) != 0) {
673 			vp->v_usecount--;
674 			if (vp->v_usecount == 0 && onfreelist)
675 				vputonfreelist(vp);
676 		}
677 		return (error);
678 	}
679 
680 	return (0);
681 }
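
/*
 * Usage sketch (hypothetical caller): take a reference and the vnode
 * lock in one step, do the work, then drop both with vput():
 *
 *	if (vget(vp, LK_EXCLUSIVE) != 0)
 *		return (ENOENT);	(vnode was reclaimed from under us)
 *	error = VOP_GETATTR(vp, &va, cred, p);
 *	vput(vp);
 */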
682 
683 
684 /* Vnode reference. */
685 void
686 vref(struct vnode *vp)
687 {
688 #ifdef DIAGNOSTIC
689 	if (vp->v_usecount == 0)
690 		panic("vref used where vget required");
691 	if (vp->v_type == VNON)
692 		panic("vref on a VNON vnode");
693 #endif
694 	vp->v_usecount++;
695 }
696 
697 void
698 vputonfreelist(struct vnode *vp)
699 {
700 	int s;
701 	struct freelst *lst;
702 
703 	s = splbio();
704 #ifdef DIAGNOSTIC
705 	if (vp->v_usecount != 0)
706 		panic("Use count is not zero!");
707 
708 	if (vp->v_bioflag & VBIOONFREELIST) {
709 		vprint("vnode already on free list: ", vp);
710 		panic("vnode already on free list");
711 	}
712 #endif
713 
714 	vp->v_bioflag |= VBIOONFREELIST;
715 
716 	if (vp->v_holdcnt > 0)
717 		lst = &vnode_hold_list;
718 	else
719 		lst = &vnode_free_list;
720 
721 	if (vp->v_type == VBAD)
722 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
723 	else
724 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
725 
726 	splx(s);
727 }
728 
729 /*
730  * vput(), just unlock and vrele()
731  */
732 void
733 vput(struct vnode *vp)
734 {
735 	struct proc *p = curproc;
736 
737 #ifdef DIAGNOSTIC
738 	if (vp == NULL)
739 		panic("vput: null vp");
740 #endif
741 
742 #ifdef DIAGNOSTIC
743 	if (vp->v_usecount == 0) {
744 		vprint("vput: bad ref count", vp);
745 		panic("vput: ref cnt");
746 	}
747 #endif
748 	vp->v_usecount--;
749 	KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
750 	if (vp->v_usecount > 0) {
751 		VOP_UNLOCK(vp);
752 		return;
753 	}
754 
755 #ifdef DIAGNOSTIC
756 	if (vp->v_writecount != 0) {
757 		vprint("vput: bad writecount", vp);
758 		panic("vput: v_writecount != 0");
759 	}
760 #endif
761 
762 	VOP_INACTIVE(vp, p);
763 
764 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
765 		vputonfreelist(vp);
766 }
767 
768 /*
769  * Vnode release - use for active VNODES.
770  * If count drops to zero, call inactive routine and return to freelist.
771  * Returns 0 if it did not sleep.
772  */
773 int
774 vrele(struct vnode *vp)
775 {
776 	struct proc *p = curproc;
777 
778 #ifdef DIAGNOSTIC
779 	if (vp == NULL)
780 		panic("vrele: null vp");
781 #endif
782 #ifdef DIAGNOSTIC
783 	if (vp->v_usecount == 0) {
784 		vprint("vrele: bad ref count", vp);
785 		panic("vrele: ref cnt");
786 	}
787 #endif
788 	vp->v_usecount--;
789 	if (vp->v_usecount > 0) {
790 		return (0);
791 	}
792 
793 #ifdef DIAGNOSTIC
794 	if (vp->v_writecount != 0) {
795 		vprint("vrele: bad writecount", vp);
796 		panic("vrele: v_writecount != 0");
797 	}
798 #endif
799 
800 	if (vn_lock(vp, LK_EXCLUSIVE)) {
801 #ifdef DIAGNOSTIC
802 		vprint("vrele: cannot lock", vp);
803 #endif
804 		return (1);
805 	}
806 
807 	VOP_INACTIVE(vp, p);
808 
809 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
810 		vputonfreelist(vp);
811 	return (1);
812 }
813 
814 /* Page or buffer structure gets a reference. */
815 void
816 vhold(struct vnode *vp)
817 {
818 	/*
819 	 * If it is on the freelist and the hold count is currently
820 	 * zero, move it to the hold list.
821 	 */
822 	if ((vp->v_bioflag & VBIOONFREELIST) &&
823 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
824 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
825 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
826 	}
827 	vp->v_holdcnt++;
828 }
829 
830 /* Lose interest in a vnode. */
831 void
832 vdrop(struct vnode *vp)
833 {
834 #ifdef DIAGNOSTIC
835 	if (vp->v_holdcnt == 0)
836 		panic("vdrop: zero holdcnt");
837 #endif
838 
839 	vp->v_holdcnt--;
840 
841 	/*
842 	 * If it is on the holdlist and the hold count drops to
843 	 * zero, move it to the free list.
844 	 */
845 	if ((vp->v_bioflag & VBIOONFREELIST) &&
846 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
847 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
848 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
849 	}
850 }
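
/*
 * vhold() and vdrop() come in pairs: bgetvp() below holds the vnode
 * for as long as a buffer points at it, and brelvp() drops it again.
 * This is what shuttles an unreferenced vnode between vnode_free_list
 * and vnode_hold_list.
 */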
851 
852 /*
853  * Remove any vnodes in the vnode table belonging to mount point mp.
854  *
855  * If MNT_NOFORCE is specified, there should not be any active vnodes;
856  * return an error if any are found (NB: this is a user error, not a
857  * system error). If MNT_FORCE is specified, detach any active vnodes
858  * that are found.
859  */
860 #ifdef DEBUG
861 int busyprt = 0;	/* print out busy vnodes */
862 struct ctldebug debug1 = { "busyprt", &busyprt };
863 #endif
864 
865 int
866 vfs_mount_foreach_vnode(struct mount *mp,
867     int (*func)(struct vnode *, void *), void *arg) {
868 	struct vnode *vp, *nvp;
869 	int error = 0;
870 
871 loop:
872 	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
873 		if (vp->v_mount != mp)
874 			goto loop;
875 
876 		error = func(vp, arg);
877 
878 		if (error != 0)
879 			break;
880 	}
881 
882 	return (error);
883 }
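
/*
 * Note the restart above: func() may sleep, and if the vnode migrated
 * to another mount point in the meantime our list linkage is stale, so
 * the scan starts over from the head.  Usage sketch (hypothetical
 * callback, not part of this file):
 *
 *	int
 *	count_vnode(struct vnode *vp, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	vfs_mount_foreach_vnode(mp, count_vnode, &count);
 */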
884 
885 struct vflush_args {
886 	struct vnode *skipvp;
887 	int busy;
888 	int flags;
889 };
890 
891 int
892 vflush_vnode(struct vnode *vp, void *arg)
893 {
894 	struct vflush_args *va = arg;
895 	struct proc *p = curproc;
896 
897 	if (vp == va->skipvp) {
898 		return (0);
899 	}
900 
901 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
902 		return (0);
903 	}
904 
905 	/*
906 	 * If WRITECLOSE is set, only flush out regular file
907 	 * vnodes open for writing.
908 	 */
909 	if ((va->flags & WRITECLOSE) &&
910 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
911 		return (0);
912 	}
913 
914 	/*
915 	 * With v_usecount == 0, all we need to do is clear
916 	 * out the vnode data structures and we are done.
917 	 */
918 	if (vp->v_usecount == 0) {
919 		vgonel(vp, p);
920 		return (0);
921 	}
922 
923 	/*
924 	 * If FORCECLOSE is set, forcibly close the vnode.
925 	 * For block or character devices, revert to an
926 	 * anonymous device. For all other files, just kill them.
927 	 */
928 	if (va->flags & FORCECLOSE) {
929 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
930 			vgonel(vp, p);
931 		} else {
932 			vclean(vp, 0, p);
933 			vp->v_op = &spec_vops;
934 			insmntque(vp, NULL);
935 		}
936 		return (0);
937 	}
938 
939 	/*
940 	 * If IGNORECLEAN is set, we may skip vnodes which have no
941 	 * changes pending to disk.
942 	 * XXX Might be nice to check per-fs "inode" flags, but
943 	 * generally the filesystem is sync'd already, right?
944 	 */
945 	if ((va->flags & IGNORECLEAN) &&
946 	    LIST_EMPTY(&vp->v_dirtyblkhd))
947 		return (0);
948 
949 #ifdef DEBUG
950 	if (busyprt)
951 		vprint("vflush: busy vnode", vp);
952 #endif
953 	va->busy++;
954 	return (0);
955 }
956 
957 int
958 vflush(struct mount *mp, struct vnode *skipvp, int flags)
959 {
960 	struct vflush_args va;
961 	va.skipvp = skipvp;
962 	va.busy = 0;
963 	va.flags = flags;
964 
965 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
966 
967 	if (va.busy)
968 		return (EBUSY);
969 	return (0);
970 }
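
/*
 * Hedged sketch of the typical unmount-path caller: flush every vnode
 * on the mount, forcing active ones only if MNT_FORCE was given:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, NULLVP, flags);
 *	if (error)
 *		return (error);		(some vnodes are still busy)
 */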
971 
972 /*
973  * Disassociate the underlying file system from a vnode.
974  */
975 void
976 vclean(struct vnode *vp, int flags, struct proc *p)
977 {
978 	int active;
979 
980 	/*
981 	 * Check to see if the vnode is in use.
982 	 * If so we have to reference it before we clean it out
983 	 * so that its count cannot fall to zero and generate a
984 	 * race against ourselves to recycle it.
985 	 */
986 	if ((active = vp->v_usecount) != 0)
987 		vp->v_usecount++;
988 
989 	/*
990 	 * Prevent the vnode from being recycled or
991 	 * brought into use while we clean it out.
992 	 */
993 	if (vp->v_flag & VXLOCK)
994 		panic("vclean: deadlock");
995 	vp->v_flag |= VXLOCK;
996 	/*
997 	 * Even if the count is zero, the VOP_INACTIVE routine may still
998 	 * have the object locked while it cleans it out. The VOP_LOCK
999 	 * ensures that the VOP_INACTIVE routine is done with its work.
1000 	 * For active vnodes, it ensures that no other activity can
1001 	 * occur while the underlying object is being cleaned out.
1002 	 */
1003 	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);
1004 
1005 	/*
1006 	 * Clean out any VM data associated with the vnode.
1007 	 */
1008 	uvm_vnp_terminate(vp);
1009 	/*
1010 	 * Clean out any buffers associated with the vnode.
1011 	 */
1012 	if (flags & DOCLOSE)
1013 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1014 	/*
1015 	 * If purging an active vnode, it must be closed and
1016 	 * deactivated before being reclaimed. Note that
1017 	 * VOP_INACTIVE will unlock the vnode.
1018 	 */
1019 	if (active) {
1020 		if (flags & DOCLOSE)
1021 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1022 		VOP_INACTIVE(vp, p);
1023 	} else {
1024 		/*
1025 		 * Any other processes trying to obtain this lock must first
1026 		 * wait for VXLOCK to clear, then call the new lock operation.
1027 		 */
1028 		VOP_UNLOCK(vp);
1029 	}
1030 
1031 	/*
1032 	 * Reclaim the vnode.
1033 	 */
1034 	if (VOP_RECLAIM(vp, p))
1035 		panic("vclean: cannot reclaim");
1036 	if (active) {
1037 		vp->v_usecount--;
1038 		if (vp->v_usecount == 0) {
1039 			if (vp->v_holdcnt > 0)
1040 				panic("vclean: not clean");
1041 			vputonfreelist(vp);
1042 		}
1043 	}
1044 	cache_purge(vp);
1045 
1046 	/*
1047 	 * Done with purge, notify sleepers of the grim news.
1048 	 */
1049 	vp->v_op = &dead_vops;
1050 	VN_KNOTE(vp, NOTE_REVOKE);
1051 	vp->v_tag = VT_NON;
1052 	vp->v_flag &= ~VXLOCK;
1053 #ifdef VFSLCKDEBUG
1054 	vp->v_flag &= ~VLOCKSWORK;
1055 #endif
1056 	if (vp->v_flag & VXWANT) {
1057 		vp->v_flag &= ~VXWANT;
1058 		wakeup(vp);
1059 	}
1060 }
1061 
1062 /*
1063  * Recycle an unused vnode to the front of the free list.
1064  */
1065 int
1066 vrecycle(struct vnode *vp, struct proc *p)
1067 {
1068 	if (vp->v_usecount == 0) {
1069 		vgonel(vp, p);
1070 		return (1);
1071 	}
1072 	return (0);
1073 }
1074 
1075 /*
1076  * Eliminate all activity associated with a vnode
1077  * in preparation for reuse.
1078  */
1079 void
1080 vgone(struct vnode *vp)
1081 {
1082 	struct proc *p = curproc;
1083 	vgonel(vp, p);
1084 }
1085 
1086 /*
1087  * vgone, with struct proc.
1088  */
1089 void
1090 vgonel(struct vnode *vp, struct proc *p)
1091 {
1092 	struct vnode *vq;
1093 	struct vnode *vx;
1094 
1095 	KASSERT(vp->v_uvcount == 0);
1096 
1097 	/*
1098 	 * If a vgone (or vclean) is already in progress,
1099 	 * wait until it is done and return.
1100 	 */
1101 	if (vp->v_flag & VXLOCK) {
1102 		vp->v_flag |= VXWANT;
1103 		tsleep(vp, PINOD, "vgone", 0);
1104 		return;
1105 	}
1106 
1107 	/*
1108 	 * Clean out the filesystem specific data.
1109 	 */
1110 	vclean(vp, DOCLOSE, p);
1111 	/*
1112 	 * Delete from old mount point vnode list, if on one.
1113 	 */
1114 	if (vp->v_mount != NULL)
1115 		insmntque(vp, NULL);
1116 	/*
1117 	 * If special device, remove it from special device alias list
1118 	 * if it is on one.
1119 	 */
1120 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1121 		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1122 		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1123 		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1124 			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1125 		}
1126 		if (*vp->v_hashchain == vp) {
1127 			*vp->v_hashchain = vp->v_specnext;
1128 		} else {
1129 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1130 				if (vq->v_specnext != vp)
1131 					continue;
1132 				vq->v_specnext = vp->v_specnext;
1133 				break;
1134 			}
1135 			if (vq == NULL)
1136 				panic("missing bdev");
1137 		}
1138 		if (vp->v_flag & VALIASED) {
1139 			vx = NULL;
1140 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1141 				if (vq->v_rdev != vp->v_rdev ||
1142 				    vq->v_type != vp->v_type)
1143 					continue;
1144 				if (vx)
1145 					break;
1146 				vx = vq;
1147 			}
1148 			if (vx == NULL)
1149 				panic("missing alias");
1150 			if (vq == NULL)
1151 				vx->v_flag &= ~VALIASED;
1152 			vp->v_flag &= ~VALIASED;
1153 		}
1154 		lf_purgelocks(vp->v_speclockf);
1155 		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1156 		vp->v_specinfo = NULL;
1157 	}
1158 	/*
1159 	 * Mark the vnode as dead.  The free-list handling below will
1160 	 * move it to the head of the free list so it is reused first.
1161 	 */
1162 	vp->v_type = VBAD;
1163 
1164 	/*
1165 	 * Move onto the free list, unless we were called from
1166 	 * getnewvnode and we're not on any free list
1167 	 */
1168 	if (vp->v_usecount == 0 &&
1169 	    (vp->v_bioflag & VBIOONFREELIST)) {
1170 		int s;
1171 
1172 		s = splbio();
1173 
1174 		if (vp->v_holdcnt > 0)
1175 			panic("vgonel: not clean");
1176 
1177 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1178 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1179 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1180 		}
1181 		splx(s);
1182 	}
1183 }
1184 
1185 /*
1186  * Lookup a vnode by device number.
1187  */
1188 int
1189 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1190 {
1191 	struct vnode *vp;
1192 	int rc = 0;
1193 
1194 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1195 		if (dev != vp->v_rdev || type != vp->v_type)
1196 			continue;
1197 		*vpp = vp;
1198 		rc = 1;
1199 		break;
1200 	}
1201 	return (rc);
1202 }
1203 
1204 /*
1205  * Revoke all the vnodes corresponding to the specified minor number
1206  * range (endpoints inclusive) of the specified major.
1207  */
1208 void
1209 vdevgone(int maj, int minl, int minh, enum vtype type)
1210 {
1211 	struct vnode *vp;
1212 	int mn;
1213 
1214 	for (mn = minl; mn <= minh; mn++)
1215 		if (vfinddev(makedev(maj, mn), type, &vp))
1216 			VOP_REVOKE(vp, REVOKEALL);
1217 }
1218 
1219 /*
1220  * Calculate the total number of references to a special device.
1221  */
1222 int
1223 vcount(struct vnode *vp)
1224 {
1225 	struct vnode *vq, *vnext;
1226 	int count;
1227 
1228 loop:
1229 	if ((vp->v_flag & VALIASED) == 0)
1230 		return (vp->v_usecount);
1231 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1232 		vnext = vq->v_specnext;
1233 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1234 			continue;
1235 		/*
1236 		 * Alias, but not in use, so flush it out.
1237 		 */
1238 		if (vq->v_usecount == 0 && vq != vp) {
1239 			vgone(vq);
1240 			goto loop;
1241 		}
1242 		count += vq->v_usecount;
1243 	}
1244 	return (count);
1245 }
1246 
1247 #if defined(DEBUG) || defined(DIAGNOSTIC)
1248 /*
1249  * Print out a description of a vnode.
1250  */
1251 static char *typename[] =
1252    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1253 
1254 void
1255 vprint(char *label, struct vnode *vp)
1256 {
1257 	char buf[64];
1258 
1259 	if (label != NULL)
1260 		printf("%s: ", label);
1261 	printf("%p, type %s, use %u, write %u, hold %u,",
1262 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1263 		vp->v_holdcnt);
1264 	buf[0] = '\0';
1265 	if (vp->v_flag & VROOT)
1266 		strlcat(buf, "|VROOT", sizeof buf);
1267 	if (vp->v_flag & VTEXT)
1268 		strlcat(buf, "|VTEXT", sizeof buf);
1269 	if (vp->v_flag & VSYSTEM)
1270 		strlcat(buf, "|VSYSTEM", sizeof buf);
1271 	if (vp->v_flag & VXLOCK)
1272 		strlcat(buf, "|VXLOCK", sizeof buf);
1273 	if (vp->v_flag & VXWANT)
1274 		strlcat(buf, "|VXWANT", sizeof buf);
1275 	if (vp->v_bioflag & VBIOWAIT)
1276 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1277 	if (vp->v_bioflag & VBIOONFREELIST)
1278 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1279 	if (vp->v_bioflag & VBIOONSYNCLIST)
1280 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1281 	if (vp->v_flag & VALIASED)
1282 		strlcat(buf, "|VALIASED", sizeof buf);
1283 	if (buf[0] != '\0')
1284 		printf(" flags (%s)", &buf[1]);
1285 	if (vp->v_data == NULL) {
1286 		printf("\n");
1287 	} else {
1288 		printf("\n\t");
1289 		VOP_PRINT(vp);
1290 	}
1291 }
1292 #endif /* DEBUG || DIAGNOSTIC */
1293 
1294 #ifdef DEBUG
1295 /*
1296  * List all of the locked vnodes in the system.
1297  * Called when debugging the kernel.
1298  */
1299 void
1300 printlockedvnodes(void)
1301 {
1302 	struct mount *mp;
1303 	struct vnode *vp;
1304 
1305 	printf("Locked vnodes\n");
1306 
1307 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1308 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1309 			continue;
1310 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1311 			if (VOP_ISLOCKED(vp))
1312 				vprint(NULL, vp);
1313 		}
1314 		vfs_unbusy(mp);
1315  	}
1316 
1317 }
1318 #endif
1319 
1320 /*
1321  * Top level filesystem related information gathering.
1322  */
1323 int
1324 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1325     size_t newlen, struct proc *p)
1326 {
1327 	struct vfsconf *vfsp, *tmpvfsp;
1328 	int ret;
1329 
1330 	/* all sysctl names at this level are at least name and field */
1331 	if (namelen < 2)
1332 		return (ENOTDIR);		/* overloaded */
1333 
1334 	if (name[0] != VFS_GENERIC) {
1335 		vfsp = vfs_bytypenum(name[0]);
1336 		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1337 			return (EOPNOTSUPP);
1338 
1339 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1340 		    oldp, oldlenp, newp, newlen, p));
1341 	}
1342 
1343 	switch (name[1]) {
1344 	case VFS_MAXTYPENUM:
1345 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1346 
1347 	case VFS_CONF:
1348 		if (namelen < 3)
1349 			return (ENOTDIR);	/* overloaded */
1350 
1351 		vfsp = vfs_bytypenum(name[2]);
1352 		if (vfsp == NULL)
1353 			return (EOPNOTSUPP);
1354 
1355 		/* Make a copy, clear out kernel pointers */
1356 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1357 		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1358 		tmpvfsp->vfc_vfsops = NULL;
1359 
1360 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1361 		    sizeof(struct vfsconf));
1362 
1363 		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1364 		return (ret);
1365 	case VFS_BCACHESTAT:	/* buffer cache statistics */
1366 		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1367 		    sizeof(struct bcachestats));
1368 		return (ret);
1369 	}
1370 	return (EOPNOTSUPP);
1371 }
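
/*
 * Userland reaches this through sysctl(2); a sketch of reading the
 * buffer cache statistics exported above (hypothetical program, not
 * part of this file):
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT };
 *	struct bcachestats bcs;
 *	size_t len = sizeof(bcs);
 *
 *	if (sysctl(mib, 3, &bcs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */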
1372 
1373 /*
1374  * Check to see if a filesystem is mounted on a block device.
1375  */
1376 int
1377 vfs_mountedon(struct vnode *vp)
1378 {
1379 	struct vnode *vq;
1380 	int error = 0;
1381 
1382 	if (vp->v_specmountpoint != NULL)
1383 		return (EBUSY);
1384 	if (vp->v_flag & VALIASED) {
1385 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1386 			if (vq->v_rdev != vp->v_rdev ||
1387 			    vq->v_type != vp->v_type)
1388 				continue;
1389 			if (vq->v_specmountpoint != NULL) {
1390 				error = EBUSY;
1391 				break;
1392 			}
1393  		}
1394 	}
1395 	return (error);
1396 }
1397 
1398 #ifdef NFSSERVER
1399 /*
1400  * Build hash lists of net addresses and hang them off the mount point.
1401  * Called by vfs_export() to set up the lists of export addresses.
1402  */
1403 int
1404 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1405     struct export_args *argp)
1406 {
1407 	struct netcred *np;
1408 	struct radix_node_head *rnh;
1409 	int nplen, i;
1410 	struct radix_node *rn;
1411 	struct sockaddr *saddr, *smask = 0;
1412 	int error;
1413 
1414 	if (argp->ex_addrlen == 0) {
1415 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1416 			return (EPERM);
1417 		np = &nep->ne_defexported;
1418 		/* fill in the kernel's ucred from userspace's xucred */
1419 		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1420 			return (error);
1421 		mp->mnt_flag |= MNT_DEFEXPORTED;
1422 		goto finish;
1423 	}
1424 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1425 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1426 		return (EINVAL);
1427 	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1428 	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1429 	np->netc_len = nplen;
1430 	saddr = (struct sockaddr *)(np + 1);
1431 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1432 	if (error)
1433 		goto out;
1434 	if (saddr->sa_len > argp->ex_addrlen)
1435 		saddr->sa_len = argp->ex_addrlen;
1436 	if (argp->ex_masklen) {
1437 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1438 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1439 		if (error)
1440 			goto out;
1441 		if (smask->sa_len > argp->ex_masklen)
1442 			smask->sa_len = argp->ex_masklen;
1443 	}
1444 	/* fill in the kernel's ucred from userspace's xucred */
1445 	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1446 		goto out;
1447 	i = saddr->sa_family;
1448 	switch (i) {
1449 	case AF_INET:
1450 		if ((rnh = nep->ne_rtable_inet) == NULL) {
1451 			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1452 			    offsetof(struct sockaddr_in, sin_addr))) {
1453 				error = ENOBUFS;
1454 				goto out;
1455 			}
1456 			rnh = nep->ne_rtable_inet;
1457 		}
1458 		break;
1459 	default:
1460 		error = EINVAL;
1461 		goto out;
1462 	}
1463 	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1464 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1465 		error = EPERM;
1466 		goto out;
1467 	}
1468 finish:
1469 	np->netc_exflags = argp->ex_flags;
1470 	return (0);
1471 out:
1472 	free(np, M_NETADDR, np->netc_len);
1473 	return (error);
1474 }
1475 
1476 int
1477 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1478 {
1479 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1480 	struct netcred * np = (struct netcred *)rn;
1481 
1482 	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1483 	free(np, M_NETADDR, np->netc_len);
1484 	return (0);
1485 }
1486 
1487 /*
1488  * Free the net address hash lists that are hanging off the mount points.
1489  */
1490 void
1491 vfs_free_addrlist(struct netexport *nep)
1492 {
1493 	struct radix_node_head *rnh;
1494 
1495 	if ((rnh = nep->ne_rtable_inet) != NULL) {
1496 		rn_walktree(rnh, vfs_free_netcred, rnh);
1497 		free(rnh, M_RTABLE, sizeof(*rnh));
1498 		nep->ne_rtable_inet = NULL;
1499 	}
1500 }
1501 #endif /* NFSSERVER */
1502 
1503 int
1504 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1505 {
1506 #ifdef NFSSERVER
1507 	int error;
1508 
1509 	if (argp->ex_flags & MNT_DELEXPORT) {
1510 		vfs_free_addrlist(nep);
1511 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1512 	}
1513 	if (argp->ex_flags & MNT_EXPORTED) {
1514 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1515 			return (error);
1516 		mp->mnt_flag |= MNT_EXPORTED;
1517 	}
1518 	return (0);
1519 #else
1520 	return (ENOTSUP);
1521 #endif /* NFSSERVER */
1522 }
1523 
1524 struct netcred *
1525 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1526 {
1527 #ifdef NFSSERVER
1528 	struct netcred *np;
1529 	struct radix_node_head *rnh;
1530 	struct sockaddr *saddr;
1531 
1532 	np = NULL;
1533 	if (mp->mnt_flag & MNT_EXPORTED) {
1534 		/*
1535 		 * Lookup in the export list first.
1536 		 */
1537 		if (nam != NULL) {
1538 			saddr = mtod(nam, struct sockaddr *);
1539 			switch (saddr->sa_family) {
1540 			case AF_INET:
1541 				rnh = nep->ne_rtable_inet;
1542 				break;
1543 			default:
1544 				rnh = NULL;
1545 				break;
1546 			}
1547 			if (rnh != NULL)
1548 				np = (struct netcred *)rn_match(saddr, rnh);
1549 		}
1550 		/*
1551 		 * If no address match, use the default if it exists.
1552 		 */
1553 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1554 			np = &nep->ne_defexported;
1555 	}
1556 	return (np);
1557 #else
1558 	return (NULL);
1559 #endif /* NFSSERVER */
1560 }
1561 
1562 /*
1563  * Do the usual access checking.
1564  * file_mode, uid and gid are from the vnode in question,
1565  * while acc_mode and cred are from the VOP_ACCESS parameter list.
1566  */
1567 int
1568 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1569     mode_t acc_mode, struct ucred *cred)
1570 {
1571 	mode_t mask;
1572 
1573 	/* User id 0 always gets read/write access. */
1574 	if (cred->cr_uid == 0) {
1575 		/* For VEXEC, at least one of the execute bits must be set. */
1576 		if ((acc_mode & VEXEC) && type != VDIR &&
1577 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1578 			return EACCES;
1579 		return 0;
1580 	}
1581 
1582 	mask = 0;
1583 
1584 	/* Otherwise, check the owner. */
1585 	if (cred->cr_uid == uid) {
1586 		if (acc_mode & VEXEC)
1587 			mask |= S_IXUSR;
1588 		if (acc_mode & VREAD)
1589 			mask |= S_IRUSR;
1590 		if (acc_mode & VWRITE)
1591 			mask |= S_IWUSR;
1592 		return (file_mode & mask) == mask ? 0 : EACCES;
1593 	}
1594 
1595 	/* Otherwise, check the groups. */
1596 	if (groupmember(gid, cred)) {
1597 		if (acc_mode & VEXEC)
1598 			mask |= S_IXGRP;
1599 		if (acc_mode & VREAD)
1600 			mask |= S_IRGRP;
1601 		if (acc_mode & VWRITE)
1602 			mask |= S_IWGRP;
1603 		return (file_mode & mask) == mask ? 0 : EACCES;
1604 	}
1605 
1606 	/* Otherwise, check everyone else. */
1607 	if (acc_mode & VEXEC)
1608 		mask |= S_IXOTH;
1609 	if (acc_mode & VREAD)
1610 		mask |= S_IROTH;
1611 	if (acc_mode & VWRITE)
1612 		mask |= S_IWOTH;
1613 	return (file_mode & mask) == mask ? 0 : EACCES;
1614 }
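
/*
 * Worked example with assumed numbers: a file with mode 0640 owned by
 * uid 100 / gid 10.  The owner requesting VREAD|VWRITE builds
 * mask = S_IRUSR|S_IWUSR = 0600, and (0640 & 0600) == 0600 == mask,
 * so access is granted.  A mere group member requesting VWRITE builds
 * mask = S_IWGRP = 0020, and (0640 & 0020) == 0 != mask, so EACCES.
 */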
1615 
1616 int
1617 vnoperm(struct vnode *vp)
1618 {
1619 	if (vp->v_flag & VROOT || vp->v_mount == NULL)
1620 		return 0;
1621 
1622 	return (vp->v_mount->mnt_flag & MNT_NOPERM);
1623 }
1624 
1625 struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1626 
1627 int
1628 vfs_stall(struct proc *p, int stall)
1629 {
1630 	struct mount *mp;
1631 	int allerror = 0, error;
1632 
1633 	if (stall)
1634 		rw_enter_write(&vfs_stall_lock);
1635 
1636 	/*
1637 	 * The loop variable mp is protected by vfs_busy() so that it cannot
1638 	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
1639 	 * lock order consistent with dounmount().
1640 	 */
1641 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1642 		if (stall) {
1643 			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
1644 			if (error) {
1645 				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1646 				allerror = error;
1647 				continue;
1648 			}
1649 			uvm_vnp_sync(mp);
1650 			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1651 			if (error) {
1652 				printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1653 				vfs_unbusy(mp);
1654 				allerror = error;
1655 				continue;
1656 			}
1657 			mp->mnt_flag |= MNT_STALLED;
1658 		} else {
1659 			if (mp->mnt_flag & MNT_STALLED) {
1660 				vfs_unbusy(mp);
1661 				mp->mnt_flag &= ~MNT_STALLED;
1662 			}
1663 		}
1664 	}
1665 
1666 	if (!stall)
1667 		rw_exit_write(&vfs_stall_lock);
1668 
1669 	return (allerror);
1670 }
1671 
1672 void
1673 vfs_stall_barrier(void)
1674 {
1675 	rw_enter_read(&vfs_stall_lock);
1676 	rw_exit_read(&vfs_stall_lock);
1677 }
1678 
1679 /*
1680  * Unmount all file systems.
1681  * We traverse the list in reverse order under the assumption that doing so
1682  * will avoid needing to worry about dependencies.
1683  */
1684 void
1685 vfs_unmountall(void)
1686 {
1687 	struct mount *mp, *nmp;
1688 	int allerror, error, again = 1;
1689 
1690  retry:
1691 	allerror = 0;
1692 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1693 		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1694 			continue;
1695 		/* XXX Here is a race, the next pointer is not locked. */
1696 		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1697 			printf("unmount of %s failed with error %d\n",
1698 			    mp->mnt_stat.f_mntonname, error);
1699 			allerror = 1;
1700 		}
1701 	}
1702 
1703 	if (allerror) {
1704 		printf("WARNING: some file systems would not unmount\n");
1705 		if (again) {
1706 			printf("retrying\n");
1707 			again = 0;
1708 			goto retry;
1709 		}
1710 	}
1711 }
1712 
1713 /*
1714  * Sync and unmount file systems before shutting down.
1715  */
1716 void
1717 vfs_shutdown(struct proc *p)
1718 {
1719 #ifdef ACCOUNTING
1720 	acct_shutdown();
1721 #endif
1722 
1723 	printf("syncing disks...");
1724 
1725 	if (panicstr == 0) {
1726 		/* Sync before unmount, in case we hang on something. */
1727 		sys_sync(p, NULL, NULL);
1728 		vfs_unmountall();
1729 	}
1730 
1731 #if NSOFTRAID > 0
1732 	sr_quiesce();
1733 #endif
1734 
1735 	if (vfs_syncwait(p, 1))
1736 		printf(" giving up\n");
1737 	else
1738 		printf(" done\n");
1739 }
1740 
1741 /*
1742  * perform sync() operation and wait for buffers to flush.
1743  * Perform the sync() operation and wait for buffers to flush.
1744 int
1745 vfs_syncwait(struct proc *p, int verbose)
1746 {
1747 	struct buf *bp;
1748 	int iter, nbusy, dcount, s;
1749 #ifdef MULTIPROCESSOR
1750 	int hold_count;
1751 #endif
1752 
1753 	sys_sync(p, NULL, NULL);
1754 
1755 	/* Wait for sync to finish. */
1756 	dcount = 10000;
1757 	for (iter = 0; iter < 20; iter++) {
1758 		nbusy = 0;
1759 		LIST_FOREACH(bp, &bufhead, b_list) {
1760 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1761 				nbusy++;
1762 			/*
1763 			 * With soft updates, some buffers that are
1764 			 * written will be remarked as dirty until other
1765 			 * buffers are written.
1766 			 */
1767 			if (bp->b_flags & B_DELWRI) {
1768 				s = splbio();
1769 				bremfree(bp);
1770 				buf_acquire(bp);
1771 				splx(s);
1772 				nbusy++;
1773 				bawrite(bp);
1774 				if (dcount-- <= 0) {
1775 					if (verbose)
1776 						printf("softdep ");
1777 					return 1;
1778 				}
1779 			}
1780 		}
1781 		if (nbusy == 0)
1782 			break;
1783 		if (verbose)
1784 			printf("%d ", nbusy);
1785 #ifdef MULTIPROCESSOR
1786 		if (_kernel_lock_held())
1787 			hold_count = __mp_release_all(&kernel_lock);
1788 		else
1789 			hold_count = 0;
1790 #endif
1791 		DELAY(40000 * iter);
1792 #ifdef MULTIPROCESSOR
1793 		if (hold_count)
1794 			__mp_acquire_count(&kernel_lock, hold_count);
1795 #endif
1796 	}
1797 
1798 	return nbusy;
1799 }
1800 
1801 /*
1802  * posix file system related system variables.
1803  * POSIX file system related system variables.
1804 int
1805 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1806     void *newp, size_t newlen, struct proc *p)
1807 {
1808 	/* all sysctl names at this level are terminal */
1809 	if (namelen != 1)
1810 		return (ENOTDIR);
1811 
1812 	switch (name[0]) {
1813 	case FS_POSIX_SETUID:
1814 		if (newp && securelevel > 0)
1815 			return (EPERM);
1816 		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1817 	default:
1818 		return (EOPNOTSUPP);
1819 	}
1820 	/* NOTREACHED */
1821 }
1822 
1823 /*
1824  * file system related system variables.
1825  */
1826 int
1827 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1828     size_t newlen, struct proc *p)
1829 {
1830 	sysctlfn *fn;
1831 
1832 	switch (name[0]) {
1833 	case FS_POSIX:
1834 		fn = fs_posix_sysctl;
1835 		break;
1836 	default:
1837 		return (EOPNOTSUPP);
1838 	}
1839 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1840 }
1841 
1842 
1843 /*
1844  * Routines dealing with vnodes and buffers
1845  */
1846 
1847 /*
1848  * Wait for all outstanding I/Os to complete
1849  *
1850  * Manipulates v_numoutput. Must be called at splbio()
1851  */
1852 int
1853 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1854 {
1855 	int error = 0;
1856 
1857 	splassert(IPL_BIO);
1858 
1859 	while (vp->v_numoutput) {
1860 		vp->v_bioflag |= VBIOWAIT;
1861 		error = tsleep(&vp->v_numoutput,
1862 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1863 		if (error)
1864 			break;
1865 	}
1866 
1867 	return (error);
1868 }
1869 
1870 /*
1871  * Update outstanding I/O count and do wakeup if requested.
1872  *
1873  * Manipulates v_numoutput. Must be called at splbio()
1874  */
1875 void
1876 vwakeup(struct vnode *vp)
1877 {
1878 	splassert(IPL_BIO);
1879 
1880 	if (vp != NULL) {
1881 		if (vp->v_numoutput-- == 0)
1882 			panic("vwakeup: neg numoutput");
1883 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1884 			vp->v_bioflag &= ~VBIOWAIT;
1885 			wakeup(&vp->v_numoutput);
1886 		}
1887 	}
1888 }
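
/*
 * The two routines above pair up: code starting a write increments
 * v_numoutput, the I/O completion path calls vwakeup(), and a waiter
 * looks like this hypothetical fragment:
 *
 *	s = splbio();
 *	error = vwaitforio(vp, 0, "example", 0);
 *	splx(s);
 */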
1889 
1890 /*
1891  * Flush out and invalidate all buffers associated with a vnode.
1892  * Called with the underlying object locked.
1893  */
1894 int
1895 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1896     int slpflag, int slptimeo)
1897 {
1898 	struct buf *bp;
1899 	struct buf *nbp, *blist;
1900 	int s, error;
1901 
1902 #ifdef VFSLCKDEBUG
1903 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1904 		panic("%s: vp isn't locked, vp %p", __func__, vp);
1905 #endif
1906 
1907 	if (flags & V_SAVE) {
1908 		s = splbio();
1909 		vwaitforio(vp, 0, "vinvalbuf", 0);
1910 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1911 			splx(s);
1912 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1913 				return (error);
1914 			s = splbio();
1915 			if (vp->v_numoutput > 0 ||
1916 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1917 				panic("%s: dirty bufs, vp %p", __func__, vp);
1918 		}
1919 		splx(s);
1920 	}
1921 loop:
1922 	s = splbio();
1923 	for (;;) {
1924 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1925 		    (flags & V_SAVEMETA))
1926 			while (blist && blist->b_lblkno < 0)
1927 				blist = LIST_NEXT(blist, b_vnbufs);
1928 		if (blist == NULL &&
1929 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1930 		    (flags & V_SAVEMETA))
1931 			while (blist && blist->b_lblkno < 0)
1932 				blist = LIST_NEXT(blist, b_vnbufs);
1933 		if (!blist)
1934 			break;
1935 
1936 		for (bp = blist; bp; bp = nbp) {
1937 			nbp = LIST_NEXT(bp, b_vnbufs);
1938 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1939 				continue;
1940 			if (bp->b_flags & B_BUSY) {
1941 				bp->b_flags |= B_WANTED;
1942 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1943 				    "vinvalbuf", slptimeo);
1944 				if (error) {
1945 					splx(s);
1946 					return (error);
1947 				}
1948 				break;
1949 			}
1950 			bremfree(bp);
1951 			/*
1952 			 * XXX Since there are no node locks for NFS, I believe
1953 			 * there is a slight chance that a delayed write will
1954 			 * occur while sleeping just above, so check for it.
1955 			 */
1956 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1957 				buf_acquire(bp);
1958 				splx(s);
1959 				(void) VOP_BWRITE(bp);
1960 				goto loop;
1961 			}
1962 			buf_acquire_nomap(bp);
1963 			bp->b_flags |= B_INVAL;
1964 			brelse(bp);
1965 		}
1966 	}
1967 	if (!(flags & V_SAVEMETA) &&
1968 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1969 		panic("%s: flush failed, vp %p", __func__, vp);
1970 	splx(s);
1971 	return (0);
1972 }
1973 
1974 void
1975 vflushbuf(struct vnode *vp, int sync)
1976 {
1977 	struct buf *bp, *nbp;
1978 	int s;
1979 
1980 loop:
1981 	s = splbio();
1982 	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1983 		if ((bp->b_flags & B_BUSY))
1984 			continue;
1985 		if ((bp->b_flags & B_DELWRI) == 0)
1986 			panic("vflushbuf: not dirty");
1987 		bremfree(bp);
1988 		buf_acquire(bp);
1989 		splx(s);
1990 		/*
1991 		 * Wait for I/O associated with indirect blocks to complete,
1992 		 * since there is no way to quickly wait for them below.
1993 		 */
1994 		if (bp->b_vp == vp || sync == 0)
1995 			(void) bawrite(bp);
1996 		else
1997 			(void) bwrite(bp);
1998 		goto loop;
1999 	}
2000 	if (sync == 0) {
2001 		splx(s);
2002 		return;
2003 	}
2004 	vwaitforio(vp, 0, "vflushbuf", 0);
2005 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2006 		splx(s);
2007 #ifdef DIAGNOSTIC
2008 		vprint("vflushbuf: dirty", vp);
2009 #endif
2010 		goto loop;
2011 	}
2012 	splx(s);
2013 }
2014 
2015 /*
2016  * Associate a buffer with a vnode.
2017  *
2018  * Manipulates buffer vnode queues. Must be called at splbio().
2019  */
2020 void
2021 bgetvp(struct vnode *vp, struct buf *bp)
2022 {
2023 	splassert(IPL_BIO);
2024 
2025 
2026 	if (bp->b_vp)
2027 		panic("bgetvp: not free");
2028 	vhold(vp);
2029 	bp->b_vp = vp;
2030 	if (vp->v_type == VBLK || vp->v_type == VCHR)
2031 		bp->b_dev = vp->v_rdev;
2032 	else
2033 		bp->b_dev = NODEV;
2034 	/*
2035 	 * Insert onto list for new vnode.
2036 	 */
2037 	bufinsvn(bp, &vp->v_cleanblkhd);
2038 }
2039 
2040 /*
2041  * Disassociate a buffer from a vnode.
2042  *
2043  * Manipulates vnode buffer queues. Must be called at splbio().
2044  */
2045 void
2046 brelvp(struct buf *bp)
2047 {
2048 	struct vnode *vp;
2049 
2050 	splassert(IPL_BIO);
2051 
2052 	if ((vp = bp->b_vp) == (struct vnode *) 0)
2053 		panic("brelvp: NULL");
2054 	/*
2055 	 * Delete from old vnode list, if on one.
2056 	 */
2057 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2058 		bufremvn(bp);
2059 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2060 	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2061 		vp->v_bioflag &= ~VBIOONSYNCLIST;
2062 		LIST_REMOVE(vp, v_synclist);
2063 	}
2064 	bp->b_vp = NULL;
2065 
2066 	vdrop(vp);
2067 }
2068 
2069 /*
2070  * Replaces the current vnode associated with the buffer, if any,
2071  * with a new vnode.
2072  *
2073  * If an output I/O is pending on the buffer, the old vnode
2074  * I/O count is adjusted.
2075  *
2076  * Ignores vnode buffer queues. Must be called at splbio().
2077  */
2078 void
2079 buf_replacevnode(struct buf *bp, struct vnode *newvp)
2080 {
2081 	struct vnode *oldvp = bp->b_vp;
2082 
2083 	splassert(IPL_BIO);
2084 
2085 	if (oldvp)
2086 		brelvp(bp);
2087 
2088 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2089 		newvp->v_numoutput++;	/* put it on swapdev */
2090 		vwakeup(oldvp);
2091 	}
2092 
2093 	bgetvp(newvp, bp);
2094 	bufremvn(bp);
2095 }
2096 
2097 /*
2098  * Used to assign buffers to the appropriate clean or dirty list on
2099  * the vnode and to add newly dirty vnodes to the appropriate
2100  * filesystem syncer list.
2101  *
2102  * Manipulates vnode buffer queues. Must be called at splbio().
2103  */
2104 void
2105 reassignbuf(struct buf *bp)
2106 {
2107 	struct buflists *listheadp;
2108 	int delay;
2109 	struct vnode *vp = bp->b_vp;
2110 
2111 	splassert(IPL_BIO);
2112 
2113 	/*
2114 	 * Delete from old vnode list, if on one.
2115 	 */
2116 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2117 		bufremvn(bp);
2118 
2119 	/*
2120 	 * If dirty, put on list of dirty buffers;
2121 	 * otherwise insert onto list of clean buffers.
2122 	 */
2123 	if ((bp->b_flags & B_DELWRI) == 0) {
2124 		listheadp = &vp->v_cleanblkhd;
2125 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2126 		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2127 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2128 			LIST_REMOVE(vp, v_synclist);
2129 		}
2130 	} else {
2131 		listheadp = &vp->v_dirtyblkhd;
2132 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2133 			switch (vp->v_type) {
2134 			case VDIR:
2135 				delay = syncdelay / 2;
2136 				break;
2137 			case VBLK:
2138 				if (vp->v_specmountpoint != NULL) {
2139 					delay = syncdelay / 3;
2140 					break;
2141 				}
2142 				/* FALLTHROUGH */
2143 			default:
2144 				delay = syncdelay;
2145 			}
2146 			vn_syncer_add_to_worklist(vp, delay);
2147 		}
2148 	}
2149 	bufinsvn(bp, listheadp);
2150 }
2151 
2152 /*
2153  * Check if vnode represents a disk device
2154  */
2155 int
2156 vn_isdisk(struct vnode *vp, int *errp)
2157 {
2158 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2159 		return (0);
2160 
2161 	return (1);
2162 }
2163 
2164 #ifdef DDB
2165 #include <machine/db_machdep.h>
2166 #include <ddb/db_interface.h>
2167 
2168 void
2169 vfs_buf_print(void *b, int full,
2170     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2171 {
2172 	struct buf *bp = b;
2173 
2174 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2175 	      "  proc %p error %d flags %lb\n",
2176 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2177 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2178 
2179 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2180 	      "  data %p saveaddr %p dep %p iodone %p\n",
2181 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2182 	    bp->b_data, bp->b_saveaddr,
2183 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2184 
2185 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2186 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2187 
2188 #ifdef FFS_SOFTUPDATES
2189 	if (full)
2190 		softdep_print(bp, full, pr);
2191 #endif
2192 }
2193 
2194 const char *vtypes[] = { VTYPE_NAMES };
2195 const char *vtags[] = { VTAG_NAMES };
2196 
2197 void
2198 vfs_vnode_print(void *v, int full,
2199     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2200 {
2201 	struct vnode *vp = v;
2202 
2203 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2204 	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2205 	      vp->v_tag,
2206 	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2207 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2208 
2209 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2210 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2211 	      vp->v_holdcnt, vp->v_numoutput);
2212 
2213 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2214 
2215 	if (full) {
2216 		struct buf *bp;
2217 
2218 		(*pr)("clean bufs:\n");
2219 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2220 			(*pr)(" bp %p\n", bp);
2221 			vfs_buf_print(bp, full, pr);
2222 		}
2223 
2224 		(*pr)("dirty bufs:\n");
2225 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2226 			(*pr)(" bp %p\n", bp);
2227 			vfs_buf_print(bp, full, pr);
2228 		}
2229 	}
2230 }
2231 
2232 void
2233 vfs_mount_print(struct mount *mp, int full,
2234     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2235 {
2236 	struct vfsconf *vfc = mp->mnt_vfc;
2237 	struct vnode *vp;
2238 	int cnt;
2239 
2240 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2241 	    mp->mnt_flag, MNT_BITS,
2242 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2243 
2244 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %u flags 0x%x\n",
2245             vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2246 	    vfc->vfc_refcount, vfc->vfc_flags);
2247 
2248 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2249 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2250 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2251 
2252 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2253 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2254 
2255 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2256 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2257 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2258 
2259  	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2260 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2261 
2262  	(*pr)("  syncreads %llu asyncreads = %llu\n",
2263 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2264 
2265 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2266 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2267 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2268 
2269 	(*pr)("locked vnodes:");
2270 	/* XXX would take mountlist lock, except ddb has no context */
2271 	cnt = 0;
2272 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2273 		if (VOP_ISLOCKED(vp)) {
2274 			if (cnt == 0)
2275 				(*pr)("\n  %p", vp);
2276 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2277 				(*pr)(",\n  %p", vp);
2278 			else
2279 				(*pr)(", %p", vp);
2280 			cnt++;
2281 		}
2282 	}
2283 	(*pr)("\n");
2284 
2285 	if (full) {
2286 		(*pr)("all vnodes:");
2287 		/* XXX would take mountlist lock, except ddb has no context */
2288 		cnt = 0;
2289 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2290 			if (cnt == 0)
2291 				(*pr)("\n  %p", vp);
2292 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2293 				(*pr)(",\n  %p", vp);
2294 			else
2295 				(*pr)(", %p", vp);
2296 			cnt++;
2297 		}
2298 		(*pr)("\n");
2299 	}
2300 }
2301 #endif /* DDB */
2302 
2303 void
2304 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2305 {
2306 	const struct statfs *mbp;
2307 
2308 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2309 
2310 	if (sbp == (mbp = &mp->mnt_stat))
2311 		return;
2312 
2313 	sbp->f_fsid = mbp->f_fsid;
2314 	sbp->f_owner = mbp->f_owner;
2315 	sbp->f_flags = mbp->f_flags;
2316 	sbp->f_syncwrites = mbp->f_syncwrites;
2317 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2318 	sbp->f_syncreads = mbp->f_syncreads;
2319 	sbp->f_asyncreads = mbp->f_asyncreads;
2320 	sbp->f_namemax = mbp->f_namemax;
2321 	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2322 	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2323 	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2324 	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2325 	    sizeof(union mount_info));
2326 }
2327