1 /*	$OpenBSD: vfs_subr.c,v 1.288 2019/04/19 09:41:07 visa Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/time.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/conf.h>
53 #include <sys/vnode.h>
54 #include <sys/lock.h>
55 #include <sys/lockf.h>
56 #include <sys/stat.h>
57 #include <sys/acct.h>
58 #include <sys/namei.h>
59 #include <sys/ucred.h>
60 #include <sys/buf.h>
61 #include <sys/errno.h>
62 #include <sys/malloc.h>
63 #include <sys/mbuf.h>
64 #include <sys/syscallargs.h>
65 #include <sys/pool.h>
66 #include <sys/tree.h>
67 #include <sys/specdev.h>
68 #include <sys/atomic.h>
69 
70 #include <netinet/in.h>
71 
72 #include <uvm/uvm_extern.h>
73 #include <uvm/uvm_vnode.h>
74 
75 #include "softraid.h"
76 
77 void sr_quiesce(void);
78 
79 enum vtype iftovt_tab[16] = {
80 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
81 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
82 };
83 
84 int	vttoif_tab[9] = {
85 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
86 	S_IFSOCK, S_IFIFO, S_IFMT,
87 };
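
/*
 * These arrays back the IFTOVT() and VTTOIF() macros (a sketch of the
 * mapping, assuming the definitions in sys/vnode.h):
 *
 *	IFTOVT(S_IFDIR) -> iftovt_tab[(S_IFDIR & S_IFMT) >> 12] == VDIR
 *	VTTOIF(VDIR)    -> vttoif_tab[VDIR]                     == S_IFDIR
 */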
88 
89 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
90 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
91 
92 /*
93  * Insq/Remq for the vnode usage lists.
94  */
95 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
96 #define	bufremvn(bp) {							\
97 	LIST_REMOVE(bp, b_vnbufs);					\
98 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
99 }
100 
101 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
102 struct freelst vnode_free_list;	/* vnode free list */
103 
104 struct mntlist mountlist;	/* mounted filesystem list */
105 
106 void	vclean(struct vnode *, int, struct proc *);
107 
108 void insmntque(struct vnode *, struct mount *);
109 int getdevvp(dev_t, struct vnode **, enum vtype);
110 
111 int vfs_hang_addrlist(struct mount *, struct netexport *,
112 				  struct export_args *);
113 int vfs_free_netcred(struct radix_node *, void *, u_int);
114 void vfs_free_addrlist(struct netexport *);
115 void vputonfreelist(struct vnode *);
116 
117 int vflush_vnode(struct vnode *, void *);
118 int maxvnodes;
119 
120 void vfs_unmountall(void);
121 
122 #ifdef DEBUG
123 void printlockedvnodes(void);
124 #endif
125 
126 struct pool vnode_pool;
127 struct pool uvm_vnode_pool;
128 
129 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
130 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
131 
132 static inline int
133 rb_buf_compare(const struct buf *b1, const struct buf *b2)
134 {
135 	if (b1->b_lblkno < b2->b_lblkno)
136 		return (-1);
137 	if (b1->b_lblkno > b2->b_lblkno)
138 		return (1);
139 	return (0);
140 }
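
/*
 * Each vnode keeps its buffers in a red-black tree (v_bufs_tree) keyed
 * on the logical block number, so in-core buffer lookups by lblkno cost
 * O(log n) rather than a list walk.
 */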
141 
142 /*
143  * Initialize the vnode management data structures.
144  */
145 void
146 vntblinit(void)
147 {
148 	/* buffer cache may need a vnode for each buffer */
149 	maxvnodes = 2 * initialvnodes;
150 	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
151 	    PR_WAITOK, "vnodes", NULL);
152 	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
153 	    PR_WAITOK, "uvmvnodes", NULL);
154 	TAILQ_INIT(&vnode_hold_list);
155 	TAILQ_INIT(&vnode_free_list);
156 	TAILQ_INIT(&mountlist);
157 	/*
158 	 * Initialize the filesystem syncer.
159 	 */
160 	vn_initialize_syncerd();
161 
162 #ifdef NFSSERVER
163 	rn_init(sizeof(struct sockaddr_in));
164 #endif /* NFSSERVER */
165 }
166 
167 /*
168  * Allocate a mount point.
169  *
170  * The returned mount point is marked as busy.
171  */
172 struct mount *
173 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp)
174 {
175 	struct mount *mp;
176 
177 	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
178 	rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
179 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
180 
181 	LIST_INIT(&mp->mnt_vnodelist);
182 	mp->mnt_vnodecovered = vp;
183 
184 	atomic_inc_int(&vfsp->vfc_refcount);
185 	mp->mnt_vfc = vfsp;
186 	mp->mnt_op = vfsp->vfc_vfsops;
187 	mp->mnt_flag = vfsp->vfc_flags;
188 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
189 
190 	return (mp);
191 }
192 
193 /*
194  * Release a mount point.
195  */
196 void
197 vfs_mount_free(struct mount *mp)
198 {
199 	atomic_dec_int(&mp->mnt_vfc->vfc_refcount);
200 	free(mp, M_MOUNT, sizeof(*mp));
201 }
202 
203 /*
204  * Mark a mount point as busy. Used to synchronize access and to delay
205  * unmounting.
206  *
207  * Default behaviour is to attempt getting a READ lock and in case of an
208  * ongoing unmount, to wait for it to finish and then return failure.
209  */
210 int
211 vfs_busy(struct mount *mp, int flags)
212 {
213 	int rwflags = 0;
214 
215 	if (flags & VB_WRITE)
216 		rwflags |= RW_WRITE;
217 	else
218 		rwflags |= RW_READ;
219 
220 	if (flags & VB_WAIT)
221 		rwflags |= RW_SLEEPFAIL;
222 	else
223 		rwflags |= RW_NOSLEEP;
224 
225 #ifdef WITNESS
226 	if (flags & VB_DUPOK)
227 		rwflags |= RW_DUPOK;
228 #endif
229 
230 	if (rw_enter(&mp->mnt_lock, rwflags))
231 		return (EBUSY);
232 
233 	return (0);
234 }
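
/*
 * Typical usage (a sketch, mirroring printlockedvnodes() below): busy
 * the mount before walking its vnode list and unbusy it afterwards.
 *
 *	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 *		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *			continue;
 *		... inspect mp->mnt_vnodelist ...
 *		vfs_unbusy(mp);
 *	}
 */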
235 
236 /*
237  * Free a busy file system
238  */
239 void
240 vfs_unbusy(struct mount *mp)
241 {
242 	rw_exit(&mp->mnt_lock);
243 }
244 
245 int
246 vfs_isbusy(struct mount *mp)
247 {
248 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
249 		return (1);
250 	else
251 		return (0);
252 }
253 
254 /*
255  * Lookup a filesystem type, and if found allocate and initialize
256  * a mount structure for it.
257  *
258  * Devname is usually updated by mount(8) after booting.
259  */
260 int
261 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
262 {
263 	struct vfsconf *vfsp;
264 	struct mount *mp;
265 
266 	vfsp = vfs_byname(fstypename);
267 	if (vfsp == NULL)
268 		return (ENODEV);
269 	mp = vfs_mount_alloc(NULLVP, vfsp);
270 	mp->mnt_flag |= MNT_RDONLY;
271 	mp->mnt_stat.f_mntonname[0] = '/';
272 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
273 	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
274 	*mpp = mp;
275 	return (0);
276 }
277 
278 /*
279  * Lookup a mount point by filesystem identifier.
280  */
281 struct mount *
282 vfs_getvfs(fsid_t *fsid)
283 {
284 	struct mount *mp;
285 
286 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
287 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
288 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
289 			return (mp);
290 		}
291 	}
292 
293 	return (NULL);
294 }
295 
296 
297 /*
298  * Get a new unique fsid
299  */
300 void
301 vfs_getnewfsid(struct mount *mp)
302 {
303 	static u_short xxxfs_mntid;
304 
305 	fsid_t tfsid;
306 	int mtype;
307 
308 	mtype = mp->mnt_vfc->vfc_typenum;
309 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
310 	mp->mnt_stat.f_fsid.val[1] = mtype;
311 	if (xxxfs_mntid == 0)
312 		++xxxfs_mntid;
313 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
314 	tfsid.val[1] = mtype;
315 	if (!TAILQ_EMPTY(&mountlist)) {
316 		while (vfs_getvfs(&tfsid)) {
317 			tfsid.val[0]++;
318 			xxxfs_mntid++;
319 		}
320 	}
321 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
322 }
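
/*
 * The resulting fsid encodes a fake device number in val[0], chosen
 * above any real block device (nblkdev + type), and the filesystem
 * type in val[1]; the loop above bumps the minor part until the pair
 * is unique across the mountlist.
 */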
323 
324 /*
325  * Set vnode attributes to VNOVAL
326  */
327 void
328 vattr_null(struct vattr *vap)
329 {
330 
331 	vap->va_type = VNON;
332 	/*
333 	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
334 	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
335 	 * the compiler do its job.
336 	 */
337 	vap->va_mode = VNOVAL;
338 	vap->va_nlink = VNOVAL;
339 	vap->va_uid = VNOVAL;
340 	vap->va_gid = VNOVAL;
341 	vap->va_fsid = VNOVAL;
342 	vap->va_fileid = VNOVAL;
343 	vap->va_size = VNOVAL;
344 	vap->va_blocksize = VNOVAL;
345 	vap->va_atime.tv_sec = VNOVAL;
346 	vap->va_atime.tv_nsec = VNOVAL;
347 	vap->va_mtime.tv_sec = VNOVAL;
348 	vap->va_mtime.tv_nsec = VNOVAL;
349 	vap->va_ctime.tv_sec = VNOVAL;
350 	vap->va_ctime.tv_nsec = VNOVAL;
351 	vap->va_gen = VNOVAL;
352 	vap->va_flags = VNOVAL;
353 	vap->va_rdev = VNOVAL;
354 	vap->va_bytes = VNOVAL;
355 	vap->va_filerev = VNOVAL;
356 	vap->va_vaflags = 0;
357 }
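
/*
 * Example use (a sketch): callers prime a vattr with VNOVAL and set
 * only the fields they mean to change, e.g. to truncate a file:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */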
358 
359 /*
360  * Routines having to do with the management of the vnode table.
361  */
362 long numvnodes;
363 
364 /*
365  * Return the next vnode from the free list.
366  */
367 int
368 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
369     struct vnode **vpp)
370 {
371 	struct proc *p = curproc;
372 	struct freelst *listhd;
373 	static int toggle;
374 	struct vnode *vp;
375 	int s;
376 
377 	/*
378 	 * allow maxvnodes to increase if the buffer cache itself
379 	 * is big enough to justify it. (we don't shrink it ever)
380 	 */
381 	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
382 	    : maxvnodes;
383 
384 	/*
385 	 * We must choose whether to allocate a new vnode or recycle an
386 	 * existing one. The criterion for allocating a new one is that
387 	 * the total number of vnodes is less than the number desired or
388 	 * there are no vnodes on either free list. Generally we only
389 	 * want to recycle vnodes that have no buffers associated with
390 	 * them, so we look first on the vnode_free_list. If it is empty,
391 	 * we next consider vnodes with referencing buffers on the
392 	 * vnode_hold_list. The toggle ensures that half the time we
393  * will recycle a vnode from the vnode_hold_list, and half the time
394  * we will allocate a new one unless the list has grown to twice
395  * the desired size. We are reluctant to recycle vnodes from the
396 	 * vnode_hold_list because we will lose the identity of all its
397 	 * referencing buffers.
398 	 */
399 	toggle ^= 1;
400 	if (numvnodes / 2 > maxvnodes)
401 		toggle = 0;
402 
403 	s = splbio();
404 	if ((numvnodes < maxvnodes) ||
405 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
406 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
407 		splx(s);
408 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
409 		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
410 		vp->v_uvm->u_vnode = vp;
411 		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
412 		cache_tree_init(&vp->v_nc_tree);
413 		TAILQ_INIT(&vp->v_cache_dst);
414 		numvnodes++;
415 	} else {
416 		TAILQ_FOREACH(vp, listhd, v_freelist) {
417 			if (VOP_ISLOCKED(vp) == 0)
418 				break;
419 		}
420 		/*
421 		 * Unless this is a bad time of the month, at most
422 		 * the first NCPUS items on the free list are
423 		 * locked, so this is close enough to being empty.
424 		 */
425 		if (vp == NULL) {
426 			splx(s);
427 			tablefull("vnode");
428 			*vpp = 0;
429 			return (ENFILE);
430 		}
431 
432 #ifdef DIAGNOSTIC
433 		if (vp->v_usecount) {
434 			vprint("free vnode", vp);
435 			panic("free vnode isn't");
436 		}
437 #endif
438 
439 		TAILQ_REMOVE(listhd, vp, v_freelist);
440 		vp->v_bioflag &= ~VBIOONFREELIST;
441 		splx(s);
442 
443 		if (vp->v_type != VBAD)
444 			vgonel(vp, p);
445 #ifdef DIAGNOSTIC
446 		if (vp->v_data) {
447 			vprint("cleaned vnode", vp);
448 			panic("cleaned vnode isn't");
449 		}
450 		s = splbio();
451 		if (vp->v_numoutput)
452 			panic("Clean vnode has pending I/O's");
453 		splx(s);
454 #endif
455 		vp->v_flag = 0;
456 		vp->v_socket = 0;
457 	}
458 	cache_purge(vp);
459 	vp->v_type = VNON;
460 	vp->v_tag = tag;
461 	vp->v_op = vops;
462 	insmntque(vp, mp);
463 	*vpp = vp;
464 	vp->v_usecount = 1;
465 	vp->v_data = 0;
466 	return (0);
467 }
468 
469 /*
470  * Move a vnode from one mount queue to another.
471  */
472 void
473 insmntque(struct vnode *vp, struct mount *mp)
474 {
475 	/*
476 	 * Delete from old mount point vnode list, if on one.
477 	 */
478 	if (vp->v_mount != NULL)
479 		LIST_REMOVE(vp, v_mntvnodes);
480 	/*
481 	 * Insert into list of vnodes for the new mount point, if available.
482 	 */
483 	if ((vp->v_mount = mp) != NULL)
484 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
485 }
486 
487 /*
488  * Create a vnode for a block device.
489  * Used for root filesystem, argdev, and swap areas.
490  * Also used for memory file system special devices.
491  */
492 int
493 bdevvp(dev_t dev, struct vnode **vpp)
494 {
495 	return (getdevvp(dev, vpp, VBLK));
496 }
497 
498 /*
499  * Create a vnode for a character device.
500  * Used for console handling.
501  */
502 int
503 cdevvp(dev_t dev, struct vnode **vpp)
504 {
505 	return (getdevvp(dev, vpp, VCHR));
506 }
507 
508 /*
509  * Create a vnode for a device.
510  * Used by bdevvp (block device) for root file system etc.,
511  * and by cdevvp (character device) for console.
512  */
513 int
514 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
515 {
516 	struct vnode *vp;
517 	struct vnode *nvp;
518 	int error;
519 
520 	if (dev == NODEV) {
521 		*vpp = NULLVP;
522 		return (0);
523 	}
524 	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
525 	if (error) {
526 		*vpp = NULLVP;
527 		return (error);
528 	}
529 	vp = nvp;
530 	vp->v_type = type;
531 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
532 		vput(vp);
533 		vp = nvp;
534 	}
535 	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
536 		vp->v_flag |= VISTTY;
537 	*vpp = vp;
538 	return (0);
539 }
540 
541 /*
542  * Check to see if the new vnode represents a special device
543  * for which we already have a vnode (either because of
544  * bdevvp() or because of a different vnode representing
545  * the same block device). If such an alias exists, deallocate
546  * the existing contents and return the aliased vnode. The
547  * caller is responsible for filling it with its new contents.
548  */
549 struct vnode *
550 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
551 {
552 	struct proc *p = curproc;
553 	struct vnode *vp;
554 	struct vnode **vpp;
555 
556 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
557 		return (NULLVP);
558 
559 	vpp = &speclisth[SPECHASH(nvp_rdev)];
560 loop:
561 	for (vp = *vpp; vp; vp = vp->v_specnext) {
562 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
563 			continue;
564 		}
565 		/*
566 		 * Alias, but not in use, so flush it out.
567 		 */
568 		if (vp->v_usecount == 0) {
569 			vgonel(vp, p);
570 			goto loop;
571 		}
572 		if (vget(vp, LK_EXCLUSIVE)) {
573 			goto loop;
574 		}
575 		break;
576 	}
577 
578 	/*
579 	 * Common case is actually in the if statement
580 	 */
581 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
582 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
583 			M_WAITOK);
584 		nvp->v_rdev = nvp_rdev;
585 		nvp->v_hashchain = vpp;
586 		nvp->v_specnext = *vpp;
587 		nvp->v_specmountpoint = NULL;
588 		nvp->v_speclockf = NULL;
589 		nvp->v_specbitmap = NULL;
590 		if (nvp->v_type == VCHR &&
591 		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
592 		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
593 			if (vp != NULLVP)
594 				nvp->v_specbitmap = vp->v_specbitmap;
595 			else
596 				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
597 				    M_VNODE, M_WAITOK | M_ZERO);
598 		}
599 		*vpp = nvp;
600 		if (vp != NULLVP) {
601 			nvp->v_flag |= VALIASED;
602 			vp->v_flag |= VALIASED;
603 			vput(vp);
604 		}
605 		return (NULLVP);
606 	}
607 
608 	/*
609 	 * This code is the uncommon case. It is called in case
610 	 * we found an alias that was VT_NON && vtype of VBLK
611 	 * This means we found a block device that was created
612 	 * using bdevvp.
613 	 * An example of such a vnode is the root partition device vnode
614 	 * created in ffs_mountroot.
615 	 *
616 	 * The vnodes created by bdevvp should not be aliased (why?).
617 	 */
618 
619 	VOP_UNLOCK(vp);
620 	vclean(vp, 0, p);
621 	vp->v_op = nvp->v_op;
622 	vp->v_tag = nvp->v_tag;
623 	nvp->v_type = VNON;
624 	insmntque(vp, mp);
625 	return (vp);
626 }
627 
628 /*
629  * Grab a particular vnode from the free list, increment its
630  * reference count and lock it. If the vnode lock bit is set,
631  * the vnode is being eliminated in vgone. In that case, we
632  * cannot grab it, so the process is awakened when the
633  * transition is completed, and an error code is returned to
634  * indicate that the vnode is no longer usable, possibly
635  * having been changed to a new file system type.
636  */
637 int
638 vget(struct vnode *vp, int flags)
639 {
640 	int error, s, onfreelist;
641 
642 	/*
643 	 * If the vnode is in the process of being cleaned out for
644 	 * another use, we wait for the cleaning to finish and then
645 	 * return failure. Cleaning is determined by checking that
646 	 * the VXLOCK flag is set.
647 	 */
648 
649 	if (vp->v_flag & VXLOCK) {
650 		if (flags & LK_NOWAIT) {
651 			return (EBUSY);
652 		}
653 
654 		vp->v_flag |= VXWANT;
655 		tsleep(vp, PINOD, "vget", 0);
656 		return (ENOENT);
657 	}
658 
659 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
660 	if (vp->v_usecount == 0 && onfreelist) {
661 		s = splbio();
662 		if (vp->v_holdcnt > 0)
663 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
664 		else
665 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
666 		vp->v_bioflag &= ~VBIOONFREELIST;
667 		splx(s);
668 	}
669 
670 	vp->v_usecount++;
671 	if (flags & LK_TYPE_MASK) {
672 		if ((error = vn_lock(vp, flags)) != 0) {
673 			vp->v_usecount--;
674 			if (vp->v_usecount == 0 && onfreelist)
675 				vputonfreelist(vp);
676 		}
677 		return (error);
678 	}
679 
680 	return (0);
681 }
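
/*
 * Callers walking shared lists typically restart on vget() failure,
 * since failure means the vnode was, or is being, recycled.  A sketch,
 * after the pattern in checkalias() above:
 *
 *	if (vget(vp, LK_EXCLUSIVE))
 *		goto loop;
 */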
682 
683 
684 /* Vnode reference. */
685 void
686 vref(struct vnode *vp)
687 {
688 #ifdef DIAGNOSTIC
689 	if (vp->v_usecount == 0)
690 		panic("vref used where vget required");
691 	if (vp->v_type == VNON)
692 		panic("vref on a VNON vnode");
693 #endif
694 	vp->v_usecount++;
695 }
696 
697 void
698 vputonfreelist(struct vnode *vp)
699 {
700 	int s;
701 	struct freelst *lst;
702 
703 	s = splbio();
704 #ifdef DIAGNOSTIC
705 	if (vp->v_usecount != 0)
706 		panic("Use count is not zero!");
707 
708 	if (vp->v_bioflag & VBIOONFREELIST) {
709 		vprint("vnode already on free list: ", vp);
710 		panic("vnode already on free list");
711 	}
712 #endif
713 
714 	vp->v_bioflag |= VBIOONFREELIST;
715 	vp->v_bioflag &= ~VBIOERROR;
716 
717 	if (vp->v_holdcnt > 0)
718 		lst = &vnode_hold_list;
719 	else
720 		lst = &vnode_free_list;
721 
722 	if (vp->v_type == VBAD)
723 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
724 	else
725 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
726 
727 	splx(s);
728 }
729 
730 /*
731  * vput(), just unlock and vrele()
732  */
733 void
734 vput(struct vnode *vp)
735 {
736 	struct proc *p = curproc;
737 
738 #ifdef DIAGNOSTIC
739 	if (vp == NULL)
740 		panic("vput: null vp");
741 #endif
742 
743 #ifdef DIAGNOSTIC
744 	if (vp->v_usecount == 0) {
745 		vprint("vput: bad ref count", vp);
746 		panic("vput: ref cnt");
747 	}
748 #endif
749 	vp->v_usecount--;
750 	KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
751 	if (vp->v_usecount > 0) {
752 		VOP_UNLOCK(vp);
753 		return;
754 	}
755 
756 #ifdef DIAGNOSTIC
757 	if (vp->v_writecount != 0) {
758 		vprint("vput: bad writecount", vp);
759 		panic("vput: v_writecount != 0");
760 	}
761 #endif
762 
763 	VOP_INACTIVE(vp, p);
764 
765 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
766 		vputonfreelist(vp);
767 }
768 
769 /*
770  * Vnode release - use for active VNODES.
771  * If count drops to zero, call inactive routine and return to freelist.
772  * Returns 0 if it did not sleep.
773  */
774 int
775 vrele(struct vnode *vp)
776 {
777 	struct proc *p = curproc;
778 
779 #ifdef DIAGNOSTIC
780 	if (vp == NULL)
781 		panic("vrele: null vp");
782 #endif
783 #ifdef DIAGNOSTIC
784 	if (vp->v_usecount == 0) {
785 		vprint("vrele: bad ref count", vp);
786 		panic("vrele: ref cnt");
787 	}
788 #endif
789 	vp->v_usecount--;
790 	if (vp->v_usecount > 0) {
791 		return (0);
792 	}
793 
794 #ifdef DIAGNOSTIC
795 	if (vp->v_writecount != 0) {
796 		vprint("vrele: bad writecount", vp);
797 		panic("vrele: v_writecount != 0");
798 	}
799 #endif
800 
801 	if (vn_lock(vp, LK_EXCLUSIVE)) {
802 #ifdef DIAGNOSTIC
803 		vprint("vrele: cannot lock", vp);
804 #endif
805 		return (1);
806 	}
807 
808 	VOP_INACTIVE(vp, p);
809 
810 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
811 		vputonfreelist(vp);
812 	return (1);
813 }
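
/*
 * Note the split: vput() is for vnodes the caller has locked and drops
 * the lock itself, while vrele() is for unlocked vnodes and only takes
 * the lock when the last reference goes away and VOP_INACTIVE() must
 * run.
 */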
814 
815 /* Page or buffer structure gets a reference. */
816 void
817 vhold(struct vnode *vp)
818 {
819 	/*
820 	 * If it is on the freelist and the hold count is currently
821 	 * zero, move it to the hold list.
822 	 */
823 	if ((vp->v_bioflag & VBIOONFREELIST) &&
824 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
825 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
826 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
827 	}
828 	vp->v_holdcnt++;
829 }
830 
831 /* Lose interest in a vnode. */
832 void
833 vdrop(struct vnode *vp)
834 {
835 #ifdef DIAGNOSTIC
836 	if (vp->v_holdcnt == 0)
837 		panic("vdrop: zero holdcnt");
838 #endif
839 
840 	vp->v_holdcnt--;
841 
842 	/*
843 	 * If it is on the holdlist and the hold count drops to
844 	 * zero, move it to the free list.
845 	 */
846 	if ((vp->v_bioflag & VBIOONFREELIST) &&
847 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
848 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
849 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
850 	}
851 }
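
/*
 * vhold()/vdrop() pairs bracket buffer references: bgetvp() holds the
 * vnode while a buffer points at it and brelvp() drops it again (see
 * the buffer routines near the end of this file).
 */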
852 
853 /*
854  * Remove any vnodes in the vnode table belonging to mount point mp.
855  *
856  * If MNT_NOFORCE is specified, there should not be any active ones,
857  * return error if any are found (nb: this is a user error, not a
858  * system error). If MNT_FORCE is specified, detach any active vnodes
859  * that are found.
860  */
861 #ifdef DEBUG
862 int busyprt = 0;	/* print out busy vnodes */
863 struct ctldebug debug1 = { "busyprt", &busyprt };
864 #endif
865 
866 int
867 vfs_mount_foreach_vnode(struct mount *mp,
868     int (*func)(struct vnode *, void *), void *arg) {
869 	struct vnode *vp, *nvp;
870 	int error = 0;
871 
872 loop:
873 	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
874 		if (vp->v_mount != mp)
875 			goto loop;
876 
877 		error = func(vp, arg);
878 
879 		if (error != 0)
880 			break;
881 	}
882 
883 	return (error);
884 }
885 
886 struct vflush_args {
887 	struct vnode *skipvp;
888 	int busy;
889 	int flags;
890 };
891 
892 int
893 vflush_vnode(struct vnode *vp, void *arg)
894 {
895 	struct vflush_args *va = arg;
896 	struct proc *p = curproc;
897 
898 	if (vp == va->skipvp) {
899 		return (0);
900 	}
901 
902 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
903 		return (0);
904 	}
905 
906 	/*
907 	 * If WRITECLOSE is set, only flush out regular file
908 	 * vnodes open for writing.
909 	 */
910 	if ((va->flags & WRITECLOSE) &&
911 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
912 		return (0);
913 	}
914 
915 	/*
916 	 * With v_usecount == 0, all we need to do is clear
917 	 * out the vnode data structures and we are done.
918 	 */
919 	if (vp->v_usecount == 0) {
920 		vgonel(vp, p);
921 		return (0);
922 	}
923 
924 	/*
925 	 * If FORCECLOSE is set, forcibly close the vnode.
926 	 * For block or character devices, revert to an
927 	 * anonymous device. For all other files, just kill them.
928 	 */
929 	if (va->flags & FORCECLOSE) {
930 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
931 			vgonel(vp, p);
932 		} else {
933 			vclean(vp, 0, p);
934 			vp->v_op = &spec_vops;
935 			insmntque(vp, NULL);
936 		}
937 		return (0);
938 	}
939 
940 	/*
941 	 * If set, this is allowed to ignore vnodes which don't
942 	 * have changes pending to disk.
943 	 * XXX Might be nice to check per-fs "inode" flags, but
944 	 * generally the filesystem is sync'd already, right?
945 	 */
946 	if ((va->flags & IGNORECLEAN) &&
947 	    LIST_EMPTY(&vp->v_dirtyblkhd))
948 		return (0);
949 
950 #ifdef DEBUG
951 	if (busyprt)
952 		vprint("vflush: busy vnode", vp);
953 #endif
954 	va->busy++;
955 	return (0);
956 }
957 
958 int
959 vflush(struct mount *mp, struct vnode *skipvp, int flags)
960 {
961 	struct vflush_args va;
962 	va.skipvp = skipvp;
963 	va.busy = 0;
964 	va.flags = flags;
965 
966 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
967 
968 	if (va.busy)
969 		return (EBUSY);
970 	return (0);
971 }
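
/*
 * Example (a sketch): a filesystem's unmount path typically calls
 *
 *	error = vflush(mp, NULLVP, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * and refuses the unmount if EBUSY comes back.
 */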
972 
973 /*
974  * Disassociate the underlying file system from a vnode.
975  */
976 void
977 vclean(struct vnode *vp, int flags, struct proc *p)
978 {
979 	int active;
980 
981 	/*
982 	 * Check to see if the vnode is in use.
983 	 * If so we have to reference it before we clean it out
984 	 * so that its count cannot fall to zero and generate a
985 	 * race against ourselves to recycle it.
986 	 */
987 	if ((active = vp->v_usecount) != 0)
988 		vp->v_usecount++;
989 
990 	/*
991 	 * Prevent the vnode from being recycled or
992 	 * brought into use while we clean it out.
993 	 */
994 	if (vp->v_flag & VXLOCK)
995 		panic("vclean: deadlock");
996 	vp->v_flag |= VXLOCK;
997 	/*
998 	 * Even if the count is zero, the VOP_INACTIVE routine may still
999 	 * have the object locked while it cleans it out. The VOP_LOCK
1000 	 * ensures that the VOP_INACTIVE routine is done with its work.
1001 	 * For active vnodes, it ensures that no other activity can
1002 	 * occur while the underlying object is being cleaned out.
1003 	 */
1004 	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);
1005 
1006 	/*
1007 	 * Clean out any VM data associated with the vnode.
1008 	 */
1009 	uvm_vnp_terminate(vp);
1010 	/*
1011 	 * Clean out any buffers associated with the vnode.
1012 	 */
1013 	if (flags & DOCLOSE)
1014 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1015 	/*
1016 	 * If purging an active vnode, it must be closed and
1017 	 * deactivated before being reclaimed. Note that the
1018 	 * VOP_INACTIVE will unlock the vnode
1019 	 */
1020 	if (active) {
1021 		if (flags & DOCLOSE)
1022 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1023 		VOP_INACTIVE(vp, p);
1024 	} else {
1025 		/*
1026 		 * Any other processes trying to obtain this lock must first
1027 		 * wait for VXLOCK to clear, then call the new lock operation.
1028 		 */
1029 		VOP_UNLOCK(vp);
1030 	}
1031 
1032 	/*
1033 	 * Reclaim the vnode.
1034 	 */
1035 	if (VOP_RECLAIM(vp, p))
1036 		panic("vclean: cannot reclaim");
1037 	if (active) {
1038 		vp->v_usecount--;
1039 		if (vp->v_usecount == 0) {
1040 			if (vp->v_holdcnt > 0)
1041 				panic("vclean: not clean");
1042 			vputonfreelist(vp);
1043 		}
1044 	}
1045 	cache_purge(vp);
1046 
1047 	/*
1048 	 * Done with purge, notify sleepers of the grim news.
1049 	 */
1050 	vp->v_op = &dead_vops;
1051 	VN_KNOTE(vp, NOTE_REVOKE);
1052 	vp->v_tag = VT_NON;
1053 	vp->v_flag &= ~VXLOCK;
1054 #ifdef VFSLCKDEBUG
1055 	vp->v_flag &= ~VLOCKSWORK;
1056 #endif
1057 	if (vp->v_flag & VXWANT) {
1058 		vp->v_flag &= ~VXWANT;
1059 		wakeup(vp);
1060 	}
1061 }
1062 
1063 /*
1064  * Recycle an unused vnode to the front of the free list.
1065  */
1066 int
1067 vrecycle(struct vnode *vp, struct proc *p)
1068 {
1069 	if (vp->v_usecount == 0) {
1070 		vgonel(vp, p);
1071 		return (1);
1072 	}
1073 	return (0);
1074 }
1075 
1076 /*
1077  * Eliminate all activity associated with a vnode
1078  * in preparation for reuse.
1079  */
1080 void
1081 vgone(struct vnode *vp)
1082 {
1083 	struct proc *p = curproc;
1084 	vgonel(vp, p);
1085 }
1086 
1087 /*
1088  * vgone, with struct proc.
1089  */
1090 void
1091 vgonel(struct vnode *vp, struct proc *p)
1092 {
1093 	struct vnode *vq;
1094 	struct vnode *vx;
1095 
1096 	KASSERT(vp->v_uvcount == 0);
1097 
1098 	/*
1099 	 * If a vgone (or vclean) is already in progress,
1100 	 * wait until it is done and return.
1101 	 */
1102 	if (vp->v_flag & VXLOCK) {
1103 		vp->v_flag |= VXWANT;
1104 		tsleep(vp, PINOD, "vgone", 0);
1105 		return;
1106 	}
1107 
1108 	/*
1109 	 * Clean out the filesystem specific data.
1110 	 */
1111 	vclean(vp, DOCLOSE, p);
1112 	/*
1113 	 * Delete from old mount point vnode list, if on one.
1114 	 */
1115 	if (vp->v_mount != NULL)
1116 		insmntque(vp, NULL);
1117 	/*
1118 	 * If special device, remove it from special device alias list
1119 	 * if it is on one.
1120 	 */
1121 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1122 		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1123 		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1124 		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1125 			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1126 		}
1127 		if (*vp->v_hashchain == vp) {
1128 			*vp->v_hashchain = vp->v_specnext;
1129 		} else {
1130 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1131 				if (vq->v_specnext != vp)
1132 					continue;
1133 				vq->v_specnext = vp->v_specnext;
1134 				break;
1135 			}
1136 			if (vq == NULL)
1137 				panic("missing bdev");
1138 		}
1139 		if (vp->v_flag & VALIASED) {
1140 			vx = NULL;
1141 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1142 				if (vq->v_rdev != vp->v_rdev ||
1143 				    vq->v_type != vp->v_type)
1144 					continue;
1145 				if (vx)
1146 					break;
1147 				vx = vq;
1148 			}
1149 			if (vx == NULL)
1150 				panic("missing alias");
1151 			if (vq == NULL)
1152 				vx->v_flag &= ~VALIASED;
1153 			vp->v_flag &= ~VALIASED;
1154 		}
1155 		lf_purgelocks(&vp->v_speclockf);
1156 		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1157 		vp->v_specinfo = NULL;
1158 	}
1159 	/*
1160 	 * Mark the vnode as dead (VBAD) so a later getnewvnode() can
1161 	 * recycle it without calling vgonel() again.
1162 	 */
1163 	vp->v_type = VBAD;
1164 
1165 	/*
1166 	 * Move onto the free list, unless we were called from
1167 	 * getnewvnode and we're not on any free list
1168 	 */
1169 	if (vp->v_usecount == 0 &&
1170 	    (vp->v_bioflag & VBIOONFREELIST)) {
1171 		int s;
1172 
1173 		s = splbio();
1174 
1175 		if (vp->v_holdcnt > 0)
1176 			panic("vgonel: not clean");
1177 
1178 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1179 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1180 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1181 		}
1182 		splx(s);
1183 	}
1184 }
1185 
1186 /*
1187  * Lookup a vnode by device number.
1188  */
1189 int
1190 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1191 {
1192 	struct vnode *vp;
1193 	int rc = 0;
1194 
1195 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1196 		if (dev != vp->v_rdev || type != vp->v_type)
1197 			continue;
1198 		*vpp = vp;
1199 		rc = 1;
1200 		break;
1201 	}
1202 	return (rc);
1203 }
1204 
1205 /*
1206  * Revoke all the vnodes corresponding to the specified minor number
1207  * range (endpoints inclusive) of the specified major.
1208  */
1209 void
1210 vdevgone(int maj, int minl, int minh, enum vtype type)
1211 {
1212 	struct vnode *vp;
1213 	int mn;
1214 
1215 	for (mn = minl; mn <= minh; mn++)
1216 		if (vfinddev(makedev(maj, mn), type, &vp))
1217 			VOP_REVOKE(vp, REVOKEALL);
1218 }
1219 
1220 /*
1221  * Calculate the total number of references to a special device.
1222  */
1223 int
1224 vcount(struct vnode *vp)
1225 {
1226 	struct vnode *vq, *vnext;
1227 	int count;
1228 
1229 loop:
1230 	if ((vp->v_flag & VALIASED) == 0)
1231 		return (vp->v_usecount);
1232 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1233 		vnext = vq->v_specnext;
1234 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1235 			continue;
1236 		/*
1237 		 * Alias, but not in use, so flush it out.
1238 		 */
1239 		if (vq->v_usecount == 0 && vq != vp) {
1240 			vgone(vq);
1241 			goto loop;
1242 		}
1243 		count += vq->v_usecount;
1244 	}
1245 	return (count);
1246 }
1247 
1248 #if defined(DEBUG) || defined(DIAGNOSTIC)
1249 /*
1250  * Print out a description of a vnode.
1251  */
1252 static char *typename[] =
1253    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1254 
1255 void
1256 vprint(char *label, struct vnode *vp)
1257 {
1258 	char buf[64];
1259 
1260 	if (label != NULL)
1261 		printf("%s: ", label);
1262 	printf("%p, type %s, use %u, write %u, hold %u,",
1263 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1264 		vp->v_holdcnt);
1265 	buf[0] = '\0';
1266 	if (vp->v_flag & VROOT)
1267 		strlcat(buf, "|VROOT", sizeof buf);
1268 	if (vp->v_flag & VTEXT)
1269 		strlcat(buf, "|VTEXT", sizeof buf);
1270 	if (vp->v_flag & VSYSTEM)
1271 		strlcat(buf, "|VSYSTEM", sizeof buf);
1272 	if (vp->v_flag & VXLOCK)
1273 		strlcat(buf, "|VXLOCK", sizeof buf);
1274 	if (vp->v_flag & VXWANT)
1275 		strlcat(buf, "|VXWANT", sizeof buf);
1276 	if (vp->v_bioflag & VBIOWAIT)
1277 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1278 	if (vp->v_bioflag & VBIOONFREELIST)
1279 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1280 	if (vp->v_bioflag & VBIOONSYNCLIST)
1281 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1282 	if (vp->v_flag & VALIASED)
1283 		strlcat(buf, "|VALIASED", sizeof buf);
1284 	if (buf[0] != '\0')
1285 		printf(" flags (%s)", &buf[1]);
1286 	if (vp->v_data == NULL) {
1287 		printf("\n");
1288 	} else {
1289 		printf("\n\t");
1290 		VOP_PRINT(vp);
1291 	}
1292 }
1293 #endif /* DEBUG || DIAGNOSTIC */
1294 
1295 #ifdef DEBUG
1296 /*
1297  * List all of the locked vnodes in the system.
1298  * Called when debugging the kernel.
1299  */
1300 void
1301 printlockedvnodes(void)
1302 {
1303 	struct mount *mp;
1304 	struct vnode *vp;
1305 
1306 	printf("Locked vnodes\n");
1307 
1308 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1309 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1310 			continue;
1311 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1312 			if (VOP_ISLOCKED(vp))
1313 				vprint(NULL, vp);
1314 		}
1315 		vfs_unbusy(mp);
1316 	}
1317 
1318 }
1319 #endif
1320 
1321 /*
1322  * Top level filesystem related information gathering.
1323  */
1324 int
1325 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1326     size_t newlen, struct proc *p)
1327 {
1328 	struct vfsconf *vfsp, *tmpvfsp;
1329 	int ret;
1330 
1331 	/* all sysctl names at this level are at least name and field */
1332 	if (namelen < 2)
1333 		return (ENOTDIR);		/* overloaded */
1334 
1335 	if (name[0] != VFS_GENERIC) {
1336 		vfsp = vfs_bytypenum(name[0]);
1337 		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1338 			return (EOPNOTSUPP);
1339 
1340 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1341 		    oldp, oldlenp, newp, newlen, p));
1342 	}
1343 
1344 	switch (name[1]) {
1345 	case VFS_MAXTYPENUM:
1346 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1347 
1348 	case VFS_CONF:
1349 		if (namelen < 3)
1350 			return (ENOTDIR);	/* overloaded */
1351 
1352 		vfsp = vfs_bytypenum(name[2]);
1353 		if (vfsp == NULL)
1354 			return (EOPNOTSUPP);
1355 
1356 		/* Make a copy, clear out kernel pointers */
1357 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1358 		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1359 		tmpvfsp->vfc_vfsops = NULL;
1360 
1361 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1362 		    sizeof(struct vfsconf));
1363 
1364 		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1365 		return (ret);
1366 	case VFS_BCACHESTAT:	/* buffer cache statistics */
1367 		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1368 		    sizeof(struct bcachestats));
1369 		return (ret);
1370 	}
1371 	return (EOPNOTSUPP);
1372 }
1373 
1374 /*
1375  * Check to see if a filesystem is mounted on a block device.
1376  */
1377 int
1378 vfs_mountedon(struct vnode *vp)
1379 {
1380 	struct vnode *vq;
1381 	int error = 0;
1382 
1383 	if (vp->v_specmountpoint != NULL)
1384 		return (EBUSY);
1385 	if (vp->v_flag & VALIASED) {
1386 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1387 			if (vq->v_rdev != vp->v_rdev ||
1388 			    vq->v_type != vp->v_type)
1389 				continue;
1390 			if (vq->v_specmountpoint != NULL) {
1391 				error = EBUSY;
1392 				break;
1393 			}
1394 		}
1395 	}
1396 	return (error);
1397 }
1398 
1399 #ifdef NFSSERVER
1400 /*
1401  * Build hash lists of net addresses and hang them off the mount point.
1402  * Called by vfs_export() to set up the lists of export addresses.
1403  */
1404 int
1405 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1406     struct export_args *argp)
1407 {
1408 	struct netcred *np;
1409 	struct radix_node_head *rnh;
1410 	int nplen, i;
1411 	struct radix_node *rn;
1412 	struct sockaddr *saddr, *smask = 0;
1413 	int error;
1414 
1415 	if (argp->ex_addrlen == 0) {
1416 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1417 			return (EPERM);
1418 		np = &nep->ne_defexported;
1419 		/* fill in the kernel's ucred from userspace's xucred */
1420 		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1421 			return (error);
1422 		mp->mnt_flag |= MNT_DEFEXPORTED;
1423 		goto finish;
1424 	}
1425 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1426 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1427 		return (EINVAL);
1428 	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1429 	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1430 	np->netc_len = nplen;
1431 	saddr = (struct sockaddr *)(np + 1);
1432 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1433 	if (error)
1434 		goto out;
1435 	if (saddr->sa_len > argp->ex_addrlen)
1436 		saddr->sa_len = argp->ex_addrlen;
1437 	if (argp->ex_masklen) {
1438 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1439 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1440 		if (error)
1441 			goto out;
1442 		if (smask->sa_len > argp->ex_masklen)
1443 			smask->sa_len = argp->ex_masklen;
1444 	}
1445 	/* fill in the kernel's ucred from userspace's xucred */
1446 	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1447 		goto out;
1448 	i = saddr->sa_family;
1449 	switch (i) {
1450 	case AF_INET:
1451 		if ((rnh = nep->ne_rtable_inet) == NULL) {
1452 			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1453 			    offsetof(struct sockaddr_in, sin_addr))) {
1454 				error = ENOBUFS;
1455 				goto out;
1456 			}
1457 			rnh = nep->ne_rtable_inet;
1458 		}
1459 		break;
1460 	default:
1461 		error = EINVAL;
1462 		goto out;
1463 	}
1464 	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1465 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1466 		error = EPERM;
1467 		goto out;
1468 	}
1469 finish:
1470 	np->netc_exflags = argp->ex_flags;
1471 	return (0);
1472 out:
1473 	free(np, M_NETADDR, np->netc_len);
1474 	return (error);
1475 }
1476 
1477 int
1478 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1479 {
1480 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1481 	struct netcred * np = (struct netcred *)rn;
1482 
1483 	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1484 	free(np, M_NETADDR, np->netc_len);
1485 	return (0);
1486 }
1487 
1488 /*
1489  * Free the net address hash lists that are hanging off the mount points.
1490  */
1491 void
1492 vfs_free_addrlist(struct netexport *nep)
1493 {
1494 	struct radix_node_head *rnh;
1495 
1496 	if ((rnh = nep->ne_rtable_inet) != NULL) {
1497 		rn_walktree(rnh, vfs_free_netcred, rnh);
1498 		free(rnh, M_RTABLE, sizeof(*rnh));
1499 		nep->ne_rtable_inet = NULL;
1500 	}
1501 }
1502 #endif /* NFSSERVER */
1503 
1504 int
1505 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1506 {
1507 #ifdef NFSSERVER
1508 	int error;
1509 
1510 	if (argp->ex_flags & MNT_DELEXPORT) {
1511 		vfs_free_addrlist(nep);
1512 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1513 	}
1514 	if (argp->ex_flags & MNT_EXPORTED) {
1515 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1516 			return (error);
1517 		mp->mnt_flag |= MNT_EXPORTED;
1518 	}
1519 	return (0);
1520 #else
1521 	return (ENOTSUP);
1522 #endif /* NFSSERVER */
1523 }
1524 
1525 struct netcred *
1526 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1527 {
1528 #ifdef NFSSERVER
1529 	struct netcred *np;
1530 	struct radix_node_head *rnh;
1531 	struct sockaddr *saddr;
1532 
1533 	np = NULL;
1534 	if (mp->mnt_flag & MNT_EXPORTED) {
1535 		/*
1536 		 * Lookup in the export list first.
1537 		 */
1538 		if (nam != NULL) {
1539 			saddr = mtod(nam, struct sockaddr *);
1540 			switch(saddr->sa_family) {
1541 			case AF_INET:
1542 				rnh = nep->ne_rtable_inet;
1543 				break;
1544 			default:
1545 				rnh = NULL;
1546 				break;
1547 			}
1548 			if (rnh != NULL)
1549 				np = (struct netcred *)rn_match(saddr, rnh);
1550 		}
1551 		/*
1552 		 * If no address match, use the default if it exists.
1553 		 */
1554 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1555 			np = &nep->ne_defexported;
1556 	}
1557 	return (np);
1558 #else
1559 	return (NULL);
1560 #endif /* NFSSERVER */
1561 }
1562 
1563 /*
1564  * Do the usual access checking.
1565  * file_mode, uid and gid are from the vnode in question,
1566  * while acc_mode and cred are from the VOP_ACCESS parameter list
1567  */
1568 int
1569 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1570     mode_t acc_mode, struct ucred *cred)
1571 {
1572 	mode_t mask;
1573 
1574 	/* User id 0 always gets read/write access. */
1575 	if (cred->cr_uid == 0) {
1576 		/* For VEXEC, at least one of the execute bits must be set. */
1577 		if ((acc_mode & VEXEC) && type != VDIR &&
1578 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1579 			return EACCES;
1580 		return 0;
1581 	}
1582 
1583 	mask = 0;
1584 
1585 	/* Otherwise, check the owner. */
1586 	if (cred->cr_uid == uid) {
1587 		if (acc_mode & VEXEC)
1588 			mask |= S_IXUSR;
1589 		if (acc_mode & VREAD)
1590 			mask |= S_IRUSR;
1591 		if (acc_mode & VWRITE)
1592 			mask |= S_IWUSR;
1593 		return (file_mode & mask) == mask ? 0 : EACCES;
1594 	}
1595 
1596 	/* Otherwise, check the groups. */
1597 	if (groupmember(gid, cred)) {
1598 		if (acc_mode & VEXEC)
1599 			mask |= S_IXGRP;
1600 		if (acc_mode & VREAD)
1601 			mask |= S_IRGRP;
1602 		if (acc_mode & VWRITE)
1603 			mask |= S_IWGRP;
1604 		return (file_mode & mask) == mask ? 0 : EACCES;
1605 	}
1606 
1607 	/* Otherwise, check everyone else. */
1608 	if (acc_mode & VEXEC)
1609 		mask |= S_IXOTH;
1610 	if (acc_mode & VREAD)
1611 		mask |= S_IROTH;
1612 	if (acc_mode & VWRITE)
1613 		mask |= S_IWOTH;
1614 	return (file_mode & mask) == mask ? 0 : EACCES;
1615 }
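
/*
 * Worked example: file_mode 0640, owner uid 100, group gid 10.  For a
 * caller with cr_uid 100 requesting VREAD|VWRITE, the owner branch
 * builds mask = S_IRUSR|S_IWUSR = 0600; 0640 & 0600 == 0600, so access
 * is granted.  A non-member other user requesting VWRITE gets mask =
 * S_IWOTH = 0002; 0640 & 0002 == 0, so EACCES.
 */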
1616 
1617 int
1618 vnoperm(struct vnode *vp)
1619 {
1620 	if (vp->v_flag & VROOT || vp->v_mount == NULL)
1621 		return 0;
1622 
1623 	return (vp->v_mount->mnt_flag & MNT_NOPERM);
1624 }
1625 
1626 struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1627 
1628 int
1629 vfs_stall(struct proc *p, int stall)
1630 {
1631 	struct mount *mp;
1632 	int allerror = 0, error;
1633 
1634 	if (stall)
1635 		rw_enter_write(&vfs_stall_lock);
1636 
1637 	/*
1638 	 * The loop variable mp is protected by vfs_busy() so that it cannot
1639 	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
1640 	 * lock order consistent with dounmount().
1641 	 */
1642 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1643 		if (stall) {
1644 			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
1645 			if (error) {
1646 				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1647 				allerror = error;
1648 				continue;
1649 			}
1650 			uvm_vnp_sync(mp);
1651 			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1652 			if (error) {
1653 				printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1654 				vfs_unbusy(mp);
1655 				allerror = error;
1656 				continue;
1657 			}
1658 			mp->mnt_flag |= MNT_STALLED;
1659 		} else {
1660 			if (mp->mnt_flag & MNT_STALLED) {
1661 				vfs_unbusy(mp);
1662 				mp->mnt_flag &= ~MNT_STALLED;
1663 			}
1664 		}
1665 	}
1666 
1667 	if (!stall)
1668 		rw_exit_write(&vfs_stall_lock);
1669 
1670 	return (allerror);
1671 }
1672 
1673 void
1674 vfs_stall_barrier(void)
1675 {
1676 	rw_enter_read(&vfs_stall_lock);
1677 	rw_exit_read(&vfs_stall_lock);
1678 }
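
/*
 * Entering and immediately leaving the read lock makes the caller wait
 * out any stall in progress: vfs_stall() holds vfs_stall_lock as a
 * writer for the whole time the mounts are stalled.
 */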
1679 
1680 /*
1681  * Unmount all file systems.
1682  * We traverse the list in reverse order under the assumption that doing so
1683  * will avoid needing to worry about dependencies.
1684  */
1685 void
1686 vfs_unmountall(void)
1687 {
1688 	struct mount *mp, *nmp;
1689 	int allerror, error, again = 1;
1690 
1691  retry:
1692 	allerror = 0;
1693 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1694 		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1695 			continue;
1696 		/* XXX Here is a race, the next pointer is not locked. */
1697 		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1698 			printf("unmount of %s failed with error %d\n",
1699 			    mp->mnt_stat.f_mntonname, error);
1700 			allerror = 1;
1701 		}
1702 	}
1703 
1704 	if (allerror) {
1705 		printf("WARNING: some file systems would not unmount\n");
1706 		if (again) {
1707 			printf("retrying\n");
1708 			again = 0;
1709 			goto retry;
1710 		}
1711 	}
1712 }
1713 
1714 /*
1715  * Sync and unmount file systems before shutting down.
1716  */
1717 void
1718 vfs_shutdown(struct proc *p)
1719 {
1720 #ifdef ACCOUNTING
1721 	acct_shutdown();
1722 #endif
1723 
1724 	printf("syncing disks...");
1725 
1726 	if (panicstr == NULL) {
1727 		/* Sync before unmount, in case we hang on something. */
1728 		sys_sync(p, NULL, NULL);
1729 		vfs_unmountall();
1730 	}
1731 
1732 #if NSOFTRAID > 0
1733 	sr_quiesce();
1734 #endif
1735 
1736 	if (vfs_syncwait(p, 1))
1737 		printf(" giving up\n");
1738 	else
1739 		printf(" done\n");
1740 }
1741 
1742 /*
1743  * Perform a sync() operation and wait for buffers to flush.
1744  */
1745 int
1746 vfs_syncwait(struct proc *p, int verbose)
1747 {
1748 	struct buf *bp;
1749 	int iter, nbusy, dcount, s;
1750 #ifdef MULTIPROCESSOR
1751 	int hold_count;
1752 #endif
1753 
1754 	sys_sync(p, NULL, NULL);
1755 
1756 	/* Wait for sync to finish. */
1757 	dcount = 10000;
1758 	for (iter = 0; iter < 20; iter++) {
1759 		nbusy = 0;
1760 		LIST_FOREACH(bp, &bufhead, b_list) {
1761 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1762 				nbusy++;
1763 			/*
1764 			 * With soft updates, some buffers that are
1765 			 * written will be remarked as dirty until other
1766 			 * buffers are written.
1767 			 */
1768 			if (bp->b_flags & B_DELWRI) {
1769 				s = splbio();
1770 				bremfree(bp);
1771 				buf_acquire(bp);
1772 				splx(s);
1773 				nbusy++;
1774 				bawrite(bp);
1775 				if (dcount-- <= 0) {
1776 					if (verbose)
1777 						printf("softdep ");
1778 					return 1;
1779 				}
1780 			}
1781 		}
1782 		if (nbusy == 0)
1783 			break;
1784 		if (verbose)
1785 			printf("%d ", nbusy);
1786 #ifdef MULTIPROCESSOR
1787 		if (_kernel_lock_held())
1788 			hold_count = __mp_release_all(&kernel_lock);
1789 		else
1790 			hold_count = 0;
1791 #endif
1792 		DELAY(40000 * iter);
1793 #ifdef MULTIPROCESSOR
1794 		if (hold_count)
1795 			__mp_acquire_count(&kernel_lock, hold_count);
1796 #endif
1797 	}
1798 
1799 	return nbusy;
1800 }
1801 
1802 /*
1803  * POSIX file system related system variables.
1804  */
1805 int
1806 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1807     void *newp, size_t newlen, struct proc *p)
1808 {
1809 	/* all sysctl names at this level are terminal */
1810 	if (namelen != 1)
1811 		return (ENOTDIR);
1812 
1813 	switch (name[0]) {
1814 	case FS_POSIX_SETUID:
1815 		if (newp && securelevel > 0)
1816 			return (EPERM);
1817 		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1818 	default:
1819 		return (EOPNOTSUPP);
1820 	}
1821 	/* NOTREACHED */
1822 }
1823 
1824 /*
1825  * file system related system variables.
1826  */
1827 int
1828 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1829     size_t newlen, struct proc *p)
1830 {
1831 	sysctlfn *fn;
1832 
1833 	switch (name[0]) {
1834 	case FS_POSIX:
1835 		fn = fs_posix_sysctl;
1836 		break;
1837 	default:
1838 		return (EOPNOTSUPP);
1839 	}
1840 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1841 }
1842 
1843 
1844 /*
1845  * Routines dealing with vnodes and buffers
1846  */
1847 
1848 /*
1849  * Wait for all outstanding I/Os to complete
1850  *
1851  * Manipulates v_numoutput. Must be called at splbio()
1852  */
1853 int
1854 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1855 {
1856 	int error = 0;
1857 
1858 	splassert(IPL_BIO);
1859 
1860 	while (vp->v_numoutput) {
1861 		vp->v_bioflag |= VBIOWAIT;
1862 		error = tsleep(&vp->v_numoutput,
1863 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1864 		if (error)
1865 			break;
1866 	}
1867 
1868 	return (error);
1869 }
1870 
1871 /*
1872  * Update outstanding I/O count and do wakeup if requested.
1873  *
1874  * Manipulates v_numoutput. Must be called at splbio()
1875  */
1876 void
1877 vwakeup(struct vnode *vp)
1878 {
1879 	splassert(IPL_BIO);
1880 
1881 	if (vp != NULL) {
1882 		if (vp->v_numoutput-- == 0)
1883 			panic("vwakeup: neg numoutput");
1884 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1885 			vp->v_bioflag &= ~VBIOWAIT;
1886 			wakeup(&vp->v_numoutput);
1887 		}
1888 	}
1889 }
1890 
1891 /*
1892  * Flush out and invalidate all buffers associated with a vnode.
1893  * Called with the underlying object locked.
1894  */
1895 int
1896 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1897     int slpflag, int slptimeo)
1898 {
1899 	struct buf *bp;
1900 	struct buf *nbp, *blist;
1901 	int s, error;
1902 
1903 #ifdef VFSLCKDEBUG
1904 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1905 		panic("%s: vp isn't locked, vp %p", __func__, vp);
1906 #endif
1907 
1908 	if (flags & V_SAVE) {
1909 		s = splbio();
1910 		vwaitforio(vp, 0, "vinvalbuf", 0);
1911 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1912 			splx(s);
1913 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1914 				return (error);
1915 			s = splbio();
1916 			if (vp->v_numoutput > 0 ||
1917 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1918 				panic("%s: dirty bufs, vp %p", __func__, vp);
1919 		}
1920 		splx(s);
1921 	}
1922 loop:
1923 	s = splbio();
1924 	for (;;) {
1925 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1926 		    (flags & V_SAVEMETA))
1927 			while (blist && blist->b_lblkno < 0)
1928 				blist = LIST_NEXT(blist, b_vnbufs);
1929 		if (blist == NULL &&
1930 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1931 		    (flags & V_SAVEMETA))
1932 			while (blist && blist->b_lblkno < 0)
1933 				blist = LIST_NEXT(blist, b_vnbufs);
1934 		if (!blist)
1935 			break;
1936 
1937 		for (bp = blist; bp; bp = nbp) {
1938 			nbp = LIST_NEXT(bp, b_vnbufs);
1939 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1940 				continue;
1941 			if (bp->b_flags & B_BUSY) {
1942 				bp->b_flags |= B_WANTED;
1943 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1944 				    "vinvalbuf", slptimeo);
1945 				if (error) {
1946 					splx(s);
1947 					return (error);
1948 				}
1949 				break;
1950 			}
1951 			bremfree(bp);
1952 			/*
1953 			 * XXX Since there are no node locks for NFS, I believe
1954 			 * there is a slight chance that a delayed write will
1955 			 * occur while sleeping just above, so check for it.
1956 			 */
1957 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1958 				buf_acquire(bp);
1959 				splx(s);
1960 				(void) VOP_BWRITE(bp);
1961 				goto loop;
1962 			}
1963 			buf_acquire_nomap(bp);
1964 			bp->b_flags |= B_INVAL;
1965 			brelse(bp);
1966 		}
1967 	}
1968 	if (!(flags & V_SAVEMETA) &&
1969 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1970 		panic("%s: flush failed, vp %p", __func__, vp);
1971 	splx(s);
1972 	return (0);
1973 }
1974 
1975 void
1976 vflushbuf(struct vnode *vp, int sync)
1977 {
1978 	struct buf *bp, *nbp;
1979 	int s;
1980 
1981 loop:
1982 	s = splbio();
1983 	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1984 		if ((bp->b_flags & B_BUSY))
1985 			continue;
1986 		if ((bp->b_flags & B_DELWRI) == 0)
1987 			panic("vflushbuf: not dirty");
1988 		bremfree(bp);
1989 		buf_acquire(bp);
1990 		splx(s);
1991 		/*
1992 		 * Wait for I/O associated with indirect blocks to complete,
1993 		 * since there is no way to quickly wait for them below.
1994 		 */
1995 		if (bp->b_vp == vp || sync == 0)
1996 			(void) bawrite(bp);
1997 		else
1998 			(void) bwrite(bp);
1999 		goto loop;
2000 	}
2001 	if (sync == 0) {
2002 		splx(s);
2003 		return;
2004 	}
2005 	vwaitforio(vp, 0, "vflushbuf", 0);
2006 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2007 		splx(s);
2008 #ifdef DIAGNOSTIC
2009 		vprint("vflushbuf: dirty", vp);
2010 #endif
2011 		goto loop;
2012 	}
2013 	splx(s);
2014 }
2015 
2016 /*
2017  * Associate a buffer with a vnode.
2018  *
2019  * Manipulates buffer vnode queues. Must be called at splbio().
2020  */
2021 void
2022 bgetvp(struct vnode *vp, struct buf *bp)
2023 {
2024 	splassert(IPL_BIO);
2025 
2026 
2027 	if (bp->b_vp)
2028 		panic("bgetvp: not free");
2029 	vhold(vp);
2030 	bp->b_vp = vp;
2031 	if (vp->v_type == VBLK || vp->v_type == VCHR)
2032 		bp->b_dev = vp->v_rdev;
2033 	else
2034 		bp->b_dev = NODEV;
2035 	/*
2036 	 * Insert onto list for new vnode.
2037 	 */
2038 	bufinsvn(bp, &vp->v_cleanblkhd);
2039 }
2040 
2041 /*
2042  * Disassociate a buffer from a vnode.
2043  *
2044  * Manipulates vnode buffer queues. Must be called at splbio().
2045  */
2046 void
2047 brelvp(struct buf *bp)
2048 {
2049 	struct vnode *vp;
2050 
2051 	splassert(IPL_BIO);
2052 
2053 	if ((vp = bp->b_vp) == NULL)
2054 		panic("brelvp: NULL");
2055 	/*
2056 	 * Delete from old vnode list, if on one.
2057 	 */
2058 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2059 		bufremvn(bp);
2060 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2061 	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2062 		vp->v_bioflag &= ~VBIOONSYNCLIST;
2063 		LIST_REMOVE(vp, v_synclist);
2064 	}
2065 	bp->b_vp = NULL;
2066 
2067 	vdrop(vp);
2068 }
2069 
2070 /*
2071  * Replaces the current vnode associated with the buffer, if any,
2072  * with a new vnode.
2073  *
2074  * If an output I/O is pending on the buffer, the old vnode
2075  * I/O count is adjusted.
2076  *
2077  * Ignores vnode buffer queues. Must be called at splbio().
2078  */
2079 void
2080 buf_replacevnode(struct buf *bp, struct vnode *newvp)
2081 {
2082 	struct vnode *oldvp = bp->b_vp;
2083 
2084 	splassert(IPL_BIO);
2085 
2086 	if (oldvp)
2087 		brelvp(bp);
2088 
2089 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2090 		newvp->v_numoutput++;	/* put it on swapdev */
2091 		vwakeup(oldvp);
2092 	}
2093 
2094 	bgetvp(newvp, bp);
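	/*
	 * bgetvp() put bp on newvp's clean list; take it back off, since
	 * this routine leaves the buffer on no vnode queue.
	 */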
2095 	bufremvn(bp);
2096 }
2097 
2098 /*
2099  * Used to assign buffers to the appropriate clean or dirty list on
2100  * the vnode and to add newly dirty vnodes to the appropriate
2101  * filesystem syncer list.
2102  *
2103  * Manipulates vnode buffer queues. Must be called at splbio().
2104  */
2105 void
2106 reassignbuf(struct buf *bp)
2107 {
2108 	struct buflists *listheadp;
2109 	int delay;
2110 	struct vnode *vp = bp->b_vp;
2111 
2112 	splassert(IPL_BIO);
2113 
2114 	/*
2115 	 * Delete from old vnode list, if on one.
2116 	 */
2117 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2118 		bufremvn(bp);
2119 
2120 	/*
2121 	 * If dirty, put on list of dirty buffers;
2122 	 * otherwise insert onto list of clean buffers.
2123 	 */
2124 	if ((bp->b_flags & B_DELWRI) == 0) {
2125 		listheadp = &vp->v_cleanblkhd;
2126 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2127 		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2128 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2129 			LIST_REMOVE(vp, v_synclist);
2130 		}
2131 	} else {
2132 		listheadp = &vp->v_dirtyblkhd;
2133 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2134 			switch (vp->v_type) {
2135 			case VDIR:
2136 				delay = syncdelay / 2;
2137 				break;
2138 			case VBLK:
2139 				if (vp->v_specmountpoint != NULL) {
2140 					delay = syncdelay / 3;
2141 					break;
2142 				}
2143 				/* FALLTHROUGH */
2144 			default:
2145 				delay = syncdelay;
2146 			}
2147 			vn_syncer_add_to_worklist(vp, delay);
2148 		}
2149 	}
2150 	bufinsvn(bp, listheadp);
2151 }
2152 
2153 /*
2154  * Check if vnode represents a disk device
2155  */
2156 int
2157 vn_isdisk(struct vnode *vp, int *errp)
2158 {
2159 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2160 		return (0);
2161 
2162 	return (1);
2163 }
2164 
2165 #ifdef DDB
2166 #include <machine/db_machdep.h>
2167 #include <ddb/db_interface.h>
2168 
2169 void
2170 vfs_buf_print(void *b, int full,
2171     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2172 {
2173 	struct buf *bp = b;
2174 
2175 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2176 	      "  proc %p error %d flags %lb\n",
2177 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2178 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2179 
2180 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2181 	      "  data %p saveaddr %p dep %p iodone %p\n",
2182 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2183 	    bp->b_data, bp->b_saveaddr,
2184 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2185 
2186 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2187 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2188 
2189 #ifdef FFS_SOFTUPDATES
2190 	if (full)
2191 		softdep_print(bp, full, pr);
2192 #endif
2193 }
2194 
2195 const char *vtypes[] = { VTYPE_NAMES };
2196 const char *vtags[] = { VTAG_NAMES };
2197 
2198 void
2199 vfs_vnode_print(void *v, int full,
2200     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2201 {
2202 	struct vnode *vp = v;
2203 
2204 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2205 	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2206 	      vp->v_tag,
2207 	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2208 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2209 
2210 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2211 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2212 	      vp->v_holdcnt, vp->v_numoutput);
2213 
2214 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2215 
2216 	if (full) {
2217 		struct buf *bp;
2218 
2219 		(*pr)("clean bufs:\n");
2220 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2221 			(*pr)(" bp %p\n", bp);
2222 			vfs_buf_print(bp, full, pr);
2223 		}
2224 
2225 		(*pr)("dirty bufs:\n");
2226 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2227 			(*pr)(" bp %p\n", bp);
2228 			vfs_buf_print(bp, full, pr);
2229 		}
2230 	}
2231 }
2232 
2233 void
2234 vfs_mount_print(struct mount *mp, int full,
2235     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2236 {
2237 	struct vfsconf *vfc = mp->mnt_vfc;
2238 	struct vnode *vp;
2239 	int cnt;
2240 
2241 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2242 	    mp->mnt_flag, MNT_BITS,
2243 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2244 
2245 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %u flags 0x%x\n",
2246             vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2247 	    vfc->vfc_refcount, vfc->vfc_flags);
2248 
2249 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2250 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2251 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2252 
2253 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2254 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2255 
2256 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2257 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2258 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2259 
2260 	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2261 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2262 
2263 	(*pr)("  syncreads %llu asyncreads = %llu\n",
2264 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2265 
2266 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2267 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2268 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2269 
2270 	(*pr)("locked vnodes:");
2271 	/* XXX would take mountlist lock, except ddb has no context */
2272 	cnt = 0;
2273 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2274 		if (VOP_ISLOCKED(vp)) {
2275 			if (cnt == 0)
2276 				(*pr)("\n  %p", vp);
2277 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2278 				(*pr)(",\n  %p", vp);
2279 			else
2280 				(*pr)(", %p", vp);
2281 			cnt++;
2282 		}
2283 	}
2284 	(*pr)("\n");
2285 
2286 	if (full) {
2287 		(*pr)("all vnodes:");
2288 		/* XXX would take mountlist lock, except ddb has no context */
2289 		cnt = 0;
2290 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2291 			if (cnt == 0)
2292 				(*pr)("\n  %p", vp);
2293 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2294 				(*pr)(",\n  %p", vp);
2295 			else
2296 				(*pr)(", %p", vp);
2297 			cnt++;
2298 		}
2299 		(*pr)("\n");
2300 	}
2301 }
2302 #endif /* DDB */
2303 
2304 void
2305 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2306 {
2307 	const struct statfs *mbp;
2308 
2309 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2310 
2311 	if (sbp == (mbp = &mp->mnt_stat))
2312 		return;
2313 
2314 	sbp->f_fsid = mbp->f_fsid;
2315 	sbp->f_owner = mbp->f_owner;
2316 	sbp->f_flags = mbp->f_flags;
2317 	sbp->f_syncwrites = mbp->f_syncwrites;
2318 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2319 	sbp->f_syncreads = mbp->f_syncreads;
2320 	sbp->f_asyncreads = mbp->f_asyncreads;
2321 	sbp->f_namemax = mbp->f_namemax;
2322 	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2323 	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2324 	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2325 	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2326 	    sizeof(union mount_info));
2327 }
2328