/*	$OpenBSD: vfs_subr.c,v 1.195 2011/07/04 20:35:35 deraadt Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
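
/*
 * Illustrative sketch (not part of the original source): the two tables
 * above back the IFTOVT()/VTTOIF() macros from <sys/vnode.h>, which map
 * between stat-style S_IF* mode bits and vnode types.
 */
#if 0
	mode_t mode = S_IFDIR | 0755;
	enum vtype type = IFTOVT(mode);		/* VDIR */
	mode_t ifmt = VTTOIF(type);		/* S_IFDIR */
#endif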

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);
void	vhold(struct vnode *);
void	vdrop(struct vnode *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return (-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return (1);
	return (0);
}
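
/*
 * Illustrative sketch (an assumption, mirroring incore()-style lookups):
 * since the per-vnode buffer tree is keyed on b_lblkno, a stack "key"
 * buf is enough to search it.  "lblkno" here is a hypothetical value.
 */
#if 0
	struct buf key, *bp;

	key.b_lblkno = lblkno;
	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key);
#endif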

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * The default behaviour is to attempt a READ lock and, in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system.
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}
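
/*
 * Illustrative sketch (not from the original source): the usual pattern
 * for traversing mountlist while skipping filesystems that are busy or
 * in the middle of an unmount.
 */
#if 0
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;	/* busy or mid-unmount; skip it */
		/* ... examine mp safely here ... */
		vfs_unbusy(mp);
	}
#endif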

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL.
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
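
/*
 * Illustrative sketch (an assumption): VOP_SETATTR() callers start from
 * vattr_null() so that only the fields they explicitly set are acted on;
 * everything left at VNOVAL is ignored by the filesystem.  "newsize",
 * "cred" and "p" are hypothetical locals.
 */
#if 0
	struct vattr va;

	vattr_null(&va);
	va.va_size = newsize;		/* truncate or extend only */
	error = VOP_SETATTR(vp, &va, cred, p);
#endif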

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}
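
/*
 * Illustrative sketch (an assumption, modeled on how ffs_vget() uses this
 * interface): a filesystem obtains a fresh vnode and then hangs its own
 * per-file data off v_data.  "ip" is a hypothetical in-core inode.
 */
#if 0
	struct vnode *nvp;

	error = getnewvnode(VT_UFS, mp, &ffs_vops, &nvp);
	if (error)
		return (error);
	nvp->v_data = ip;
#endif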

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
			M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
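
/*
 * Illustrative sketch (not from the original source): the usual
 * get/use/release discipline around vget().
 */
#if 0
	if ((error = vget(vp, LK_EXCLUSIVE, curproc)) != 0)
		return (error);		/* being cleaned out, or busy */
	/* ... vp is referenced and exclusively locked here ... */
	vput(vp);			/* unlock and drop the reference */
#endif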

/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active vnodes.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list.
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			   (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
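
/*
 * Illustrative sketch (an assumption, modeled on ufs_access()): a
 * filesystem's access VOP typically reduces to a vaccess() call with
 * the ownership and permission bits kept in its own inode ("ip" is a
 * hypothetical in-core inode here).
 */
#if 0
	return (vaccess(vp->v_type, ip->i_ffs_mode & ALLPERMS,
	    ip->i_ffs_uid, ip->i_ffs_gid, acc_mode, cred));
#endif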

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical I/O
 * enabled; for now called at spl0().  XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}
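
/*
 * Illustrative sketch (not from the original source): writers bump
 * v_numoutput at splbio() before starting asynchronous output; the
 * matching vwakeup() normally runs from the I/O completion path, which
 * is what vwaitforio() sleepers are waiting on.
 */
#if 0
	int s = splbio();
	vp->v_numoutput++;		/* one more write in flight */
	splx(s);
	VOP_STRATEGY(bp);		/* completion ends in vwakeup() */
#endif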

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			buf_acquire(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full, int (*pr)(const char *, ...))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full, int (*pr)(const char *, ...))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      vp->v_tag >= nitems(vtags) ? "<unk>" : vtags[vp->v_tag],
	      vp->v_tag,
	      vp->v_type >= nitems(vtypes) ? "<unk>" : vtypes[vp->v_type],
	      vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}
2301