xref: /netbsd-src/sys/kern/vfs_subr.c (revision bbf97636b52a3be3e47af15f9d1519275d0cba10)
1 /*	$NetBSD: vfs_subr.c,v 1.230 2004/07/01 10:03:29 hannken Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1989, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. Neither the name of the University nor the names of its contributors
58  *    may be used to endorse or promote products derived from this software
59  *    without specific prior written permission.
60  *
61  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71  * SUCH DAMAGE.
72  *
73  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
74  */
75 
76 /*
77  * External virtual filesystem routines
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.230 2004/07/01 10:03:29 hannken Exp $");
82 
83 #include "opt_inet.h"
84 #include "opt_ddb.h"
85 #include "opt_compat_netbsd.h"
86 #include "opt_compat_43.h"
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/proc.h>
91 #include <sys/kernel.h>
92 #include <sys/mount.h>
93 #include <sys/time.h>
94 #include <sys/event.h>
95 #include <sys/fcntl.h>
96 #include <sys/vnode.h>
97 #include <sys/stat.h>
98 #include <sys/namei.h>
99 #include <sys/ucred.h>
100 #include <sys/buf.h>
101 #include <sys/errno.h>
102 #include <sys/malloc.h>
103 #include <sys/domain.h>
104 #include <sys/mbuf.h>
105 #include <sys/sa.h>
106 #include <sys/syscallargs.h>
107 #include <sys/device.h>
108 #include <sys/dirent.h>
109 #include <sys/filedesc.h>
110 
111 #include <miscfs/specfs/specdev.h>
112 #include <miscfs/genfs/genfs.h>
113 #include <miscfs/syncfs/syncfs.h>
114 
115 #include <netinet/in.h>
116 
117 #include <uvm/uvm.h>
118 #include <uvm/uvm_ddb.h>
119 
122 #include <sys/sysctl.h>
123 
124 const enum vtype iftovt_tab[16] = {
125 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
126 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
127 };
128 const int	vttoif_tab[9] = {
129 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
130 	S_IFSOCK, S_IFIFO, S_IFMT,
131 };
132 
133 int doforce = 1;		/* 1 => permit forcible unmounting */
134 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
135 
136 extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */
137 
138 /*
139  * Insq/Remq for the vnode usage lists.
140  */
141 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
142 #define	bufremvn(bp) {							\
143 	LIST_REMOVE(bp, b_vnbufs);					\
144 	(bp)->b_vnbufs.le_next = NOLIST;				\
145 }
146 /* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
147 struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
148 struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);
149 
150 struct mntlist mountlist =			/* mounted filesystem list */
151     CIRCLEQ_HEAD_INITIALIZER(mountlist);
152 struct vfs_list_head vfs_list =			/* vfs list */
153     LIST_HEAD_INITIALIZER(vfs_list);
154 
155 struct nfs_public nfs_pub;			/* publicly exported FS */
156 
157 struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
158 static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
159 struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
160 struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
161 struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;
162 
163 /* XXX - gross; single global lock to protect v_numoutput */
164 struct simplelock global_v_numoutput_slock = SIMPLELOCK_INITIALIZER;
165 
166 /*
167  * These define the root filesystem and device.
168  */
169 struct mount *rootfs;
170 struct vnode *rootvnode;
171 struct device *root_device;			/* root device */
172 
173 POOL_INIT(vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
174     &pool_allocator_nointr);
175 
176 MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");
177 
178 /*
179  * Local declarations.
180  */
181 void insmntque(struct vnode *, struct mount *);
182 int getdevvp(dev_t, struct vnode **, enum vtype);
183 void vgoneall(struct vnode *);
184 
185 void vclean(struct vnode *, int, struct proc *);
186 
187 static int vfs_hang_addrlist(struct mount *, struct netexport *,
188 			     struct export_args *);
189 static int vfs_free_netcred(struct radix_node *, void *);
190 static void vfs_free_addrlist(struct netexport *);
191 static struct vnode *getcleanvnode(struct proc *);
192 
193 #ifdef DEBUG
194 void printlockedvnodes(void);
195 #endif
196 
197 /*
198  * Initialize the vnode management data structures.
199  */
200 void
201 vntblinit()
202 {
203 
204 	/*
205 	 * Initialize the filesystem syncer.
206 	 */
207 	vn_initialize_syncerd();
208 }
209 
210 int
211 vfs_drainvnodes(long target, struct proc *p)
212 {
213 
214 	simple_lock(&vnode_free_list_slock);
215 	while (numvnodes > target) {
216 		struct vnode *vp;
217 
218 		vp = getcleanvnode(p);
219 		if (vp == NULL)
220 			return EBUSY; /* give up */
221 		pool_put(&vnode_pool, vp);
222 		simple_lock(&vnode_free_list_slock);
223 		numvnodes--;
224 	}
225 	simple_unlock(&vnode_free_list_slock);
226 
227 	return 0;
228 }
229 
230 /*
231  * Grab a vnode from the freelist and clean it.
232  */
233 struct vnode *
234 getcleanvnode(p)
235 	struct proc *p;
236 {
237 	struct vnode *vp;
238 	struct mount *mp;
239 	struct freelst *listhd;
240 
241 	LOCK_ASSERT(simple_lock_held(&vnode_free_list_slock));
242 
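	/*
	 * Scan the regular free list first; if nothing on it can be
	 * reclaimed, fall back to the hold list (vnodes still held by
	 * buffers or pages).
	 */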
243 	listhd = &vnode_free_list;
244 try_nextlist:
245 	TAILQ_FOREACH(vp, listhd, v_freelist) {
246 		if (!simple_lock_try(&vp->v_interlock))
247 			continue;
248 		/*
249 		 * As our LWP might hold the underlying vnode locked,
250 		 * don't try to reclaim a VLAYER vnode if it is locked.
251 		 */
252 		if ((vp->v_flag & VXLOCK) == 0 &&
253 		    ((vp->v_flag & VLAYER) == 0 || VOP_ISLOCKED(vp) == 0)) {
254 			if (vn_start_write(vp, &mp, V_NOWAIT) == 0)
255 				break;
256 		}
257 		mp = NULL;
258 		simple_unlock(&vp->v_interlock);
259 	}
260 
261 	if (vp == NULLVP) {
262 		if (listhd == &vnode_free_list) {
263 			listhd = &vnode_hold_list;
264 			goto try_nextlist;
265 		}
266 		simple_unlock(&vnode_free_list_slock);
267 		return NULLVP;
268 	}
269 
270 	if (vp->v_usecount)
271 		panic("free vnode isn't, vp %p", vp);
272 	TAILQ_REMOVE(listhd, vp, v_freelist);
273 	/* see comment on why 0xdeadb is set at end of vgone (below) */
274 	vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
275 	simple_unlock(&vnode_free_list_slock);
276 	vp->v_lease = NULL;
277 
278 	if (vp->v_type != VBAD)
279 		vgonel(vp, p);
280 	else
281 		simple_unlock(&vp->v_interlock);
282 	vn_finished_write(mp, 0);
283 #ifdef DIAGNOSTIC
284 	if (vp->v_data || vp->v_uobj.uo_npages ||
285 	    TAILQ_FIRST(&vp->v_uobj.memq))
286 		panic("cleaned vnode isn't, vp %p", vp);
287 	if (vp->v_numoutput)
288 		panic("clean vnode has pending I/O's, vp %p", vp);
289 #endif
290 	KASSERT((vp->v_flag & VONWORKLST) == 0);
291 
292 	return vp;
293 }
294 
295 /*
296  * Mark a mount point as busy. Used to synchronize access and to delay
297  * unmounting. Interlock is not released on failure.
298  */
299 int
300 vfs_busy(mp, flags, interlkp)
301 	struct mount *mp;
302 	int flags;
303 	struct simplelock *interlkp;
304 {
305 	int lkflags;
306 
307 	while (mp->mnt_iflag & IMNT_UNMOUNT) {
308 		int gone, n;
309 
310 		if (flags & LK_NOWAIT)
311 			return (ENOENT);
312 		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
313 		    && mp->mnt_unmounter == curproc)
314 			return (EDEADLK);
315 		if (interlkp)
316 			simple_unlock(interlkp);
317 		/*
318 		 * Since all busy locks are shared except the exclusive
319 		 * lock granted when unmounting, the only place that a
320 		 * wakeup needs to be done is at the release of the
321 		 * exclusive lock at the end of dounmount.
322 		 */
323 		simple_lock(&mp->mnt_slock);
324 		mp->mnt_wcnt++;
325 		ltsleep((caddr_t)mp, PVFS, "vfs_busy", 0, &mp->mnt_slock);
326 		n = --mp->mnt_wcnt;
327 		simple_unlock(&mp->mnt_slock);
328 		gone = mp->mnt_iflag & IMNT_GONE;
329 
330 		if (n == 0)
331 			wakeup(&mp->mnt_wcnt);
332 		if (interlkp)
333 			simple_lock(interlkp);
334 		if (gone)
335 			return (ENOENT);
336 	}
337 	lkflags = LK_SHARED;
338 	if (interlkp)
339 		lkflags |= LK_INTERLOCK;
340 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
341 		panic("vfs_busy: unexpected lock failure");
342 	return (0);
343 }
344 
345 /*
346  * Free a busy filesystem.
347  */
348 void
349 vfs_unbusy(mp)
350 	struct mount *mp;
351 {
352 
353 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
354 }
355 
356 /*
357  * Look up a filesystem type and, if found, allocate and initialize
358  * a mount structure for it.
359  *
360  * Devname is usually updated by mount(8) after booting.
361  */
362 int
363 vfs_rootmountalloc(fstypename, devname, mpp)
364 	char *fstypename;
365 	char *devname;
366 	struct mount **mpp;
367 {
368 	struct vfsops *vfsp = NULL;
369 	struct mount *mp;
370 
371 	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
372 		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
373 			break;
374 
375 	if (vfsp == NULL)
376 		return (ENODEV);
377 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
378 	memset((char *)mp, 0, (u_long)sizeof(struct mount));
379 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
380 	simple_lock_init(&mp->mnt_slock);
381 	(void)vfs_busy(mp, LK_NOWAIT, 0);
382 	LIST_INIT(&mp->mnt_vnodelist);
383 	mp->mnt_op = vfsp;
384 	mp->mnt_flag = MNT_RDONLY;
385 	mp->mnt_vnodecovered = NULLVP;
386 	mp->mnt_leaf = mp;
387 	vfsp->vfs_refcount++;
388 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
389 	mp->mnt_stat.f_mntonname[0] = '/';
390 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
391 	*mpp = mp;
392 	return (0);
393 }
394 
395 /*
396  * Look up a mount point by filesystem identifier.
397  */
398 struct mount *
399 vfs_getvfs(fsid)
400 	fsid_t *fsid;
401 {
402 	struct mount *mp;
403 
404 	simple_lock(&mountlist_slock);
405 	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
406 		if (mp->mnt_stat.f_fsidx.__fsid_val[0] == fsid->__fsid_val[0] &&
407 		    mp->mnt_stat.f_fsidx.__fsid_val[1] == fsid->__fsid_val[1]) {
408 			simple_unlock(&mountlist_slock);
409 			return (mp);
410 		}
411 	}
412 	simple_unlock(&mountlist_slock);
413 	return ((struct mount *)0);
414 }
415 
416 /*
417  * Get a new unique fsid
418  */
419 void
420 vfs_getnewfsid(mp)
421 	struct mount *mp;
422 {
423 	static u_short xxxfs_mntid;
424 	fsid_t tfsid;
425 	int mtype;
426 
427 	simple_lock(&mntid_slock);
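	/*
	 * Build a candidate fsid from the filesystem type hash (as the
	 * device "major" part) and a rolling 16-bit counter (as the
	 * "minor" part), then advance the candidate until vfs_getvfs()
	 * finds no mounted filesystem already using it.
	 */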
428 	mtype = makefstype(mp->mnt_op->vfs_name);
429 	mp->mnt_stat.f_fsidx.__fsid_val[0] = makedev(mtype, 0);
430 	mp->mnt_stat.f_fsidx.__fsid_val[1] = mtype;
431 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
432 	if (xxxfs_mntid == 0)
433 		++xxxfs_mntid;
434 	tfsid.__fsid_val[0] = makedev(mtype & 0xff, xxxfs_mntid);
435 	tfsid.__fsid_val[1] = mtype;
436 	if (!CIRCLEQ_EMPTY(&mountlist)) {
437 		while (vfs_getvfs(&tfsid)) {
438 			tfsid.__fsid_val[0]++;
439 			xxxfs_mntid++;
440 		}
441 	}
442 	mp->mnt_stat.f_fsidx.__fsid_val[0] = tfsid.__fsid_val[0];
443 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
444 	simple_unlock(&mntid_slock);
445 }
446 
447 /*
448  * Make a 'unique' number from a mount type name.
449  */
450 long
451 makefstype(type)
452 	const char *type;
453 {
454 	long rv;
455 
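	/* Simple shift-and-XOR hash of the type name; not guaranteed unique. */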
456 	for (rv = 0; *type; type++) {
457 		rv <<= 2;
458 		rv ^= *type;
459 	}
460 	return rv;
461 }
462 
463 
464 /*
465  * Set vnode attributes to VNOVAL
466  */
467 void
468 vattr_null(vap)
469 	struct vattr *vap;
470 {
471 
472 	vap->va_type = VNON;
473 
474 	/*
475 	 * Assign each member individually so that this remains correct
476 	 * even if the size and signedness of the members differ.
477 	 */
478 	vap->va_mode = VNOVAL;
479 	vap->va_nlink = VNOVAL;
480 	vap->va_uid = VNOVAL;
481 	vap->va_gid = VNOVAL;
482 	vap->va_fsid = VNOVAL;
483 	vap->va_fileid = VNOVAL;
484 	vap->va_size = VNOVAL;
485 	vap->va_blocksize = VNOVAL;
486 	vap->va_atime.tv_sec =
487 	    vap->va_mtime.tv_sec =
488 	    vap->va_ctime.tv_sec =
489 	    vap->va_birthtime.tv_sec = VNOVAL;
490 	vap->va_atime.tv_nsec =
491 	    vap->va_mtime.tv_nsec =
492 	    vap->va_ctime.tv_nsec =
493 	    vap->va_birthtime.tv_nsec = VNOVAL;
494 	vap->va_gen = VNOVAL;
495 	vap->va_flags = VNOVAL;
496 	vap->va_rdev = VNOVAL;
497 	vap->va_bytes = VNOVAL;
498 	vap->va_vaflags = 0;
499 }
500 
501 /*
502  * Routines having to do with the management of the vnode table.
503  */
504 extern int (**dead_vnodeop_p)(void *);
505 long numvnodes;
506 
507 /*
508  * Return the next vnode from the free list.
509  */
510 int
511 getnewvnode(tag, mp, vops, vpp)
512 	enum vtagtype tag;
513 	struct mount *mp;
514 	int (**vops)(void *);
515 	struct vnode **vpp;
516 {
517 	extern struct uvm_pagerops uvm_vnodeops;
518 	struct uvm_object *uobj;
519 	struct proc *p = curproc;	/* XXX */
520 	static int toggle;
521 	struct vnode *vp;
522 	int error = 0, tryalloc;
523 
524  try_again:
525 	if (mp) {
526 		/*
527 		 * Mark filesystem busy while we're creating a vnode.
528 		 * If unmount is in progress, this will wait; if the
529 		 * unmount succeeds (only if umount -f), this will
530 		 * return an error.  If the unmount fails, we'll keep
531 		 * going afterwards.
532 		 * (This puts the per-mount vnode list logically under
533 		 * the protection of the vfs_busy lock).
534 		 */
535 		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
536 		if (error && error != EDEADLK)
537 			return error;
538 	}
539 
540 	/*
541 	 * We must choose whether to allocate a new vnode or recycle an
542 	 * existing one. The criterion for allocating a new one is that
543 	 * the total number of vnodes is less than the number desired or
544 	 * there are no vnodes on either free list. Generally we only
545 	 * want to recycle vnodes that have no buffers associated with
546 	 * them, so we look first on the vnode_free_list. If it is empty,
547 	 * we next consider vnodes with referencing buffers on the
548 	 * vnode_hold_list. The toggle ensures that half the time we
549 	 * will recycle a vnode from the vnode_hold_list, and half the time
550 	 * we will allocate a new one unless the list has grown to twice
551 	 * the desired size. We are reluctant to recycle vnodes from the
552 	 * vnode_hold_list because we would lose the identity of all their
553 	 * referencing buffers.
554 	 */
555 
556 	vp = NULL;
557 
558 	simple_lock(&vnode_free_list_slock);
559 
560 	toggle ^= 1;
561 	if (numvnodes > 2 * desiredvnodes)
562 		toggle = 0;
563 
564 	tryalloc = numvnodes < desiredvnodes ||
565 	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
566 	     (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));
567 
568 	if (tryalloc &&
569 	    (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
570 		numvnodes++;
571 		simple_unlock(&vnode_free_list_slock);
572 		memset(vp, 0, sizeof(*vp));
573 		simple_lock_init(&vp->v_interlock);
574 		uobj = &vp->v_uobj;
575 		uobj->pgops = &uvm_vnodeops;
576 		TAILQ_INIT(&uobj->memq);
577 		/*
578 		 * done by memset() above.
579 		 *	uobj->uo_npages = 0;
580 		 *	LIST_INIT(&vp->v_nclist);
581 		 *	LIST_INIT(&vp->v_dnclist);
582 		 */
583 	} else {
584 		vp = getcleanvnode(p);
585 		/*
586 		 * Unless this is a bad time of the month, at most
587 		 * the first NCPUS items on the free list are
588 		 * locked, so this is close enough to being empty.
589 		 */
590 		if (vp == NULLVP) {
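			/*
			 * Drop the vfs_busy() reference taken above, but
			 * only if we actually acquired one (EDEADLK means
			 * we are the unmounting process and never did).
			 */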
591 			if (mp && error != EDEADLK)
592 				vfs_unbusy(mp);
593 			if (tryalloc) {
594 				printf("WARNING: unable to allocate new "
595 				    "vnode, retrying...\n");
596 				(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
597 				goto try_again;
598 			}
599 			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
600 			*vpp = 0;
601 			return (ENFILE);
602 		}
603 		vp->v_flag = 0;
604 		vp->v_socket = NULL;
605 #ifdef VERIFIED_EXEC
606 		vp->fp_status = FINGERPRINT_INVALID;
607 #endif
608 	}
609 	vp->v_type = VNON;
610 	vp->v_vnlock = &vp->v_lock;
611 	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
612 	KASSERT(LIST_EMPTY(&vp->v_nclist));
613 	KASSERT(LIST_EMPTY(&vp->v_dnclist));
614 	vp->v_tag = tag;
615 	vp->v_op = vops;
616 	insmntque(vp, mp);
617 	*vpp = vp;
618 	vp->v_usecount = 1;
619 	vp->v_data = 0;
620 	simple_lock_init(&vp->v_uobj.vmobjlock);
621 
622 	/*
623 	 * initialize uvm_object within vnode.
624 	 */
625 
626 	uobj = &vp->v_uobj;
627 	KASSERT(uobj->pgops == &uvm_vnodeops);
628 	KASSERT(uobj->uo_npages == 0);
629 	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
630 	vp->v_size = VSIZENOTSET;
631 
632 	if (mp && error != EDEADLK)
633 		vfs_unbusy(mp);
634 	return (0);
635 }
636 
637 /*
638  * This is really just the reverse of getnewvnode(). Needed for
639  * VFS_VGET functions that may need to push back a vnode in case
640  * of a locking race.
641  */
642 void
643 ungetnewvnode(vp)
644 	struct vnode *vp;
645 {
646 #ifdef DIAGNOSTIC
647 	if (vp->v_usecount != 1)
648 		panic("ungetnewvnode: busy vnode");
649 #endif
650 	vp->v_usecount--;
651 	insmntque(vp, NULL);
652 	vp->v_type = VBAD;
653 
654 	simple_lock(&vp->v_interlock);
655 	/*
656 	 * Insert at head of LRU list
657 	 */
658 	simple_lock(&vnode_free_list_slock);
659 	if (vp->v_holdcnt > 0)
660 		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
661 	else
662 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
663 	simple_unlock(&vnode_free_list_slock);
664 	simple_unlock(&vp->v_interlock);
665 }
666 
667 /*
668  * Move a vnode from one mount queue to another.
669  */
670 void
671 insmntque(vp, mp)
672 	struct vnode *vp;
673 	struct mount *mp;
674 {
675 
676 #ifdef DIAGNOSTIC
677 	if ((mp != NULL) &&
678 	    (mp->mnt_iflag & IMNT_UNMOUNT) &&
679 	    !(mp->mnt_flag & MNT_SOFTDEP) &&
680 	    vp->v_tag != VT_VFS) {
681 		panic("insmntque into dying filesystem");
682 	}
683 #endif
684 
685 	simple_lock(&mntvnode_slock);
686 	/*
687 	 * Delete from old mount point vnode list, if on one.
688 	 */
689 	if (vp->v_mount != NULL)
690 		LIST_REMOVE(vp, v_mntvnodes);
691 	/*
692 	 * Insert into list of vnodes for the new mount point, if available.
693 	 */
694 	if ((vp->v_mount = mp) != NULL)
695 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
696 	simple_unlock(&mntvnode_slock);
697 }
698 
699 /*
700  * Update outstanding I/O count and do wakeup if requested.
701  */
702 void
703 vwakeup(bp)
704 	struct buf *bp;
705 {
706 	struct vnode *vp;
707 
708 	if ((vp = bp->b_vp) != NULL) {
709 		/* XXX global lock hack
710 		 * can't use v_interlock here since this is called
711 		 * in interrupt context from biodone().
712 		 */
713 		simple_lock(&global_v_numoutput_slock);
714 		if (--vp->v_numoutput < 0)
715 			panic("vwakeup: neg numoutput, vp %p", vp);
716 		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
717 			vp->v_flag &= ~VBWAIT;
718 			wakeup((caddr_t)&vp->v_numoutput);
719 		}
720 		simple_unlock(&global_v_numoutput_slock);
721 	}
722 }
723 
724 /*
725  * Flush out and invalidate all buffers associated with a vnode.
726  * Called with the underlying vnode locked, which should prevent new dirty
727  * buffers from being queued.
728  */
729 int
730 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
731 	struct vnode *vp;
732 	int flags;
733 	struct ucred *cred;
734 	struct proc *p;
735 	int slpflag, slptimeo;
736 {
737 	struct buf *bp, *nbp;
738 	int s, error;
739 	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
740 		(flags & V_SAVE ? PGO_CLEANIT : 0);
741 
742 	/* XXXUBC this doesn't look at flags or slp* */
743 	simple_lock(&vp->v_interlock);
744 	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
745 	if (error) {
746 		return error;
747 	}
748 
749 	if (flags & V_SAVE) {
750 		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
751 		if (error)
752 		        return (error);
753 #ifdef DIAGNOSTIC
754 		s = splbio();
755 		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
756 		        panic("vinvalbuf: dirty bufs, vp %p", vp);
757 		splx(s);
758 #endif
759 	}
760 
761 	s = splbio();
762 
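	/*
	 * Walk the clean and then the dirty buffer lists, waiting for
	 * busy buffers and discarding the rest.  Sleeping may change
	 * the lists, so restart the scan after any sleep.
	 */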
763 restart:
764 	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
765 		nbp = LIST_NEXT(bp, b_vnbufs);
766 		simple_lock(&bp->b_interlock);
767 		if (bp->b_flags & B_BUSY) {
768 			bp->b_flags |= B_WANTED;
769 			error = ltsleep((caddr_t)bp,
770 				    slpflag | (PRIBIO + 1) | PNORELOCK,
771 				    "vinvalbuf", slptimeo, &bp->b_interlock);
772 			if (error) {
773 				splx(s);
774 				return (error);
775 			}
776 			goto restart;
777 		}
778 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
779 		simple_unlock(&bp->b_interlock);
780 		brelse(bp);
781 	}
782 
783 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
784 		nbp = LIST_NEXT(bp, b_vnbufs);
785 		simple_lock(&bp->b_interlock);
786 		if (bp->b_flags & B_BUSY) {
787 			bp->b_flags |= B_WANTED;
788 			error = ltsleep((caddr_t)bp,
789 				    slpflag | (PRIBIO + 1) | PNORELOCK,
790 				    "vinvalbuf", slptimeo, &bp->b_interlock);
791 			if (error) {
792 				splx(s);
793 				return (error);
794 			}
795 			goto restart;
796 		}
797 		/*
798 		 * XXX Since there are no node locks for NFS, I believe
799 		 * there is a slight chance that a delayed write will
800 		 * occur while sleeping just above, so check for it.
801 		 */
802 		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
803 #ifdef DEBUG
804 			printf("buffer still DELWRI\n");
805 #endif
806 			bp->b_flags |= B_BUSY | B_VFLUSH;
807 			simple_unlock(&bp->b_interlock);
808 			VOP_BWRITE(bp);
809 			goto restart;
810 		}
811 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
812 		simple_unlock(&bp->b_interlock);
813 		brelse(bp);
814 	}
815 
816 #ifdef DIAGNOSTIC
817 	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
818 		panic("vinvalbuf: flush failed, vp %p", vp);
819 #endif
820 
821 	splx(s);
822 
823 	return (0);
824 }
825 
826 /*
827  * Destroy any in-core blocks past the truncation length.
828  * Called with the underlying vnode locked, which should prevent new dirty
829  * buffers from being queued.
830  */
831 int
832 vtruncbuf(vp, lbn, slpflag, slptimeo)
833 	struct vnode *vp;
834 	daddr_t lbn;
835 	int slpflag, slptimeo;
836 {
837 	struct buf *bp, *nbp;
838 	int s, error;
839 	voff_t off;
840 
841 	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
842 	simple_lock(&vp->v_interlock);
843 	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
844 	if (error) {
845 		return error;
846 	}
847 
848 	s = splbio();
849 
850 restart:
851 	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
852 		nbp = LIST_NEXT(bp, b_vnbufs);
853 		if (bp->b_lblkno < lbn)
854 			continue;
855 		simple_lock(&bp->b_interlock);
856 		if (bp->b_flags & B_BUSY) {
857 			bp->b_flags |= B_WANTED;
858 			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
859 			    "vtruncbuf", slptimeo, &bp->b_interlock);
860 			if (error) {
861 				splx(s);
862 				return (error);
863 			}
864 			goto restart;
865 		}
866 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
867 		simple_unlock(&bp->b_interlock);
868 		brelse(bp);
869 	}
870 
871 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
872 		nbp = LIST_NEXT(bp, b_vnbufs);
873 		if (bp->b_lblkno < lbn)
874 			continue;
875 		simple_lock(&bp->b_interlock);
876 		if (bp->b_flags & B_BUSY) {
877 			bp->b_flags |= B_WANTED;
878 			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
879 			    "vtruncbuf", slptimeo, &bp->b_interlock);
880 			if (error) {
881 				splx(s);
882 				return (error);
883 			}
884 			goto restart;
885 		}
886 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
887 		simple_unlock(&bp->b_interlock);
888 		brelse(bp);
889 	}
890 
891 	splx(s);
892 
893 	return (0);
894 }
895 
896 void
897 vflushbuf(vp, sync)
898 	struct vnode *vp;
899 	int sync;
900 {
901 	struct buf *bp, *nbp;
902 	int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
903 	int s;
904 
905 	simple_lock(&vp->v_interlock);
906 	(void) VOP_PUTPAGES(vp, 0, 0, flags);
907 
908 loop:
909 	s = splbio();
910 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
911 		nbp = LIST_NEXT(bp, b_vnbufs);
912 		simple_lock(&bp->b_interlock);
913 		if ((bp->b_flags & B_BUSY)) {
914 			simple_unlock(&bp->b_interlock);
915 			continue;
916 		}
917 		if ((bp->b_flags & B_DELWRI) == 0)
918 			panic("vflushbuf: not dirty, bp %p", bp);
919 		bp->b_flags |= B_BUSY | B_VFLUSH;
920 		simple_unlock(&bp->b_interlock);
921 		splx(s);
922 		/*
923 		 * Wait for I/O associated with indirect blocks to complete,
924 		 * since there is no way to quickly wait for them below.
925 		 */
926 		if (bp->b_vp == vp || sync == 0)
927 			(void) bawrite(bp);
928 		else
929 			(void) bwrite(bp);
930 		goto loop;
931 	}
932 	if (sync == 0) {
933 		splx(s);
934 		return;
935 	}
936 	simple_lock(&global_v_numoutput_slock);
937 	while (vp->v_numoutput) {
938 		vp->v_flag |= VBWAIT;
939 		ltsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0,
940 			&global_v_numoutput_slock);
941 	}
942 	simple_unlock(&global_v_numoutput_slock);
943 	splx(s);
944 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
945 		vprint("vflushbuf: dirty", vp);
946 		goto loop;
947 	}
948 }
949 
950 /*
951  * Associate a buffer with a vnode.
952  */
953 void
954 bgetvp(vp, bp)
955 	struct vnode *vp;
956 	struct buf *bp;
957 {
958 	int s;
959 
960 	if (bp->b_vp)
961 		panic("bgetvp: not free, bp %p", bp);
962 	VHOLD(vp);
963 	s = splbio();
964 	bp->b_vp = vp;
965 	if (vp->v_type == VBLK || vp->v_type == VCHR)
966 		bp->b_dev = vp->v_rdev;
967 	else
968 		bp->b_dev = NODEV;
969 	/*
970 	 * Insert onto list for new vnode.
971 	 */
972 	bufinsvn(bp, &vp->v_cleanblkhd);
973 	splx(s);
974 }
975 
976 /*
977  * Disassociate a buffer from a vnode.
978  */
979 void
980 brelvp(bp)
981 	struct buf *bp;
982 {
983 	struct vnode *vp;
984 	int s;
985 
986 	if (bp->b_vp == NULL)
987 		panic("brelvp: vp NULL, bp %p", bp);
988 
989 	s = splbio();
990 	vp = bp->b_vp;
991 	/*
992 	 * Delete from old vnode list, if on one.
993 	 */
994 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
995 		bufremvn(bp);
996 
997 	if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
998 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
999 		vp->v_flag &= ~VONWORKLST;
1000 		LIST_REMOVE(vp, v_synclist);
1001 	}
1002 
1003 	bp->b_vp = NULL;
1004 	HOLDRELE(vp);
1005 	splx(s);
1006 }
1007 
1008 /*
1009  * Reassign a buffer from one vnode to another.
1010  * Used to assign file-specific control information
1011  * (indirect blocks) to the vnode to which they belong.
1012  *
1013  * This function must be called at splbio().
1014  */
1015 void
1016 reassignbuf(bp, newvp)
1017 	struct buf *bp;
1018 	struct vnode *newvp;
1019 {
1020 	struct buflists *listheadp;
1021 	int delay;
1022 
1023 	/*
1024 	 * Delete from old vnode list, if on one.
1025 	 */
1026 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1027 		bufremvn(bp);
1028 	/*
1029 	 * If dirty, put on list of dirty buffers;
1030 	 * otherwise insert onto list of clean buffers.
1031 	 */
1032 	if ((bp->b_flags & B_DELWRI) == 0) {
1033 		listheadp = &newvp->v_cleanblkhd;
1034 		if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
1035 		    (newvp->v_flag & VONWORKLST) &&
1036 		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
1037 			newvp->v_flag &= ~VONWORKLST;
1038 			LIST_REMOVE(newvp, v_synclist);
1039 		}
1040 	} else {
1041 		listheadp = &newvp->v_dirtyblkhd;
1042 		if ((newvp->v_flag & VONWORKLST) == 0) {
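			/*
			 * Not yet on the syncer work list: pick a delay
			 * based on the vnode type (directories and device
			 * metadata are written back sooner than ordinary
			 * file data) and queue it, unless the filesystem
			 * is mounted async.
			 */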
1043 			switch (newvp->v_type) {
1044 			case VDIR:
1045 				delay = dirdelay;
1046 				break;
1047 			case VBLK:
1048 				if (newvp->v_specmountpoint != NULL) {
1049 					delay = metadelay;
1050 					break;
1051 				}
1052 				/* fall through */
1053 			default:
1054 				delay = filedelay;
1055 				break;
1056 			}
1057 			if (!newvp->v_mount ||
1058 			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
1059 				vn_syncer_add_to_worklist(newvp, delay);
1060 		}
1061 	}
1062 	bufinsvn(bp, listheadp);
1063 }
1064 
1065 /*
1066  * Create a vnode for a block device.
1067  * Used for root filesystem and swap areas.
1068  * Also used for memory file system special devices.
1069  */
1070 int
1071 bdevvp(dev, vpp)
1072 	dev_t dev;
1073 	struct vnode **vpp;
1074 {
1075 
1076 	return (getdevvp(dev, vpp, VBLK));
1077 }
1078 
1079 /*
1080  * Create a vnode for a character device.
1081  * Used for kernfs and some console handling.
1082  */
1083 int
1084 cdevvp(dev, vpp)
1085 	dev_t dev;
1086 	struct vnode **vpp;
1087 {
1088 
1089 	return (getdevvp(dev, vpp, VCHR));
1090 }
1091 
1092 /*
1093  * Create a vnode for a device.
1094  * Used by bdevvp (block device) for root file system etc.,
1095  * and by cdevvp (character device) for console and kernfs.
1096  */
1097 int
1098 getdevvp(dev, vpp, type)
1099 	dev_t dev;
1100 	struct vnode **vpp;
1101 	enum vtype type;
1102 {
1103 	struct vnode *vp;
1104 	struct vnode *nvp;
1105 	int error;
1106 
1107 	if (dev == NODEV) {
1108 		*vpp = NULLVP;
1109 		return (0);
1110 	}
1111 	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
1112 	if (error) {
1113 		*vpp = NULLVP;
1114 		return (error);
1115 	}
1116 	vp = nvp;
1117 	vp->v_type = type;
1118 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
1119 		vput(vp);
1120 		vp = nvp;
1121 	}
1122 	*vpp = vp;
1123 	return (0);
1124 }
1125 
1126 /*
1127  * Check to see if the new vnode represents a special device
1128  * for which we already have a vnode (either because of
1129  * bdevvp() or because of a different vnode representing
1130  * the same block device). If such an alias exists, deallocate
1131  * the existing contents and return the aliased vnode. The
1132  * caller is responsible for filling it with its new contents.
1133  */
1134 struct vnode *
1135 checkalias(nvp, nvp_rdev, mp)
1136 	struct vnode *nvp;
1137 	dev_t nvp_rdev;
1138 	struct mount *mp;
1139 {
1140 	struct proc *p = curproc;       /* XXX */
1141 	struct vnode *vp;
1142 	struct vnode **vpp;
1143 
1144 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1145 		return (NULLVP);
1146 
1147 	vpp = &speclisth[SPECHASH(nvp_rdev)];
1148 loop:
1149 	simple_lock(&spechash_slock);
1150 	for (vp = *vpp; vp; vp = vp->v_specnext) {
1151 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
1152 			continue;
1153 		/*
1154 		 * Alias, but not in use, so flush it out.
1155 		 */
1156 		simple_lock(&vp->v_interlock);
1157 		if (vp->v_usecount == 0) {
1158 			simple_unlock(&spechash_slock);
1159 			vgonel(vp, p);
1160 			goto loop;
1161 		}
1162 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT)) {
1163 			simple_unlock(&spechash_slock);
1164 			goto loop;
1165 		}
1166 		break;
1167 	}
1168 	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
1169 		MALLOC(nvp->v_specinfo, struct specinfo *,
1170 			sizeof(struct specinfo), M_VNODE, M_NOWAIT);
1171 		/* XXX Erg. */
1172 		if (nvp->v_specinfo == NULL) {
1173 			simple_unlock(&spechash_slock);
1174 			uvm_wait("checkalias");
1175 			goto loop;
1176 		}
1177 
1178 		nvp->v_rdev = nvp_rdev;
1179 		nvp->v_hashchain = vpp;
1180 		nvp->v_specnext = *vpp;
1181 		nvp->v_specmountpoint = NULL;
1182 		simple_unlock(&spechash_slock);
1183 		nvp->v_speclockf = NULL;
1184 		simple_lock_init(&nvp->v_spec_cow_slock);
1185 		SLIST_INIT(&nvp->v_spec_cow_head);
1186 		nvp->v_spec_cow_req = 0;
1187 		nvp->v_spec_cow_count = 0;
1188 
1189 		*vpp = nvp;
1190 		if (vp != NULLVP) {
1191 			nvp->v_flag |= VALIASED;
1192 			vp->v_flag |= VALIASED;
1193 			vput(vp);
1194 		}
1195 		return (NULLVP);
1196 	}
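	/*
	 * An anonymous device vnode (VT_NON) already exists for this
	 * device: reclaim it and hand it over to the caller, who will
	 * use it in place of the freshly allocated nvp.
	 */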
1197 	simple_unlock(&spechash_slock);
1198 	VOP_UNLOCK(vp, 0);
1199 	simple_lock(&vp->v_interlock);
1200 	vclean(vp, 0, p);
1201 	vp->v_op = nvp->v_op;
1202 	vp->v_tag = nvp->v_tag;
1203 	vp->v_vnlock = &vp->v_lock;
1204 	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
1205 	nvp->v_type = VNON;
1206 	insmntque(vp, mp);
1207 	return (vp);
1208 }
1209 
1210 /*
1211  * Grab a particular vnode from the free list, increment its
1212  * reference count and lock it. If the vnode lock bit is set the
1213  * vnode is being eliminated in vgone. In that case, we cannot
1214  * grab the vnode, so the process is awakened when the transition is
1215  * completed, and an error returned to indicate that the vnode is no
1216  * longer usable (possibly having been changed to a new file system type).
1217  */
1218 int
1219 vget(vp, flags)
1220 	struct vnode *vp;
1221 	int flags;
1222 {
1223 	int error;
1224 
1225 	/*
1226 	 * If the vnode is in the process of being cleaned out for
1227 	 * another use, we wait for the cleaning to finish and then
1228 	 * return failure. Cleaning is determined by checking that
1229 	 * the VXLOCK flag is set.
1230 	 */
1231 
1232 	if ((flags & LK_INTERLOCK) == 0)
1233 		simple_lock(&vp->v_interlock);
1234 	if (vp->v_flag & VXLOCK) {
1235 		if (flags & LK_NOWAIT) {
1236 			simple_unlock(&vp->v_interlock);
1237 			return EBUSY;
1238 		}
1239 		vp->v_flag |= VXWANT;
1240 		ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
1241 		return (ENOENT);
1242 	}
1243 	if (vp->v_usecount == 0) {
1244 		simple_lock(&vnode_free_list_slock);
1245 		if (vp->v_holdcnt > 0)
1246 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
1247 		else
1248 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1249 		simple_unlock(&vnode_free_list_slock);
1250 	}
1251 	vp->v_usecount++;
1252 #ifdef DIAGNOSTIC
1253 	if (vp->v_usecount == 0) {
1254 		vprint("vget", vp);
1255 		panic("vget: usecount overflow, vp %p", vp);
1256 	}
1257 #endif
1258 	if (flags & LK_TYPE_MASK) {
1259 		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
1260 			/*
1261 			 * must expand vrele here because we do not want
1262 			 * to call VOP_INACTIVE if the reference count
1263 			 * drops back to zero since it was never really
1264 			 * active. We must remove it from the free list
1265 			 * before sleeping so that multiple processes do
1266 			 * not try to recycle it.
1267 			 */
1268 			simple_lock(&vp->v_interlock);
1269 			vp->v_usecount--;
1270 			if (vp->v_usecount > 0) {
1271 				simple_unlock(&vp->v_interlock);
1272 				return (error);
1273 			}
1274 			/*
1275 			 * insert at tail of LRU list
1276 			 */
1277 			simple_lock(&vnode_free_list_slock);
1278 			if (vp->v_holdcnt > 0)
1279 				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
1280 				    v_freelist);
1281 			else
1282 				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
1283 				    v_freelist);
1284 			simple_unlock(&vnode_free_list_slock);
1285 			simple_unlock(&vp->v_interlock);
1286 		}
1287 		return (error);
1288 	}
1289 	simple_unlock(&vp->v_interlock);
1290 	return (0);
1291 }
1292 
1293 /*
1294  * vput(), just unlock and vrele()
1295  */
1296 void
1297 vput(vp)
1298 	struct vnode *vp;
1299 {
1300 	struct proc *p = curproc;	/* XXX */
1301 
1302 #ifdef DIAGNOSTIC
1303 	if (vp == NULL)
1304 		panic("vput: null vp");
1305 #endif
1306 	simple_lock(&vp->v_interlock);
1307 	vp->v_usecount--;
1308 	if (vp->v_usecount > 0) {
1309 		simple_unlock(&vp->v_interlock);
1310 		VOP_UNLOCK(vp, 0);
1311 		return;
1312 	}
1313 #ifdef DIAGNOSTIC
1314 	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1315 		vprint("vput: bad ref count", vp);
1316 		panic("vput: ref cnt");
1317 	}
1318 #endif
1319 	/*
1320 	 * Insert at tail of LRU list.
1321 	 */
1322 	simple_lock(&vnode_free_list_slock);
1323 	if (vp->v_holdcnt > 0)
1324 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1325 	else
1326 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1327 	simple_unlock(&vnode_free_list_slock);
1328 	if (vp->v_flag & VEXECMAP) {
1329 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1330 		uvmexp.filepages += vp->v_uobj.uo_npages;
1331 	}
1332 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1333 	simple_unlock(&vp->v_interlock);
1334 	VOP_INACTIVE(vp, p);
1335 }
1336 
1337 /*
1338  * Vnode release.
1339  * If count drops to zero, call inactive routine and return to freelist.
1340  */
1341 void
1342 vrele(vp)
1343 	struct vnode *vp;
1344 {
1345 	struct proc *p = curproc;	/* XXX */
1346 
1347 #ifdef DIAGNOSTIC
1348 	if (vp == NULL)
1349 		panic("vrele: null vp");
1350 #endif
1351 	simple_lock(&vp->v_interlock);
1352 	vp->v_usecount--;
1353 	if (vp->v_usecount > 0) {
1354 		simple_unlock(&vp->v_interlock);
1355 		return;
1356 	}
1357 #ifdef DIAGNOSTIC
1358 	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1359 		vprint("vrele: bad ref count", vp);
1360 		panic("vrele: ref cnt vp %p", vp);
1361 	}
1362 #endif
1363 	/*
1364 	 * Insert at tail of LRU list.
1365 	 */
1366 	simple_lock(&vnode_free_list_slock);
1367 	if (vp->v_holdcnt > 0)
1368 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1369 	else
1370 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1371 	simple_unlock(&vnode_free_list_slock);
1372 	if (vp->v_flag & VEXECMAP) {
1373 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1374 		uvmexp.filepages += vp->v_uobj.uo_npages;
1375 	}
1376 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1377 	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
1378 		VOP_INACTIVE(vp, p);
1379 }
1380 
1381 #ifdef DIAGNOSTIC
1382 /*
1383  * Page or buffer structure gets a reference.
1384  */
1385 void
1386 vholdl(vp)
1387 	struct vnode *vp;
1388 {
1389 
1390 	/*
1391 	 * If it is on the freelist and the hold count is currently
1392 	 * zero, move it to the hold list. The test of the back
1393 	 * pointer and the use reference count of zero is because
1394 	 * it will be removed from a free list by getnewvnode,
1395 	 * but will not have its reference count incremented until
1396 	 * after calling vgone. If the reference count were
1397 	 * incremented first, vgone would (incorrectly) try to
1398 	 * close the previous instance of the underlying object.
1399 	 * So, the back pointer is explicitly set to `0xdeadb' in
1400 	 * getnewvnode after removing it from a freelist to ensure
1401 	 * that we do not try to move it here.
1402 	 */
1403 	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1404 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
1405 		simple_lock(&vnode_free_list_slock);
1406 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1407 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1408 		simple_unlock(&vnode_free_list_slock);
1409 	}
1410 	vp->v_holdcnt++;
1411 }
1412 
1413 /*
1414  * Page or buffer structure frees a reference.
1415  */
1416 void
1417 holdrelel(vp)
1418 	struct vnode *vp;
1419 {
1420 
1421 	if (vp->v_holdcnt <= 0)
1422 		panic("holdrelel: holdcnt vp %p", vp);
1423 	vp->v_holdcnt--;
1424 
1425 	/*
1426 	 * If it is on the holdlist and the hold count drops to
1427 	 * zero, move it to the free list. The test of the back
1428 	 * pointer and the use reference count of zero is because
1429 	 * it will be removed from a free list by getnewvnode,
1430 	 * but will not have its reference count incremented until
1431 	 * after calling vgone. If the reference count were
1432 	 * incremented first, vgone would (incorrectly) try to
1433 	 * close the previous instance of the underlying object.
1434 	 * So, the back pointer is explicitly set to `0xdeadb' in
1435 	 * getnewvnode after removing it from a freelist to ensure
1436 	 * that we do not try to move it here.
1437 	 */
1438 
1439 	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1440 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
1441 		simple_lock(&vnode_free_list_slock);
1442 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
1443 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1444 		simple_unlock(&vnode_free_list_slock);
1445 	}
1446 }
1447 
1448 /*
1449  * Vnode reference.
1450  */
1451 void
1452 vref(vp)
1453 	struct vnode *vp;
1454 {
1455 
1456 	simple_lock(&vp->v_interlock);
1457 	if (vp->v_usecount <= 0)
1458 		panic("vref used where vget required, vp %p", vp);
1459 	vp->v_usecount++;
1460 #ifdef DIAGNOSTIC
1461 	if (vp->v_usecount == 0) {
1462 		vprint("vref", vp);
1463 		panic("vref: usecount overflow, vp %p", vp);
1464 	}
1465 #endif
1466 	simple_unlock(&vp->v_interlock);
1467 }
1468 #endif /* DIAGNOSTIC */
1469 
1470 /*
1471  * Remove any vnodes in the vnode table belonging to mount point mp.
1472  *
1473  * If FORCECLOSE is not specified, there should not be any active ones;
1474  * return an error if any are found (nb: this is a user error, not a
1475  * system error). If FORCECLOSE is specified, detach any active vnodes
1476  * that are found.
1477  *
1478  * If WRITECLOSE is set, only flush out regular file vnodes open for
1479  * writing.
1480  *
1481  * SKIPSYSTEM causes any vnodes marked V_SYSTEM to be skipped.
1482  */
1483 #ifdef DEBUG
1484 int busyprt = 0;	/* print out busy vnodes */
1485 struct ctldebug debug1 = { "busyprt", &busyprt };
1486 #endif
1487 
1488 int
1489 vflush(mp, skipvp, flags)
1490 	struct mount *mp;
1491 	struct vnode *skipvp;
1492 	int flags;
1493 {
1494 	struct proc *p = curproc;	/* XXX */
1495 	struct vnode *vp, *nvp;
1496 	int busy = 0;
1497 
1498 	simple_lock(&mntvnode_slock);
1499 loop:
1500 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
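		/*
		 * The vnode list may change while mntvnode_slock is
		 * dropped below; if the vnode we saved as "next" has
		 * been moved off this mount, rescan from the start.
		 */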
1501 		if (vp->v_mount != mp)
1502 			goto loop;
1503 		nvp = LIST_NEXT(vp, v_mntvnodes);
1504 		/*
1505 		 * Skip over a selected vnode.
1506 		 */
1507 		if (vp == skipvp)
1508 			continue;
1509 		simple_lock(&vp->v_interlock);
1510 		/*
1511 		 * Skip over vnodes marked VSYSTEM.
1512 		 */
1513 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1514 			simple_unlock(&vp->v_interlock);
1515 			continue;
1516 		}
1517 		/*
1518 		 * If WRITECLOSE is set, only flush out regular file
1519 		 * vnodes open for writing.
1520 		 */
1521 		if ((flags & WRITECLOSE) &&
1522 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1523 			simple_unlock(&vp->v_interlock);
1524 			continue;
1525 		}
1526 		/*
1527 		 * With v_usecount == 0, all we need to do is clear
1528 		 * out the vnode data structures and we are done.
1529 		 */
1530 		if (vp->v_usecount == 0) {
1531 			simple_unlock(&mntvnode_slock);
1532 			vgonel(vp, p);
1533 			simple_lock(&mntvnode_slock);
1534 			continue;
1535 		}
1536 		/*
1537 		 * If FORCECLOSE is set, forcibly close the vnode.
1538 		 * For block or character devices, revert to an
1539 		 * anonymous device. For all other files, just kill them.
1540 		 */
1541 		if (flags & FORCECLOSE) {
1542 			simple_unlock(&mntvnode_slock);
1543 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1544 				vgonel(vp, p);
1545 			} else {
1546 				vclean(vp, 0, p);
1547 				vp->v_op = spec_vnodeop_p;
1548 				insmntque(vp, (struct mount *)0);
1549 			}
1550 			simple_lock(&mntvnode_slock);
1551 			continue;
1552 		}
1553 #ifdef DEBUG
1554 		if (busyprt)
1555 			vprint("vflush: busy vnode", vp);
1556 #endif
1557 		simple_unlock(&vp->v_interlock);
1558 		busy++;
1559 	}
1560 	simple_unlock(&mntvnode_slock);
1561 	if (busy)
1562 		return (EBUSY);
1563 	return (0);
1564 }
1565 
1566 /*
1567  * Disassociate the underlying file system from a vnode.
1568  */
1569 void
1570 vclean(vp, flags, p)
1571 	struct vnode *vp;
1572 	int flags;
1573 	struct proc *p;
1574 {
1575 	struct mount *mp;
1576 	int active;
1577 
1578 	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1579 
1580 	/*
1581 	 * Check to see if the vnode is in use.
1582 	 * If so we have to reference it before we clean it out
1583 	 * so that its count cannot fall to zero and generate a
1584 	 * race against ourselves to recycle it.
1585 	 */
1586 
1587 	if ((active = vp->v_usecount) != 0) {
1588 		vp->v_usecount++;
1589 #ifdef DIAGNOSTIC
1590 		if (vp->v_usecount == 0) {
1591 			vprint("vclean", vp);
1592 			panic("vclean: usecount overflow");
1593 		}
1594 #endif
1595 	}
1596 
1597 	/*
1598 	 * Prevent the vnode from being recycled or
1599 	 * brought into use while we clean it out.
1600 	 */
1601 	if (vp->v_flag & VXLOCK)
1602 		panic("vclean: deadlock, vp %p", vp);
1603 	vp->v_flag |= VXLOCK;
1604 	if (vp->v_flag & VEXECMAP) {
1605 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1606 		uvmexp.filepages += vp->v_uobj.uo_npages;
1607 	}
1608 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1609 
1610 	/*
1611 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1612 	 * have the object locked while it cleans it out. The VOP_LOCK
1613 	 * ensures that the VOP_INACTIVE routine is done with its work.
1614 	 * For active vnodes, it ensures that no other activity can
1615 	 * occur while the underlying object is being cleaned out.
1616 	 */
1617 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);
1618 
1619 	/*
1620 	 * Clean out any cached data associated with the vnode.
1621 	 */
1622 	if (flags & DOCLOSE) {
1623 		int error;
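		/*
		 * Try to write the dirty data out first; if that fails,
		 * just discard the buffers so the vnode can still be
		 * reclaimed.
		 */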
1624 		vn_start_write(vp, &mp, V_WAIT | V_LOWER);
1625 		error = vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1626 		vn_finished_write(mp, V_LOWER);
1627 		if (error)
1628 			error = vinvalbuf(vp, 0, NOCRED, p, 0, 0);
1629 		KASSERT(error == 0);
1630 		KASSERT((vp->v_flag & VONWORKLST) == 0);
1631 	}
1632 	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
1633 
1634 	/*
1635 	 * If purging an active vnode, it must be closed and
1636 	 * deactivated before being reclaimed. Note that the
1637 	 * VOP_INACTIVE will unlock the vnode.
1638 	 */
1639 	if (active) {
1640 		if (flags & DOCLOSE)
1641 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
1642 		VOP_INACTIVE(vp, p);
1643 	} else {
1644 		/*
1645 		 * Any other processes trying to obtain this lock must first
1646 		 * wait for VXLOCK to clear, then call the new lock operation.
1647 		 */
1648 		VOP_UNLOCK(vp, 0);
1649 	}
1650 	/*
1651 	 * Reclaim the vnode.
1652 	 */
1653 	if (VOP_RECLAIM(vp, p))
1654 		panic("vclean: cannot reclaim, vp %p", vp);
1655 	if (active) {
1656 		/*
1657 		 * Inline copy of vrele() since VOP_INACTIVE
1658 		 * has already been called.
1659 		 */
1660 		simple_lock(&vp->v_interlock);
1661 		if (--vp->v_usecount <= 0) {
1662 #ifdef DIAGNOSTIC
1663 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1664 				vprint("vclean: bad ref count", vp);
1665 				panic("vclean: ref cnt");
1666 			}
1667 #endif
1668 			/*
1669 			 * Insert at tail of LRU list.
1670 			 */
1671 
1672 			simple_unlock(&vp->v_interlock);
1673 			simple_lock(&vnode_free_list_slock);
1674 #ifdef DIAGNOSTIC
1675 			if (vp->v_holdcnt > 0)
1676 				panic("vclean: not clean, vp %p", vp);
1677 #endif
1678 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1679 			simple_unlock(&vnode_free_list_slock);
1680 		} else
1681 			simple_unlock(&vp->v_interlock);
1682 	}
1683 
1684 	KASSERT(vp->v_uobj.uo_npages == 0);
1685 	cache_purge(vp);
1686 
1687 	/*
1688 	 * Done with purge, notify sleepers of the grim news.
1689 	 */
1690 	vp->v_op = dead_vnodeop_p;
1691 	vp->v_tag = VT_NON;
1692 	simple_lock(&vp->v_interlock);
1693 	VN_KNOTE(vp, NOTE_REVOKE);	/* FreeBSD has this in vn_pollgone() */
1694 	vp->v_flag &= ~VXLOCK;
1695 	if (vp->v_flag & VXWANT) {
1696 		vp->v_flag &= ~VXWANT;
1697 		simple_unlock(&vp->v_interlock);
1698 		wakeup((caddr_t)vp);
1699 	} else
1700 		simple_unlock(&vp->v_interlock);
1701 }
1702 
1703 /*
1704  * Recycle an unused vnode to the front of the free list.
1705  * Release the passed interlock if the vnode will be recycled.
1706  */
1707 int
1708 vrecycle(vp, inter_lkp, p)
1709 	struct vnode *vp;
1710 	struct simplelock *inter_lkp;
1711 	struct proc *p;
1712 {
1713 
1714 	simple_lock(&vp->v_interlock);
1715 	if (vp->v_usecount == 0) {
1716 		if (inter_lkp)
1717 			simple_unlock(inter_lkp);
1718 		vgonel(vp, p);
1719 		return (1);
1720 	}
1721 	simple_unlock(&vp->v_interlock);
1722 	return (0);
1723 }
1724 
1725 /*
1726  * Eliminate all activity associated with a vnode
1727  * in preparation for reuse.
1728  */
1729 void
1730 vgone(vp)
1731 	struct vnode *vp;
1732 {
1733 	struct proc *p = curproc;	/* XXX */
1734 
1735 	simple_lock(&vp->v_interlock);
1736 	vgonel(vp, p);
1737 }
1738 
1739 /*
1740  * vgone, with the vp interlock held.
1741  */
1742 void
1743 vgonel(vp, p)
1744 	struct vnode *vp;
1745 	struct proc *p;
1746 {
1747 	struct vnode *vq;
1748 	struct vnode *vx;
1749 
1750 	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1751 
1752 	/*
1753 	 * If a vgone (or vclean) is already in progress,
1754 	 * wait until it is done and return.
1755 	 */
1756 
1757 	if (vp->v_flag & VXLOCK) {
1758 		vp->v_flag |= VXWANT;
1759 		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
1760 		return;
1761 	}
1762 
1763 	/*
1764 	 * Clean out the filesystem specific data.
1765 	 */
1766 
1767 	vclean(vp, DOCLOSE, p);
1768 	KASSERT((vp->v_flag & VONWORKLST) == 0);
1769 
1770 	/*
1771 	 * Delete from old mount point vnode list, if on one.
1772 	 */
1773 
1774 	if (vp->v_mount != NULL)
1775 		insmntque(vp, (struct mount *)0);
1776 
1777 	/*
1778 	 * If special device, remove it from the special device alias list,
1779 	 * if it is on one.
1780 	 */
1781 
1782 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1783 		simple_lock(&spechash_slock);
1784 		if (vp->v_hashchain != NULL) {
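			/*
			 * Unlink vp from the singly-linked hash chain of
			 * special device vnodes.
			 */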
1785 			if (*vp->v_hashchain == vp) {
1786 				*vp->v_hashchain = vp->v_specnext;
1787 			} else {
1788 				for (vq = *vp->v_hashchain; vq;
1789 							vq = vq->v_specnext) {
1790 					if (vq->v_specnext != vp)
1791 						continue;
1792 					vq->v_specnext = vp->v_specnext;
1793 					break;
1794 				}
1795 				if (vq == NULL)
1796 					panic("missing bdev");
1797 			}
1798 			if (vp->v_flag & VALIASED) {
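				/*
				 * See how many other aliases of this device
				 * remain; if exactly one is left, it is no
				 * longer aliased either.
				 */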
1799 				vx = NULL;
1800 				for (vq = *vp->v_hashchain; vq;
1801 							vq = vq->v_specnext) {
1802 					if (vq->v_rdev != vp->v_rdev ||
1803 					    vq->v_type != vp->v_type)
1804 						continue;
1805 					if (vx)
1806 						break;
1807 					vx = vq;
1808 				}
1809 				if (vx == NULL)
1810 					panic("missing alias");
1811 				if (vq == NULL)
1812 					vx->v_flag &= ~VALIASED;
1813 				vp->v_flag &= ~VALIASED;
1814 			}
1815 		}
1816 		simple_unlock(&spechash_slock);
1817 		FREE(vp->v_specinfo, M_VNODE);
1818 		vp->v_specinfo = NULL;
1819 	}
1820 
1821 	/*
1822 	 * The test of the back pointer and the reference count of
1823 	 * zero is because it will be removed from the free list by
1824 	 * getcleanvnode, but will not have its reference count
1825 	 * incremented until after calling vgone. If the reference
1826 	 * count were incremented first, vgone would (incorrectly)
1827 	 * try to close the previous instance of the underlying object.
1828 	 * So, the back pointer is explicitly set to `0xdeadb' in
1829 	 * getnewvnode after removing it from the freelist to ensure
1830 	 * that we do not try to move it here.
1831 	 */
1832 
1833 	vp->v_type = VBAD;
1834 	if (vp->v_usecount == 0) {
1835 		boolean_t dofree;
1836 
1837 		simple_lock(&vnode_free_list_slock);
1838 		if (vp->v_holdcnt > 0)
1839 			panic("vgonel: not clean, vp %p", vp);
1840 		/*
1841 		 * If it isn't on the freelist, we're called by getcleanvnode
1842 		 * and the vnode is being re-used.  Otherwise, we'll free it.
1843 		 */
1844 		dofree = vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb;
1845 		if (dofree) {
1846 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1847 			numvnodes--;
1848 		}
1849 		simple_unlock(&vnode_free_list_slock);
1850 		if (dofree)
1851 			pool_put(&vnode_pool, vp);
1852 	}
1853 }
1854 
1855 /*
1856  * Look up a vnode by device number.
1857  */
1858 int
1859 vfinddev(dev, type, vpp)
1860 	dev_t dev;
1861 	enum vtype type;
1862 	struct vnode **vpp;
1863 {
1864 	struct vnode *vp;
1865 	int rc = 0;
1866 
1867 	simple_lock(&spechash_slock);
1868 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1869 		if (dev != vp->v_rdev || type != vp->v_type)
1870 			continue;
1871 		*vpp = vp;
1872 		rc = 1;
1873 		break;
1874 	}
1875 	simple_unlock(&spechash_slock);
1876 	return (rc);
1877 }
1878 
1879 /*
1880  * Revoke all the vnodes corresponding to the specified minor number
1881  * range (endpoints inclusive) of the specified major.
1882  */
1883 void
1884 vdevgone(maj, minl, minh, type)
1885 	int maj, minl, minh;
1886 	enum vtype type;
1887 {
1888 	struct vnode *vp;
1889 	int mn;
1890 
1891 	for (mn = minl; mn <= minh; mn++)
1892 		if (vfinddev(makedev(maj, mn), type, &vp))
1893 			VOP_REVOKE(vp, REVOKEALL);
1894 }
1895 
1896 /*
1897  * Calculate the total number of references to a special device.
1898  */
1899 int
1900 vcount(vp)
1901 	struct vnode *vp;
1902 {
1903 	struct vnode *vq, *vnext;
1904 	int count;
1905 
1906 loop:
1907 	if ((vp->v_flag & VALIASED) == 0)
1908 		return (vp->v_usecount);
1909 	simple_lock(&spechash_slock);
1910 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1911 		vnext = vq->v_specnext;
1912 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1913 			continue;
1914 		/*
1915 		 * Alias, but not in use, so flush it out.
1916 		 */
1917 		if (vq->v_usecount == 0 && vq != vp &&
1918 		    (vq->v_flag & VXLOCK) == 0) {
1919 			simple_unlock(&spechash_slock);
1920 			vgone(vq);
1921 			goto loop;
1922 		}
1923 		count += vq->v_usecount;
1924 	}
1925 	simple_unlock(&spechash_slock);
1926 	return (count);
1927 }
1928 
1929 /*
1930  * Print out a description of a vnode.
1931  */
1932 const char * const vnode_types[] = {
1933 	"VNON",
1934 	"VREG",
1935 	"VDIR",
1936 	"VBLK",
1937 	"VCHR",
1938 	"VLNK",
1939 	"VSOCK",
1940 	"VFIFO",
1941 	"VBAD"
1942 };
1943 
1944 void
1945 vprint(label, vp)
1946 	char *label;
1947 	struct vnode *vp;
1948 {
1949 	char buf[96];
1950 
1951 	if (label != NULL)
1952 		printf("%s: ", label);
1953 	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
1954 	    vp->v_tag, vnode_types[vp->v_type],
1955 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
1956 	buf[0] = '\0';
1957 	if (vp->v_flag & VROOT)
1958 		strlcat(buf, "|VROOT", sizeof(buf));
1959 	if (vp->v_flag & VTEXT)
1960 		strlcat(buf, "|VTEXT", sizeof(buf));
1961 	if (vp->v_flag & VEXECMAP)
1962 		strlcat(buf, "|VEXECMAP", sizeof(buf));
1963 	if (vp->v_flag & VSYSTEM)
1964 		strlcat(buf, "|VSYSTEM", sizeof(buf));
1965 	if (vp->v_flag & VXLOCK)
1966 		strlcat(buf, "|VXLOCK", sizeof(buf));
1967 	if (vp->v_flag & VXWANT)
1968 		strlcat(buf, "|VXWANT", sizeof(buf));
1969 	if (vp->v_flag & VBWAIT)
1970 		strlcat(buf, "|VBWAIT", sizeof(buf));
1971 	if (vp->v_flag & VALIASED)
1972 		strlcat(buf, "|VALIASED", sizeof(buf));
1973 	if (buf[0] != '\0')
1974 		printf(" flags (%s)", &buf[1]);
1975 	if (vp->v_data == NULL) {
1976 		printf("\n");
1977 	} else {
1978 		printf("\n\t");
1979 		VOP_PRINT(vp);
1980 	}
1981 }
1982 
1983 #ifdef DEBUG
1984 /*
1985  * List all of the locked vnodes in the system.
1986  * Called when debugging the kernel.
1987  */
1988 void
1989 printlockedvnodes()
1990 {
1991 	struct mount *mp, *nmp;
1992 	struct vnode *vp;
1993 
1994 	printf("Locked vnodes\n");
1995 	simple_lock(&mountlist_slock);
1996 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
1997 	     mp = nmp) {
1998 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
1999 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
2000 			continue;
2001 		}
2002 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2003 			if (VOP_ISLOCKED(vp))
2004 				vprint(NULL, vp);
2005 		}
2006 		simple_lock(&mountlist_slock);
2007 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
2008 		vfs_unbusy(mp);
2009 	}
2010 	simple_unlock(&mountlist_slock);
2011 }
2012 #endif
2013 
2014 /*
2015  * sysctl helper routine for vfs.generic.conf lookups.
2016  */
2017 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2018 static int
2019 sysctl_vfs_generic_conf(SYSCTLFN_ARGS)
2020 {
2021 	struct vfsconf vfc;
2022 	extern const char * const mountcompatnames[];
2023 	extern int nmountcompatnames;
2024 	struct sysctlnode node;
2025 	struct vfsops *vfsp;
2026 	u_int vfsnum;
2027 
2028 	if (namelen != 1)
2029 		return (ENOTDIR);
2030 	vfsnum = name[0];
2031 	if (vfsnum >= nmountcompatnames ||
2032 	    mountcompatnames[vfsnum] == NULL)
2033 		return (EOPNOTSUPP);
2034 	vfsp = vfs_getopsbyname(mountcompatnames[vfsnum]);
2035 	if (vfsp == NULL)
2036 		return (EOPNOTSUPP);
2037 
2038 	vfc.vfc_vfsops = vfsp;
2039 	strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
2040 	vfc.vfc_typenum = vfsnum;
2041 	vfc.vfc_refcount = vfsp->vfs_refcount;
2042 	vfc.vfc_flags = 0;
2043 	vfc.vfc_mountroot = vfsp->vfs_mountroot;
2044 	vfc.vfc_next = NULL;
2045 
2046 	node = *rnode;
2047 	node.sysctl_data = &vfc;
2048 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
2049 }
2050 #endif
2051 
2052 /*
2053  * sysctl helper routine to return list of supported fstypes
2054  */
2055 static int
2056 sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
2057 {
2058 	char buf[MFSNAMELEN];
2059 	char *where = oldp;
2060 	struct vfsops *v;
2061 	size_t needed, left, slen;
2062 	int error, first;
2063 
2064 	if (newp != NULL)
2065 		return (EPERM);
2066 	if (namelen != 0)
2067 		return (EINVAL);
2068 
2069 	first = 1;
2070 	error = 0;
2071 	needed = 0;
2072 	left = *oldlenp;
2073 
2074 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2075 		if (where == NULL)
2076 			needed += strlen(v->vfs_name) + 1;
2077 		else {
2078 			memset(buf, 0, sizeof(buf));
2079 			if (first) {
2080 				strncpy(buf, v->vfs_name, sizeof(buf));
2081 				first = 0;
2082 			} else {
2083 				buf[0] = ' ';
2084 				strncpy(buf + 1, v->vfs_name, sizeof(buf) - 1);
2085 			}
2086 			buf[sizeof(buf)-1] = '\0';
2087 			slen = strlen(buf);
2088 			if (left < slen + 1)
2089 				break;
2090 			/* +1 to copy out the trailing NUL byte */
2091 			error = copyout(buf, where, slen + 1);
2092 			if (error)
2093 				break;
2094 			where += slen;
2095 			needed += slen;
2096 			left -= slen;
2097 		}
2098 	}
2099 	*oldlenp = needed;
2100 	return (error);
2101 }
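
/*
 * Illustrative userland sketch of querying the handler above once the
 * "fstypes" node is registered (see the SYSCTL_SETUP below).  Because
 * the node is created with CTL_CREATE its MIB number is dynamic, so
 * sysctlbyname(3) is the convenient interface; error handling is
 * omitted for brevity:
 *
 *	char buf[1024];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("vfs.generic.fstypes", buf, &len, NULL, 0) == 0)
 *		printf("file systems: %s\n", buf);
 */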
2102 
2103 /*
2104  * Top level filesystem related information gathering.
2105  */
2106 SYSCTL_SETUP(sysctl_vfs_setup, "sysctl vfs subtree setup")
2107 {
2108 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2109 	extern int nmountcompatnames;
2110 #endif
2111 
2112 	sysctl_createv(clog, 0, NULL, NULL,
2113 		       CTLFLAG_PERMANENT,
2114 		       CTLTYPE_NODE, "vfs", NULL,
2115 		       NULL, 0, NULL, 0,
2116 		       CTL_VFS, CTL_EOL);
2117 	sysctl_createv(clog, 0, NULL, NULL,
2118 		       CTLFLAG_PERMANENT,
2119 		       CTLTYPE_NODE, "generic",
2120 		       SYSCTL_DESCR("Non-specific vfs related information"),
2121 		       NULL, 0, NULL, 0,
2122 		       CTL_VFS, VFS_GENERIC, CTL_EOL);
2123 
2124 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2125 	sysctl_createv(clog, 0, NULL, NULL,
2126 		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
2127 		       CTLTYPE_INT, "maxtypenum",
2128 		       SYSCTL_DESCR("Highest valid filesystem type number"),
2129 		       NULL, nmountcompatnames, NULL, 0,
2130 		       CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM, CTL_EOL);
2131 #endif
2132 	sysctl_createv(clog, 0, NULL, NULL,
2133 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2134 		       CTLTYPE_INT, "usermount",
2135 		       SYSCTL_DESCR("Whether unprivileged users may mount "
2136 				    "filesystems"),
2137 		       NULL, 0, &dovfsusermount, 0,
2138 		       CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
2139 	sysctl_createv(clog, 0, NULL, NULL,
2140 		       CTLFLAG_PERMANENT,
2141 		       CTLTYPE_STRING, "fstypes",
2142 		       SYSCTL_DESCR("List of file systems present"),
2143 		       sysctl_vfs_generic_fstypes, 0, NULL, 0,
2144 		       CTL_VFS, VFS_GENERIC, CTL_CREATE, CTL_EOL);
2145 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2146 	sysctl_createv(clog, 0, NULL, NULL,
2147 		       CTLFLAG_PERMANENT,
2148 		       CTLTYPE_STRUCT, "conf",
2149 		       SYSCTL_DESCR("Filesystem configuration information"),
2150 		       sysctl_vfs_generic_conf, 0, NULL,
2151 		       sizeof(struct vfsconf),
2152 		       CTL_VFS, VFS_GENERIC, VFS_CONF, CTL_EOL);
2153 #endif
2154 }
2155 
2157 int kinfo_vdebug = 1;
2158 int kinfo_vgetfailed;
2159 #define KINFO_VNODESLOP	10
2160 /*
2161  * Dump vnode list (via sysctl).
2162  * Copyout address of vnode followed by vnode.
2163  */
2164 /* ARGSUSED */
2165 int
2166 sysctl_kern_vnode(SYSCTLFN_ARGS)
2167 {
2168 	char *where = oldp;
2169 	size_t *sizep = oldlenp;
2170 	struct mount *mp, *nmp;
2171 	struct vnode *nvp, *vp;
2172 	char *bp = where, *savebp;
2173 	char *ewhere;
2174 	int error;
2175 
2176 	if (namelen != 0)
2177 		return (EOPNOTSUPP);
2178 	if (newp != NULL)
2179 		return (EPERM);
2180 
2181 #define VPTRSZ	sizeof(struct vnode *)
2182 #define VNODESZ	sizeof(struct vnode)
2183 	if (where == NULL) {
2184 		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
2185 		return (0);
2186 	}
2187 	ewhere = where + *sizep;
2188 
2189 	simple_lock(&mountlist_slock);
2190 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
2191 	     mp = nmp) {
2192 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
2193 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
2194 			continue;
2195 		}
2196 		savebp = bp;
2197 again:
2198 		simple_lock(&mntvnode_slock);
2199 		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2200 		     vp != NULL;
2201 		     vp = nvp) {
2202 			/*
2203 			 * Check that the vp is still associated with
2204 			 * this filesystem.  RACE: could have been
2205 			 * recycled onto the same filesystem.
2206 			 */
2207 			if (vp->v_mount != mp) {
2208 				simple_unlock(&mntvnode_slock);
2209 				if (kinfo_vdebug)
2210 					printf("kinfo: vp changed\n");
2211 				bp = savebp;
2212 				goto again;
2213 			}
2214 			nvp = LIST_NEXT(vp, v_mntvnodes);
2215 			if (bp + VPTRSZ + VNODESZ > ewhere) {
2216 				simple_unlock(&mntvnode_slock);
2217 				*sizep = bp - where;
				vfs_unbusy(mp);	/* drop the busy reference */
2218 				return (ENOMEM);
2219 			}
2220 			simple_unlock(&mntvnode_slock);
2221 			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
2222 			    (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ))) {
				vfs_unbusy(mp);	/* drop the busy reference */
2223 				return (error);
			}
2224 			bp += VPTRSZ + VNODESZ;
2225 			simple_lock(&mntvnode_slock);
2226 		}
2227 		simple_unlock(&mntvnode_slock);
2228 		simple_lock(&mountlist_slock);
2229 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
2230 		vfs_unbusy(mp);
2231 	}
2232 	simple_unlock(&mountlist_slock);
2233 
2234 	*sizep = bp - where;
2235 	return (0);
2236 }
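
/*
 * Illustrative userland sketch of the two-step protocol this handler
 * implements: a NULL buffer yields a size estimate padded by
 * KINFO_VNODESLOP, and a second call copies out (struct vnode *,
 * struct vnode) pairs.  Error handling omitted:
 *
 *	int mib[2] = { CTL_KERN, KERN_VNODE };
 *	size_t len;
 *	char *buf;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(mib, 2, buf, &len, NULL, 0);
 */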
2237 
2238 /*
2239  * Check to see if a filesystem is mounted on a block device.
2240  */
2241 int
2242 vfs_mountedon(vp)
2243 	struct vnode *vp;
2244 {
2245 	struct vnode *vq;
2246 	int error = 0;
2247 
2248 	if (vp->v_specmountpoint != NULL)
2249 		return (EBUSY);
2250 	if (vp->v_flag & VALIASED) {
2251 		simple_lock(&spechash_slock);
2252 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2253 			if (vq->v_rdev != vp->v_rdev ||
2254 			    vq->v_type != vp->v_type)
2255 				continue;
2256 			if (vq->v_specmountpoint != NULL) {
2257 				error = EBUSY;
2258 				break;
2259 			}
2260 		}
2261 		simple_unlock(&spechash_slock);
2262 	}
2263 	return (error);
2264 }
2265 
2266 static int
2267 sacheck(struct sockaddr *sa)
2268 {
2269 	switch (sa->sa_family) {
2270 #ifdef INET
2271 	case AF_INET: {
2272 		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
2273 		char *p = (char *)sin->sin_zero;
2274 		size_t i;
2275 
2276 		if (sin->sin_len != sizeof(*sin))
2277 			return -1;
2278 		if (sin->sin_port != 0)
2279 			return -1;
2280 		for (i = 0; i < sizeof(sin->sin_zero); i++)
2281 			if (*p++ != '\0')
2282 				return -1;
2283 		return 0;
2284 	}
2285 #endif
2286 #ifdef INET6
2287 	case AF_INET6: {
2288 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
2289 
2290 		if (sin6->sin6_len != sizeof(*sin6))
2291 			return -1;
2292 		if (sin6->sin6_port != 0)
2293 			return -1;
2294 		return 0;
2295 	}
2296 #endif
2297 	default:
2298 		return -1;
2299 	}
2300 }
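
/*
 * Illustrative sketch of constructing a sockaddr_in that satisfies
 * sacheck() above: exact length, zero port, zeroed sin_zero padding.
 * The example_* name is a placeholder; this is roughly the shape of
 * the addresses a tool like mountd(8) supplies in export_args.
 */
#ifdef notdef
static void
example_fill_export_addr(struct sockaddr_in *sin, struct in_addr addr)
{
	memset(sin, 0, sizeof(*sin));	/* zeroes sin_port and sin_zero */
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr = addr;
}
#endif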
2301 
2302 /*
2303  * Build hash lists of net addresses and hang them off the mount point.
2304  * Called by ufs_mount() to set up the lists of export addresses.
2305  */
2306 static int
2307 vfs_hang_addrlist(mp, nep, argp)
2308 	struct mount *mp;
2309 	struct netexport *nep;
2310 	struct export_args *argp;
2311 {
2312 	struct netcred *np, *enp;
2313 	struct radix_node_head *rnh;
2314 	int i;
2315 	struct sockaddr *saddr, *smask = 0;
2316 	struct domain *dom;
2317 	int error;
2318 
2319 	if (argp->ex_addrlen == 0) {
2320 		if (mp->mnt_flag & MNT_DEFEXPORTED)
2321 			return (EPERM);
2322 		np = &nep->ne_defexported;
2323 		np->netc_exflags = argp->ex_flags;
2324 		crcvt(&np->netc_anon, &argp->ex_anon);
2325 		np->netc_anon.cr_ref = 1;
2326 		mp->mnt_flag |= MNT_DEFEXPORTED;
2327 		return (0);
2328 	}
2329 
2330 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN)
2331 		return (EINVAL);
2332 
2333 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2334 	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
2335 	memset((caddr_t)np, 0, i);
2336 	saddr = (struct sockaddr *)(np + 1);
2337 	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
2338 	if (error)
2339 		goto out;
2340 	if (saddr->sa_len > argp->ex_addrlen)
2341 		saddr->sa_len = argp->ex_addrlen;
2342 	if (sacheck(saddr) == -1) {
2343 		error = EINVAL;
		goto out;	/* do not leak np */
	}
2344 	if (argp->ex_masklen) {
2345 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
2346 		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
2347 		if (error)
2348 			goto out;
2349 		if (smask->sa_len > argp->ex_masklen)
2350 			smask->sa_len = argp->ex_masklen;
2351 		if (smask->sa_family != saddr->sa_family ||
2352 		    sacheck(smask) == -1) {
2353 			error = EINVAL;
2354 			goto out;	/* do not leak np */
		}
2355 	}
2356 	i = saddr->sa_family;
2357 	if ((rnh = nep->ne_rtable[i]) == 0) {
2358 		/*
2359 		 * It seems wasteful to initialize every address family
2360 		 * when most are never used, so attach them on demand here.
2361 		 */
2362 		for (dom = domains; dom; dom = dom->dom_next)
2363 			if (dom->dom_family == i && dom->dom_rtattach) {
2364 				dom->dom_rtattach((void **)&nep->ne_rtable[i],
2365 					dom->dom_rtoffset);
2366 				break;
2367 			}
2368 		if ((rnh = nep->ne_rtable[i]) == 0) {
2369 			error = ENOBUFS;
2370 			goto out;
2371 		}
2372 	}
2373 
2374 	enp = (struct netcred *)(*rnh->rnh_addaddr)(saddr, smask, rnh,
2375 	    np->netc_rnodes);
2376 	if (enp != np) {
2377 		if (enp == NULL) {
2378 			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
2379 			    smask, rnh);
2380 			if (enp == NULL) {
2381 				error = EPERM;
2382 				goto out;
2383 			}
2384 		} else
2385 			enp->netc_refcnt++;
2386 
2387 		goto check;
2388 	} else
2389 		enp->netc_refcnt = 1;
2390 
2391 	np->netc_exflags = argp->ex_flags;
2392 	crcvt(&np->netc_anon, &argp->ex_anon);
2393 	np->netc_anon.cr_ref = 1;
2394 	return 0;
2395 check:
2396 	if (enp->netc_exflags != argp->ex_flags ||
2397 	    crcmp(&enp->netc_anon, &argp->ex_anon) != 0)
2398 		error = EPERM;
2399 	else
2400 		error = 0;
2401 out:
2402 	free(np, M_NETADDR);
2403 	return error;
2404 }
2405 
2406 /* ARGSUSED */
2407 static int
2408 vfs_free_netcred(rn, w)
2409 	struct radix_node *rn;
2410 	void *w;
2411 {
2412 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2413 	struct netcred *np = (struct netcred *)(void *)rn;
2414 
2415 	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
2416 	if (--(np->netc_refcnt) <= 0)
2417 		free(np, M_NETADDR);
2418 	return (0);
2419 }
2420 
2421 /*
2422  * Free the net address hash lists that are hanging off the mount points.
2423  */
2424 static void
2425 vfs_free_addrlist(nep)
2426 	struct netexport *nep;
2427 {
2428 	int i;
2429 	struct radix_node_head *rnh;
2430 
2431 	for (i = 0; i <= AF_MAX; i++)
2432 		if ((rnh = nep->ne_rtable[i]) != NULL) {
2433 			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
2434 			free((caddr_t)rnh, M_RTABLE);
2435 			nep->ne_rtable[i] = 0;
2436 		}
2437 }
2438 
2439 int
2440 vfs_export(mp, nep, argp)
2441 	struct mount *mp;
2442 	struct netexport *nep;
2443 	struct export_args *argp;
2444 {
2445 	int error;
2446 
2447 	if (argp->ex_flags & MNT_DELEXPORT) {
2448 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2449 			vfs_setpublicfs(NULL, NULL, NULL);
2450 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2451 		}
2452 		vfs_free_addrlist(nep);
2453 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2454 	}
2455 	if (argp->ex_flags & MNT_EXPORTED) {
2456 		if (argp->ex_flags & MNT_EXPUBLIC) {
2457 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2458 				return (error);
2459 			mp->mnt_flag |= MNT_EXPUBLIC;
2460 		}
2461 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
2462 			return (error);
2463 		mp->mnt_flag |= MNT_EXPORTED;
2464 	}
2465 	return (0);
2466 }
2467 
2468 /*
2469  * Set the publicly exported filesystem (WebNFS). Currently, only
2470  * one public filesystem is possible in the spec (RFC 2054 and 2055)
2471  */
2472 int
2473 vfs_setpublicfs(mp, nep, argp)
2474 	struct mount *mp;
2475 	struct netexport *nep;
2476 	struct export_args *argp;
2477 {
2478 	int error;
2479 	struct vnode *rvp;
2480 	char *cp;
2481 
2482 	/*
2483 	 * mp == NULL -> invalidate the current info, the FS is
2484 	 * no longer exported. May be called from either vfs_export
2485 	 * or unmount, so check if it hasn't already been done.
2486 	 */
2487 	if (mp == NULL) {
2488 		if (nfs_pub.np_valid) {
2489 			nfs_pub.np_valid = 0;
2490 			if (nfs_pub.np_index != NULL) {
2491 				FREE(nfs_pub.np_index, M_TEMP);
2492 				nfs_pub.np_index = NULL;
2493 			}
2494 		}
2495 		return (0);
2496 	}
2497 
2498 	/*
2499 	 * Only one allowed at a time.
2500 	 */
2501 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2502 		return (EBUSY);
2503 
2504 	/*
2505 	 * Get real filehandle for root of exported FS.
2506 	 */
2507 	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
2508 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsidx;
2509 
2510 	if ((error = VFS_ROOT(mp, &rvp)))
2511 		return (error);
2512 
2513 	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
2514 		return (error);
2515 
2516 	vput(rvp);
2517 
2518 	/*
2519 	 * If an indexfile was specified, pull it in.
2520 	 */
2521 	if (argp->ex_indexfile != NULL) {
2522 		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
2523 		    M_WAITOK);
2524 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2525 		    MAXNAMLEN, (size_t *)0);
2526 		if (!error) {
2527 			/*
2528 			 * Check for illegal filenames.
2529 			 */
2530 			for (cp = nfs_pub.np_index; *cp; cp++) {
2531 				if (*cp == '/') {
2532 					error = EINVAL;
2533 					break;
2534 				}
2535 			}
2536 		}
2537 		if (error) {
2538 			FREE(nfs_pub.np_index, M_TEMP);
2539 			return (error);
2540 		}
2541 	}
2542 
2543 	nfs_pub.np_mount = mp;
2544 	nfs_pub.np_valid = 1;
2545 	return (0);
2546 }
2547 
2548 struct netcred *
2549 vfs_export_lookup(mp, nep, nam)
2550 	struct mount *mp;
2551 	struct netexport *nep;
2552 	struct mbuf *nam;
2553 {
2554 	struct netcred *np;
2555 	struct radix_node_head *rnh;
2556 	struct sockaddr *saddr;
2557 
2558 	np = NULL;
2559 	if (mp->mnt_flag & MNT_EXPORTED) {
2560 		/*
2561 		 * Lookup in the export list first.
2562 		 */
2563 		if (nam != NULL) {
2564 			saddr = mtod(nam, struct sockaddr *);
2565 			rnh = nep->ne_rtable[saddr->sa_family];
2566 			if (rnh != NULL) {
2567 				np = (struct netcred *)
2568 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2569 							      rnh);
2570 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2571 					np = NULL;
2572 			}
2573 		}
2574 		/*
2575 		 * If no address match, use the default if it exists.
2576 		 */
2577 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2578 			np = &nep->ne_defexported;
2579 	}
2580 	return (np);
2581 }
2582 
2583 /*
2584  * Do the usual access checking.
2585  * file_mode, uid and gid are from the vnode in question,
2586  * while acc_mode and cred are from the VOP_ACCESS parameter list
2587  * while acc_mode and cred are from the VOP_ACCESS parameter list.
2588 int
2589 vaccess(type, file_mode, uid, gid, acc_mode, cred)
2590 	enum vtype type;
2591 	mode_t file_mode;
2592 	uid_t uid;
2593 	gid_t gid;
2594 	mode_t acc_mode;
2595 	struct ucred *cred;
2596 {
2597 	mode_t mask;
2598 
2599 	/*
2600 	 * Super-user always gets read/write access, but execute access depends
2601 	 * on at least one execute bit being set.
2602 	 */
2603 	if (cred->cr_uid == 0) {
2604 		if ((acc_mode & VEXEC) && type != VDIR &&
2605 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
2606 			return (EACCES);
2607 		return (0);
2608 	}
2609 
2610 	mask = 0;
2611 
2612 	/* Otherwise, check the owner. */
2613 	if (cred->cr_uid == uid) {
2614 		if (acc_mode & VEXEC)
2615 			mask |= S_IXUSR;
2616 		if (acc_mode & VREAD)
2617 			mask |= S_IRUSR;
2618 		if (acc_mode & VWRITE)
2619 			mask |= S_IWUSR;
2620 		return ((file_mode & mask) == mask ? 0 : EACCES);
2621 	}
2622 
2623 	/* Otherwise, check the groups. */
2624 	if (cred->cr_gid == gid || groupmember(gid, cred)) {
2625 		if (acc_mode & VEXEC)
2626 			mask |= S_IXGRP;
2627 		if (acc_mode & VREAD)
2628 			mask |= S_IRGRP;
2629 		if (acc_mode & VWRITE)
2630 			mask |= S_IWGRP;
2631 		return ((file_mode & mask) == mask ? 0 : EACCES);
2632 	}
2633 
2634 	/* Otherwise, check everyone else. */
2635 	if (acc_mode & VEXEC)
2636 		mask |= S_IXOTH;
2637 	if (acc_mode & VREAD)
2638 		mask |= S_IROTH;
2639 	if (acc_mode & VWRITE)
2640 		mask |= S_IWOTH;
2641 	return ((file_mode & mask) == mask ? 0 : EACCES);
2642 }
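
/*
 * Worked example: for a regular file with mode 0640, uid 100, gid 50,
 * a VREAD|VWRITE request by a non-root credential with cr_uid == 100
 * takes the owner branch: mask = S_IRUSR|S_IWUSR = 0600, and
 * (0640 & 0600) == 0600, so the call returns 0.  The same request
 * from a different uid that is a member of gid 50 builds
 * mask = S_IRGRP|S_IWGRP = 0060, and (0640 & 0060) == 0040 != 0060,
 * so it returns EACCES.
 */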
2643 
2644 /*
2645  * Unmount all file systems.
2646  * We traverse the list in reverse order under the assumption that doing so
2647  * will avoid needing to worry about dependencies.
2648  */
2649 void
2650 vfs_unmountall(p)
2651 	struct proc *p;
2652 {
2653 	struct mount *mp, *nmp;
2654 	int allerror, error;
2655 
2656 	for (allerror = 0,
2657 	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
2658 		nmp = mp->mnt_list.cqe_prev;
2659 #ifdef DEBUG
2660 		printf("unmounting %s (%s)...\n",
2661 		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
2662 #endif
2663 		/*
2664 		 * XXX Freeze syncer.  Must do this before locking the
2665 		 * mount point.  See dounmount() for details.
2666 		 */
2667 		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
2668 		if (vfs_busy(mp, 0, 0)) {
2669 			lockmgr(&syncer_lock, LK_RELEASE, NULL);
2670 			continue;
2671 		}
2672 		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
2673 			printf("unmount of %s failed with error %d\n",
2674 			    mp->mnt_stat.f_mntonname, error);
2675 			allerror = 1;
2676 		}
2677 	}
2678 	if (allerror)
2679 		printf("WARNING: some file systems would not unmount\n");
2680 }
2681 
2682 extern struct simplelock bqueue_slock; /* XXX */
2683 
2684 /*
2685  * Sync and unmount file systems before shutting down.
2686  */
2687 void
2688 vfs_shutdown()
2689 {
2690 	struct lwp *l = curlwp;
2691 	struct proc *p;
2692 
2693 	/* XXX we're certainly not running in proc0's context! */
2694 	if (l == NULL || (p = l->l_proc) == NULL)
2695 		p = &proc0;
2696 
2697 	printf("syncing disks... ");
2698 
2699 	/* remove user process from run queue */
2700 	suspendsched();
2701 	(void) spl0();
2702 
2703 	/* avoid coming back this way again if we panic. */
2704 	doing_shutdown = 1;
2705 
2706 	sys_sync(l, NULL, NULL);
2707 
2708 	/* Wait for sync to finish. */
2709 	if (buf_syncwait() != 0) {
2710 #if defined(DDB) && defined(DEBUG_HALT_BUSY)
2711 		Debugger();
2712 #endif
2713 		printf("giving up\n");
2714 		return;
2715 	} else
2716 		printf("done\n");
2717 
2718 	/*
2719 	 * If we've panic'd, don't make the situation potentially
2720 	 * worse by unmounting the file systems.
2721 	 */
2722 	if (panicstr != NULL)
2723 		return;
2724 
2725 	/* Release inodes held by texts before update. */
2726 #ifdef notdef
2727 	vnshutdown();
2728 #endif
2729 	/* Unmount file systems. */
2730 	vfs_unmountall(p);
2731 }
2732 
2733 /*
2734  * Mount the root file system.  If the operator didn't specify a
2735  * file system to use, try all possible file systems until one
2736  * succeeds.
2737  */
2738 int
2739 vfs_mountroot()
2740 {
2741 	struct vfsops *v;
2742 
2743 	if (root_device == NULL)
2744 		panic("vfs_mountroot: root device unknown");
2745 
2746 	switch (root_device->dv_class) {
2747 	case DV_IFNET:
2748 		if (rootdev != NODEV)
2749 			panic("vfs_mountroot: rootdev set for DV_IFNET "
2750 			    "(0x%08x -> %d,%d)", rootdev,
2751 			    major(rootdev), minor(rootdev));
2752 		break;
2753 
2754 	case DV_DISK:
2755 		if (rootdev == NODEV)
2756 			panic("vfs_mountroot: rootdev not set for DV_DISK");
2757 		break;
2758 
2759 	default:
2760 		printf("%s: inappropriate for root file system\n",
2761 		    root_device->dv_xname);
2762 		return (ENODEV);
2763 	}
2764 
2765 	/*
2766 	 * If user specified a file system, use it.
2767 	 */
2768 	if (mountroot != NULL)
2769 		return ((*mountroot)());
2770 
2771 	/*
2772 	 * Try each file system currently configured into the kernel.
2773 	 */
2774 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2775 		if (v->vfs_mountroot == NULL)
2776 			continue;
2777 #ifdef DEBUG
2778 		aprint_normal("mountroot: trying %s...\n", v->vfs_name);
2779 #endif
2780 		if ((*v->vfs_mountroot)() == 0) {
2781 			aprint_normal("root file system type: %s\n",
2782 			    v->vfs_name);
2783 			break;
2784 		}
2785 	}
2786 
2787 	if (v == NULL) {
2788 		printf("no file system for %s", root_device->dv_xname);
2789 		if (root_device->dv_class == DV_DISK)
2790 			printf(" (dev 0x%x)", rootdev);
2791 		printf("\n");
2792 		return (EFTYPE);
2793 	}
2794 	return (0);
2795 }
2796 
2797 /*
2798  * Given a file system name, look up the vfsops for that
2799  * file system, or return NULL if file system isn't present
2800  * in the kernel.
2801  */
2802 struct vfsops *
2803 vfs_getopsbyname(name)
2804 	const char *name;
2805 {
2806 	struct vfsops *v;
2807 
2808 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2809 		if (strcmp(v->vfs_name, name) == 0)
2810 			break;
2811 	}
2812 
2813 	return (v);
2814 }
2815 
2816 /*
2817  * Establish a file system and initialize it.
2818  */
2819 int
2820 vfs_attach(vfs)
2821 	struct vfsops *vfs;
2822 {
2823 	struct vfsops *v;
2824 	int error = 0;
2825 
2827 	/*
2828 	 * Make sure this file system doesn't already exist.
2829 	 */
2830 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2831 		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
2832 			error = EEXIST;
2833 			goto out;
2834 		}
2835 	}
2836 
2837 	/*
2838 	 * Initialize the vnode operations for this file system.
2839 	 */
2840 	vfs_opv_init(vfs->vfs_opv_descs);
2841 
2842 	/*
2843 	 * Now initialize the file system itself.
2844 	 */
2845 	(*vfs->vfs_init)();
2846 
2847 	/*
2848 	 * ...and link it into the kernel's list.
2849 	 */
2850 	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);
2851 
2852 	/*
2853 	 * Sanity: make sure the reference count is 0.
2854 	 */
2855 	vfs->vfs_refcount = 0;
2856 
2857  out:
2858 	return (error);
2859 }
2860 
2861 /*
2862  * Remove a file system from the kernel.
2863  */
2864 int
2865 vfs_detach(vfs)
2866 	struct vfsops *vfs;
2867 {
2868 	struct vfsops *v;
2869 
2870 	/*
2871 	 * Make sure no one is using the filesystem.
2872 	 */
2873 	if (vfs->vfs_refcount != 0)
2874 		return (EBUSY);
2875 
2876 	/*
2877 	 * ...and remove it from the kernel's list.
2878 	 */
2879 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2880 		if (v == vfs) {
2881 			LIST_REMOVE(v, vfs_list);
2882 			break;
2883 		}
2884 	}
2885 
2886 	if (v == NULL)
2887 		return (ESRCH);
2888 
2889 	/*
2890 	 * Now run the file system-specific cleanups.
2891 	 */
2892 	(*vfs->vfs_done)();
2893 
2894 	/*
2895 	 * Free the vnode operations vector.
2896 	 */
2897 	vfs_opv_free(vfs->vfs_opv_descs);
2898 	return (0);
2899 }
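
/*
 * Illustrative sketch of registering and unregistering a file system
 * with the list above; "examplefs" and its vfsops are placeholders.
 * Real file systems do this from vfsinit() at boot or from an LKM's
 * entry points.
 */
#ifdef notdef
extern struct vfsops examplefs_vfsops;		/* placeholder */

static int
examplefs_modload(void)
{

	/* EEXIST if a file system of the same name is already attached. */
	return (vfs_attach(&examplefs_vfsops));
}

static int
examplefs_modunload(void)
{

	/* EBUSY while any reference remains (vfs_refcount != 0). */
	return (vfs_detach(&examplefs_vfsops));
}
#endif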
2900 
2901 void
2902 vfs_reinit(void)
2903 {
2904 	struct vfsops *vfs;
2905 
2906 	LIST_FOREACH(vfs, &vfs_list, vfs_list) {
2907 		if (vfs->vfs_reinit) {
2908 			(*vfs->vfs_reinit)();
2909 		}
2910 	}
2911 }
2912 
2913 /*
2914  * Request a filesystem to suspend write operations.
2915  */
2916 int
2917 vfs_write_suspend(struct mount *mp, int slpflag, int slptimeo)
2918 {
2919 	struct proc *p = curproc;	/* XXX */
2920 	int error;
2921 
2922 	while ((mp->mnt_iflag & IMNT_SUSPEND)) {
2923 		if (slptimeo < 0)
2924 			return EWOULDBLOCK;
2925 		error = tsleep(&mp->mnt_flag, slpflag, "suspwt1", slptimeo);
2926 		if (error)
2927 			return error;
2928 	}
2929 	mp->mnt_iflag |= IMNT_SUSPEND;
2930 
2931 	simple_lock(&mp->mnt_slock);
2932 	if (mp->mnt_writeopcountupper > 0)
2933 		ltsleep(&mp->mnt_writeopcountupper, PUSER - 1, "suspwt",
2934 			0, &mp->mnt_slock);
2935 	simple_unlock(&mp->mnt_slock);
2936 
2937 	error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
2938 	if (error) {
2939 		vfs_write_resume(mp);
2940 		return error;
2941 	}
2942 	mp->mnt_iflag |= IMNT_SUSPENDLOW;
2943 
2944 	simple_lock(&mp->mnt_slock);
2945 	if (mp->mnt_writeopcountlower > 0)
2946 		ltsleep(&mp->mnt_writeopcountlower, PUSER - 1, "suspwt",
2947 			0, &mp->mnt_slock);
2948 	mp->mnt_iflag |= IMNT_SUSPENDED;
2949 	simple_unlock(&mp->mnt_slock);
2950 
2951 	return 0;
2952 }
2953 
2954 /*
2955  * Request a filesystem to resume write operations.
2956  */
2957 void
2958 vfs_write_resume(struct mount *mp)
2959 {
2960 
2961 	if ((mp->mnt_iflag & IMNT_SUSPEND) == 0)
2962 		return;
2963 	mp->mnt_iflag &= ~(IMNT_SUSPEND | IMNT_SUSPENDLOW | IMNT_SUSPENDED);
2964 	wakeup(&mp->mnt_flag);
2965 }
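
/*
 * Illustrative sketch of the intended pairing of the two routines
 * above, in the style of a snapshot facility: freeze writes, capture
 * the now-quiescent file system, resume.  example_capture() is a
 * placeholder for the real work.
 */
#ifdef notdef
static int
example_snapshot(struct mount *mp)
{
	int error;

	error = vfs_write_suspend(mp, PUSER, 0);	/* may sleep */
	if (error != 0)
		return (error);
	error = example_capture(mp);			/* placeholder */
	vfs_write_resume(mp);
	return (error);
}
#endif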
2966 
2967 void
2968 copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
2969 {
2970 	const struct statvfs *mbp;
2971 
2972 	if (sbp == (mbp = &mp->mnt_stat))
2973 		return;
2974 
2975 	(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
2976 	sbp->f_fsid = mbp->f_fsid;
2977 	sbp->f_owner = mbp->f_owner;
2978 	sbp->f_flag = mbp->f_flag;
2979 	sbp->f_syncwrites = mbp->f_syncwrites;
2980 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2981 	sbp->f_syncreads = mbp->f_syncreads;
2982 	sbp->f_asyncreads = mbp->f_asyncreads;
2983 	(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
2984 	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
2985 	    sizeof(sbp->f_fstypename));
2986 	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
2987 	    sizeof(sbp->f_mntonname));
2988 	(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
2989 	    sizeof(sbp->f_mntfromname));
2990 }
2991 
2992 int
2993 set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
2994     struct mount *mp, struct proc *p)
2995 {
2996 	int error;
2997 	size_t size;
2998 	struct statvfs *sfs = &mp->mnt_stat;
2999 	int (*fun)(const void *, void *, size_t, size_t *);
3000 
3001 	(void)strncpy(mp->mnt_stat.f_fstypename, mp->mnt_op->vfs_name,
3002 	    sizeof(mp->mnt_stat.f_fstypename));
3003 
3004 	if (onp) {
3005 		struct cwdinfo *cwdi = p->p_cwdi;
3006 		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
3007 		if (cwdi->cwdi_rdir != NULL) {
3008 			size_t len;
3009 			char *bp;
3010 			char *path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3011 
3012 			if (!path) /* XXX can't happen with M_WAITOK */
3013 				return ENOMEM;
3014 
3015 			bp = path + MAXPATHLEN;
3016 			*--bp = '\0';
3017 			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
3018 			    path, MAXPATHLEN / 2, 0, p);
3019 			if (error) {
3020 				free(path, M_TEMP);
3021 				return error;
3022 			}
3023 
3024 			len = strlen(bp);
3025 			if (len > sizeof(sfs->f_mntonname) - 1)
3026 				len = sizeof(sfs->f_mntonname) - 1;
3027 			(void)strncpy(sfs->f_mntonname, bp, len);
3028 			free(path, M_TEMP);
3029 
3030 			if (len < sizeof(sfs->f_mntonname) - 1) {
3031 				error = (*fun)(onp, &sfs->f_mntonname[len],
3032 				    sizeof(sfs->f_mntonname) - len - 1, &size);
3033 				if (error)
3034 					return error;
3035 				size += len;
3036 			} else {
3037 				size = len;
3038 			}
3039 		} else {
3040 			error = (*fun)(onp, &sfs->f_mntonname,
3041 			    sizeof(sfs->f_mntonname) - 1, &size);
3042 			if (error)
3043 				return error;
3044 		}
3045 		(void)memset(sfs->f_mntonname + size, 0,
3046 		    sizeof(sfs->f_mntonname) - size);
3047 	}
3048 
3049 	if (fromp) {
3050 		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
3051 		error = (*fun)(fromp, sfs->f_mntfromname,
3052 		    sizeof(sfs->f_mntfromname) - 1, &size);
3053 		if (error)
3054 			return error;
3055 		(void)memset(sfs->f_mntfromname + size, 0,
3056 		    sizeof(sfs->f_mntfromname) - size);
3057 	}
3058 	return 0;
3059 }
3060 
3061 #ifdef DDB
3062 const char buf_flagbits[] =
3063 	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
3064 	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
3065 	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
3066 	"\32XXX\33VFLUSH";
3067 
3068 void
3069 vfs_buf_print(bp, full, pr)
3070 	struct buf *bp;
3071 	int full;
3072 	void (*pr)(const char *, ...);
3073 {
3074 	char buf[1024];
3075 
3076 	(*pr)("  vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" dev 0x%x\n",
3077 		  bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);
3078 
3079 	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
3080 	(*pr)("  error %d flags 0x%s\n", bp->b_error, buf);
3081 
3082 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
3083 		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
3084 	(*pr)("  data %p saveaddr %p dep %p\n",
3085 		  bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
3086 	(*pr)("  iodone %p\n", bp->b_iodone);
3087 }
3088 
3090 const char vnode_flagbits[] =
3091 	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\5EXECMAP"
3092 	"\11XLOCK\12XWANT\13BWAIT\14ALIASED"
3093 	"\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";
3094 
3095 const char * const vnode_tags[] = {
3096 	"VT_NON",
3097 	"VT_UFS",
3098 	"VT_NFS",
3099 	"VT_MFS",
3100 	"VT_MSDOSFS",
3101 	"VT_LFS",
3102 	"VT_LOFS",
3103 	"VT_FDESC",
3104 	"VT_PORTAL",
3105 	"VT_NULL",
3106 	"VT_UMAP",
3107 	"VT_KERNFS",
3108 	"VT_PROCFS",
3109 	"VT_AFS",
3110 	"VT_ISOFS",
3111 	"VT_UNION",
3112 	"VT_ADOSFS",
3113 	"VT_EXT2FS",
3114 	"VT_CODA",
3115 	"VT_FILECORE",
3116 	"VT_NTFS",
3117 	"VT_VFS",
3118 	"VT_OVERLAY",
3119 	"VT_SMBFS"
3120 };
3121 
3122 void
3123 vfs_vnode_print(vp, full, pr)
3124 	struct vnode *vp;
3125 	int full;
3126 	void (*pr)(const char *, ...);
3127 {
3128 	char buf[256];
3129 	const char *vtype, *vtag;
3130 
3131 	uvm_object_printit(&vp->v_uobj, full, pr);
3132 	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
3133 	(*pr)("\nVNODE flags %s\n", buf);
3134 	(*pr)("mp %p numoutput %d size 0x%llx\n",
3135 	      vp->v_mount, vp->v_numoutput, vp->v_size);
3136 
3137 	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
3138 	      vp->v_data, vp->v_usecount, vp->v_writecount,
3139 	      vp->v_holdcnt, vp->v_numoutput);
3140 
3141 	vtype = (vp->v_type >= 0 &&
3142 		 vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
3143 		vnode_types[vp->v_type] : "UNKNOWN";
3144 	vtag = (vp->v_tag >= 0 &&
3145 		vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
3146 		vnode_tags[vp->v_tag] : "UNKNOWN";
3147 
3148 	(*pr)("type %s(%d) tag %s(%d) mount %p typedata %p\n",
3149 	      vtype, vp->v_type, vtag, vp->v_tag,
3150 	      vp->v_mount, vp->v_mountedhere);
3151 
3152 	if (full) {
3153 		struct buf *bp;
3154 
3155 		(*pr)("clean bufs:\n");
3156 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
3157 			(*pr)(" bp %p\n", bp);
3158 			vfs_buf_print(bp, full, pr);
3159 		}
3160 
3161 		(*pr)("dirty bufs:\n");
3162 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
3163 			(*pr)(" bp %p\n", bp);
3164 			vfs_buf_print(bp, full, pr);
3165 		}
3166 	}
3167 }
3168 
3169 void
3170 vfs_mount_print(mp, full, pr)
3171 	struct mount *mp;
3172 	int full;
3173 	void (*pr)(const char *, ...);
3174 {
3175 	char sbuf[256];
3176 
3177 	(*pr)("vnodecovered = %p syncer = %p data = %p\n",
3178 			mp->mnt_vnodecovered,mp->mnt_syncer,mp->mnt_data);
3179 
3180 	(*pr)("fs_bshift %d dev_bshift = %d maxsymlinklen = %d\n",
3181 			mp->mnt_fs_bshift,mp->mnt_dev_bshift,mp->mnt_maxsymlinklen);
3182 
3183 	bitmask_snprintf(mp->mnt_flag, __MNT_FLAG_BITS, sbuf, sizeof(sbuf));
3184 	(*pr)("flag = %s\n", sbuf);
3185 
3186 	bitmask_snprintf(mp->mnt_iflag, __IMNT_FLAG_BITS, sbuf, sizeof(sbuf));
3187 	(*pr)("iflag = %s\n", sbuf);
3188 
3189 	/* XXX use lockmgr_printinfo */
3190 	if (mp->mnt_lock.lk_sharecount)
3191 		(*pr)(" lock type %s: SHARED (count %d)", mp->mnt_lock.lk_wmesg,
3192 		    mp->mnt_lock.lk_sharecount);
3193 	else if (mp->mnt_lock.lk_flags & LK_HAVE_EXCL) {
3194 		(*pr)(" lock type %s: EXCL (count %d) by ",
3195 		    mp->mnt_lock.lk_wmesg, mp->mnt_lock.lk_exclusivecount);
3196 		if (mp->mnt_lock.lk_flags & LK_SPIN)
3197 			(*pr)("processor %lu", mp->mnt_lock.lk_cpu);
3198 		else
3199 			(*pr)("pid %d.%d", mp->mnt_lock.lk_lockholder,
3200 			    mp->mnt_lock.lk_locklwp);
3201 	} else
3202 		(*pr)(" not locked");
3203 	if ((mp->mnt_lock.lk_flags & LK_SPIN) == 0 && mp->mnt_lock.lk_waitcount > 0)
3204 		(*pr)(" with %d pending", mp->mnt_lock.lk_waitcount);
3205 
3206 	(*pr)("\n");
3207 
3208 	if (mp->mnt_unmounter) {
3209 		(*pr)("unmounter pid = %d ",mp->mnt_unmounter->p_pid);
3210 	}
3211 	(*pr)("wcnt = %d, writeopcountupper = %d, writeopcountlower = %d\n",
3212 		mp->mnt_wcnt,mp->mnt_writeopcountupper,mp->mnt_writeopcountlower);
3213 
3214 	(*pr)("statvfs cache:\n");
3215 	(*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
3216 	(*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
3217 	(*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
3218 
3219 	(*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks);
3220 	(*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree);
3221 	(*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail);
3222 	(*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd);
3223 
3224 	(*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files);
3225 	(*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree);
3226 	(*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail);
3227 	(*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd);
3228 
3229 	(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
3230 			mp->mnt_stat.f_fsidx.__fsid_val[0],
3231 			mp->mnt_stat.f_fsidx.__fsid_val[1]);
3232 
3233 	(*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
3234 	(*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
3235 
3236 	bitmask_snprintf(mp->mnt_stat.f_flag, __MNT_FLAG_BITS, sbuf,
3237 	    sizeof(sbuf));
3238 	(*pr)("\tflag = %s\n",sbuf);
3239 	(*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites);
3240 	(*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
3241 	(*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads);
3242 	(*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads);
3243 	(*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
3244 	(*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
3245 	(*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
3246 
3247 	{
3248 		int cnt = 0;
3249 		struct vnode *vp;
3250 		(*pr)("locked vnodes =");
3251 		/* XXX would take mountlist lock, except ddb may not have context */
3252 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
3253 			if (VOP_ISLOCKED(vp)) {
3254 				if ((++cnt % 6) == 0) {
3255 					(*pr)(" %p,\n\t", vp);
3256 				} else {
3257 					(*pr)(" %p,", vp);
3258 				}
3259 			}
3260 		}
3261 		(*pr)("\n");
3262 	}
3263 
3264 	if (full) {
3265 		int cnt = 0;
3266 		struct vnode *vp;
3267 		(*pr)("all vnodes =");
3268 		/* XXX would take mountlist lock, except ddb may not have context */
3269 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
3270 			if (!LIST_NEXT(vp, v_mntvnodes)) {
3271 				(*pr)(" %p", vp);
3272 			} else if ((++cnt % 6) == 0) {
3273 				(*pr)(" %p,\n\t", vp);
3274 			} else {
3275 				(*pr)(" %p,", vp);
3276 			}
3277 		}
3278 		(*pr)("\n");
3279 	}
3280 }
3281 
3282 #endif
3283