xref: /netbsd-src/sys/kern/vfs_subr.c (revision 21e37cc72a480a47828990a439cde7ac9ffaf0c6)
1 /*	$NetBSD: vfs_subr.c,v 1.229 2004/06/19 06:20:02 yamt Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1989, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. Neither the name of the University nor the names of its contributors
58  *    may be used to endorse or promote products derived from this software
59  *    without specific prior written permission.
60  *
61  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71  * SUCH DAMAGE.
72  *
73  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
74  */
75 
76 /*
77  * External virtual filesystem routines
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.229 2004/06/19 06:20:02 yamt Exp $");
82 
83 #include "opt_inet.h"
84 #include "opt_ddb.h"
85 #include "opt_compat_netbsd.h"
86 #include "opt_compat_43.h"
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/proc.h>
91 #include <sys/kernel.h>
92 #include <sys/mount.h>
93 #include <sys/time.h>
94 #include <sys/event.h>
95 #include <sys/fcntl.h>
96 #include <sys/vnode.h>
97 #include <sys/stat.h>
98 #include <sys/namei.h>
99 #include <sys/ucred.h>
100 #include <sys/buf.h>
101 #include <sys/errno.h>
102 #include <sys/malloc.h>
103 #include <sys/domain.h>
104 #include <sys/mbuf.h>
105 #include <sys/sa.h>
106 #include <sys/syscallargs.h>
107 #include <sys/device.h>
108 #include <sys/dirent.h>
109 #include <sys/filedesc.h>
110 
111 #include <miscfs/specfs/specdev.h>
112 #include <miscfs/genfs/genfs.h>
113 #include <miscfs/syncfs/syncfs.h>
114 
115 #include <netinet/in.h>
116 
117 #include <uvm/uvm.h>
118 #include <uvm/uvm_ddb.h>
119 
122 #include <sys/sysctl.h>
123 
124 const enum vtype iftovt_tab[16] = {
125 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
126 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
127 };
128 const int	vttoif_tab[9] = {
129 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
130 	S_IFSOCK, S_IFIFO, S_IFMT,
131 };
132 
133 int doforce = 1;		/* 1 => permit forcible unmounting */
134 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
135 
136 extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */
137 
138 /*
139  * Insq/Remq for the vnode usage lists.
140  */
141 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
142 #define	bufremvn(bp) {							\
143 	LIST_REMOVE(bp, b_vnbufs);					\
144 	(bp)->b_vnbufs.le_next = NOLIST;				\
145 }
146 /* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
147 struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
148 struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);
149 
150 struct mntlist mountlist =			/* mounted filesystem list */
151     CIRCLEQ_HEAD_INITIALIZER(mountlist);
152 struct vfs_list_head vfs_list =			/* vfs list */
153     LIST_HEAD_INITIALIZER(vfs_list);
154 
155 struct nfs_public nfs_pub;			/* publicly exported FS */
156 
157 struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
158 static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
159 struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
160 struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
161 struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;
162 
163 /* XXX - gross; single global lock to protect v_numoutput */
164 struct simplelock global_v_numoutput_slock = SIMPLELOCK_INITIALIZER;
165 
166 /*
167  * These define the root filesystem and device.
168  */
169 struct mount *rootfs;
170 struct vnode *rootvnode;
171 struct device *root_device;			/* root device */
172 
173 POOL_INIT(vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
174     &pool_allocator_nointr);
175 
176 MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");
177 
178 /*
179  * Local declarations.
180  */
181 void insmntque(struct vnode *, struct mount *);
182 int getdevvp(dev_t, struct vnode **, enum vtype);
183 void vgoneall(struct vnode *);
184 
185 void vclean(struct vnode *, int, struct proc *);
186 
187 static int vfs_hang_addrlist(struct mount *, struct netexport *,
188 			     struct export_args *);
189 static int vfs_free_netcred(struct radix_node *, void *);
190 static void vfs_free_addrlist(struct netexport *);
191 static struct vnode *getcleanvnode(struct proc *);
192 
193 #ifdef DEBUG
194 void printlockedvnodes(void);
195 #endif
196 
197 /*
198  * Initialize the vnode management data structures.
199  */
200 void
201 vntblinit()
202 {
203 
204 	/*
205 	 * Initialize the filesystem syncer.
206 	 */
207 	vn_initialize_syncerd();
208 }
209 
210 int
211 vfs_drainvnodes(long target, struct proc *p)
212 {
213 
214 	simple_lock(&vnode_free_list_slock);
215 	while (numvnodes > target) {
216 		struct vnode *vp;
217 
218 		vp = getcleanvnode(p);
219 		if (vp == NULL)
220 			return EBUSY; /* give up */
221 		pool_put(&vnode_pool, vp);
222 		simple_lock(&vnode_free_list_slock);
223 		numvnodes--;
224 	}
225 	simple_unlock(&vnode_free_list_slock);
226 
227 	return 0;
228 }
229 
230 /*
231  * grab a vnode from freelist and clean it.
232  */
233 struct vnode *
234 getcleanvnode(p)
235 	struct proc *p;
236 {
237 	struct vnode *vp;
238 	struct mount *mp;
239 	struct freelst *listhd;
240 
241 	LOCK_ASSERT(simple_lock_held(&vnode_free_list_slock));
242 
243 	listhd = &vnode_free_list;
244 try_nextlist:
245 	TAILQ_FOREACH(vp, listhd, v_freelist) {
246 		if (!simple_lock_try(&vp->v_interlock))
247 			continue;
248 		/*
249 		 * As our LWP might hold the underlying vnode locked,
250 		 * don't try to reclaim a VLAYER vnode if it's locked.
251 		 */
252 		if ((vp->v_flag & VXLOCK) == 0 &&
253 		    ((vp->v_flag & VLAYER) == 0 || VOP_ISLOCKED(vp) == 0)) {
254 			if (vn_start_write(vp, &mp, V_NOWAIT) == 0)
255 				break;
256 		}
257 		mp = NULL;
258 		simple_unlock(&vp->v_interlock);
259 	}
260 
261 	if (vp == NULLVP) {
262 		if (listhd == &vnode_free_list) {
263 			listhd = &vnode_hold_list;
264 			goto try_nextlist;
265 		}
266 		simple_unlock(&vnode_free_list_slock);
267 		return NULLVP;
268 	}
269 
270 	if (vp->v_usecount)
271 		panic("free vnode isn't, vp %p", vp);
272 	TAILQ_REMOVE(listhd, vp, v_freelist);
273 	/* see comment on why 0xdeadb is set at end of vgone (below) */
274 	vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
275 	simple_unlock(&vnode_free_list_slock);
276 	vp->v_lease = NULL;
277 
278 	if (vp->v_type != VBAD)
279 		vgonel(vp, p);
280 	else
281 		simple_unlock(&vp->v_interlock);
282 	vn_finished_write(mp, 0);
283 #ifdef DIAGNOSTIC
284 	if (vp->v_data || vp->v_uobj.uo_npages ||
285 	    TAILQ_FIRST(&vp->v_uobj.memq))
286 		panic("cleaned vnode isn't, vp %p", vp);
287 	if (vp->v_numoutput)
288 		panic("clean vnode has pending I/O's, vp %p", vp);
289 #endif
290 	KASSERT((vp->v_flag & VONWORKLST) == 0);
291 
292 	return vp;
293 }
294 
295 /*
296  * Mark a mount point as busy. Used to synchronize access and to delay
297  * unmounting. Interlock is not released on failure.
298  */
299 int
300 vfs_busy(mp, flags, interlkp)
301 	struct mount *mp;
302 	int flags;
303 	struct simplelock *interlkp;
304 {
305 	int lkflags;
306 
307 	while (mp->mnt_iflag & IMNT_UNMOUNT) {
308 		int gone, n;
309 
310 		if (flags & LK_NOWAIT)
311 			return (ENOENT);
312 		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
313 		    && mp->mnt_unmounter == curproc)
314 			return (EDEADLK);
315 		if (interlkp)
316 			simple_unlock(interlkp);
317 		/*
318 		 * Since all busy locks are shared except the exclusive
319 		 * lock granted when unmounting, the only place that a
320 		 * wakeup needs to be done is at the release of the
321 		 * exclusive lock at the end of dounmount.
322 		 */
323 		simple_lock(&mp->mnt_slock);
324 		mp->mnt_wcnt++;
325 		ltsleep((caddr_t)mp, PVFS, "vfs_busy", 0, &mp->mnt_slock);
326 		n = --mp->mnt_wcnt;
327 		simple_unlock(&mp->mnt_slock);
328 		gone = mp->mnt_iflag & IMNT_GONE;
329 
330 		if (n == 0)
331 			wakeup(&mp->mnt_wcnt);
332 		if (interlkp)
333 			simple_lock(interlkp);
334 		if (gone)
335 			return (ENOENT);
336 	}
337 	lkflags = LK_SHARED;
338 	if (interlkp)
339 		lkflags |= LK_INTERLOCK;
340 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
341 		panic("vfs_busy: unexpected lock failure");
342 	return (0);
343 }
344 
345 /*
346  * Free a busy filesystem.
347  */
348 void
349 vfs_unbusy(mp)
350 	struct mount *mp;
351 {
352 
353 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
354 }
355 
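/*
 * Illustrative sketch of the usual pairing: a caller that wants to walk a
 * mount's vnode list while holding off unmount might do something like the
 * following (printlockedvnodes() below is a real instance of this pattern):
 *
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock) == 0) {
 *		... work with mp->mnt_vnodelist ...
 *		vfs_unbusy(mp);
 *	}
 */
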
356 /*
357  * Lookup a filesystem type, and if found allocate and initialize
358  * a mount structure for it.
359  *
360  * Devname is usually updated by mount(8) after booting.
361  */
362 int
363 vfs_rootmountalloc(fstypename, devname, mpp)
364 	char *fstypename;
365 	char *devname;
366 	struct mount **mpp;
367 {
368 	struct vfsops *vfsp = NULL;
369 	struct mount *mp;
370 
371 	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
372 		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
373 			break;
374 
375 	if (vfsp == NULL)
376 		return (ENODEV);
377 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
378 	memset((char *)mp, 0, (u_long)sizeof(struct mount));
379 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
380 	simple_lock_init(&mp->mnt_slock);
381 	(void)vfs_busy(mp, LK_NOWAIT, 0);
382 	LIST_INIT(&mp->mnt_vnodelist);
383 	mp->mnt_op = vfsp;
384 	mp->mnt_flag = MNT_RDONLY;
385 	mp->mnt_vnodecovered = NULLVP;
386 	vfsp->vfs_refcount++;
387 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
388 	mp->mnt_stat.f_mntonname[0] = '/';
389 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
390 	*mpp = mp;
391 	return (0);
392 }
393 
394 /*
395  * Lookup a mount point by filesystem identifier.
396  */
397 struct mount *
398 vfs_getvfs(fsid)
399 	fsid_t *fsid;
400 {
401 	struct mount *mp;
402 
403 	simple_lock(&mountlist_slock);
404 	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
405 		if (mp->mnt_stat.f_fsidx.__fsid_val[0] == fsid->__fsid_val[0] &&
406 		    mp->mnt_stat.f_fsidx.__fsid_val[1] == fsid->__fsid_val[1]) {
407 			simple_unlock(&mountlist_slock);
408 			return (mp);
409 		}
410 	}
411 	simple_unlock(&mountlist_slock);
412 	return ((struct mount *)0);
413 }
414 
415 /*
416  * Get a new unique fsid
417  */
418 void
419 vfs_getnewfsid(mp)
420 	struct mount *mp;
421 {
422 	static u_short xxxfs_mntid;
423 	fsid_t tfsid;
424 	int mtype;
425 
426 	simple_lock(&mntid_slock);
427 	mtype = makefstype(mp->mnt_op->vfs_name);
428 	mp->mnt_stat.f_fsidx.__fsid_val[0] = makedev(mtype, 0);
429 	mp->mnt_stat.f_fsidx.__fsid_val[1] = mtype;
430 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
431 	if (xxxfs_mntid == 0)
432 		++xxxfs_mntid;
433 	tfsid.__fsid_val[0] = makedev(mtype & 0xff, xxxfs_mntid);
434 	tfsid.__fsid_val[1] = mtype;
435 	if (!CIRCLEQ_EMPTY(&mountlist)) {
436 		while (vfs_getvfs(&tfsid)) {
437 			tfsid.__fsid_val[0]++;
438 			xxxfs_mntid++;
439 		}
440 	}
441 	mp->mnt_stat.f_fsidx.__fsid_val[0] = tfsid.__fsid_val[0];
442 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
443 	simple_unlock(&mntid_slock);
444 }
445 
446 /*
447  * Make a 'unique' number from a mount type name.
448  */
449 long
450 makefstype(type)
451 	const char *type;
452 {
453 	long rv;
454 
455 	for (rv = 0; *type; type++) {
456 		rv <<= 2;
457 		rv ^= *type;
458 	}
459 	return rv;
460 }
461 
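/*
 * Worked example (assuming 7-bit ASCII): makefstype("ffs") folds the
 * characters 'f' (0x66), 'f', 's' (0x73) as
 *
 *	rv = 0x66				after the first 'f'
 *	rv = (0x66 << 2) ^ 0x66 = 0x1fe		after the second 'f'
 *	rv = (0x1fe << 2) ^ 0x73 = 0x78b	after 's'
 *
 * so distinct short names tend to map to distinct values, though the result
 * is only "unique" in the loose sense the comment above implies.
 */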
462 
463 /*
464  * Set vnode attributes to VNOVAL
465  */
466 void
467 vattr_null(vap)
468 	struct vattr *vap;
469 {
470 
471 	vap->va_type = VNON;
472 
473 	/*
474 	 * Assign individually so that it is safe even if size and
475 	 * sign of each member are varied.
476 	 */
477 	vap->va_mode = VNOVAL;
478 	vap->va_nlink = VNOVAL;
479 	vap->va_uid = VNOVAL;
480 	vap->va_gid = VNOVAL;
481 	vap->va_fsid = VNOVAL;
482 	vap->va_fileid = VNOVAL;
483 	vap->va_size = VNOVAL;
484 	vap->va_blocksize = VNOVAL;
485 	vap->va_atime.tv_sec =
486 	    vap->va_mtime.tv_sec =
487 	    vap->va_ctime.tv_sec =
488 	    vap->va_birthtime.tv_sec = VNOVAL;
489 	vap->va_atime.tv_nsec =
490 	    vap->va_mtime.tv_nsec =
491 	    vap->va_ctime.tv_nsec =
492 	    vap->va_birthtime.tv_nsec = VNOVAL;
493 	vap->va_gen = VNOVAL;
494 	vap->va_flags = VNOVAL;
495 	vap->va_rdev = VNOVAL;
496 	vap->va_bytes = VNOVAL;
497 	vap->va_vaflags = 0;
498 }
499 
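/*
 * Illustrative use (hypothetical caller): a setattr-style path typically
 * starts from a fully "unset" vattr and fills in only the fields it really
 * wants to change, e.g.
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 *
 * Fields left at VNOVAL are taken by the filesystem to mean "no change".
 */
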
500 /*
501  * Routines having to do with the management of the vnode table.
502  */
503 extern int (**dead_vnodeop_p)(void *);
504 long numvnodes;
505 
506 /*
507  * Return the next vnode from the free list.
508  */
509 int
510 getnewvnode(tag, mp, vops, vpp)
511 	enum vtagtype tag;
512 	struct mount *mp;
513 	int (**vops)(void *);
514 	struct vnode **vpp;
515 {
516 	extern struct uvm_pagerops uvm_vnodeops;
517 	struct uvm_object *uobj;
518 	struct proc *p = curproc;	/* XXX */
519 	static int toggle;
520 	struct vnode *vp;
521 	int error = 0, tryalloc;
522 
523  try_again:
524 	if (mp) {
525 		/*
526 		 * Mark filesystem busy while we're creating a vnode.
527 		 * If unmount is in progress, this will wait; if the
528 		 * unmount succeeds (only if umount -f), this will
529 		 * return an error.  If the unmount fails, we'll keep
530 		 * going afterwards.
531 		 * (This puts the per-mount vnode list logically under
532 		 * the protection of the vfs_busy lock).
533 		 */
534 		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
535 		if (error && error != EDEADLK)
536 			return error;
537 	}
538 
539 	/*
540 	 * We must choose whether to allocate a new vnode or recycle an
541 	 * existing one. The criterion for allocating a new one is that
542 	 * the total number of vnodes is less than the number desired or
543 	 * there are no vnodes on either free list. Generally we only
544 	 * want to recycle vnodes that have no buffers associated with
545 	 * them, so we look first on the vnode_free_list. If it is empty,
546 	 * we next consider vnodes with referencing buffers on the
547 	 * vnode_hold_list. The toggle ensures that half the time we
548 	 * will use a buffer from the vnode_hold_list, and half the time
549 	 * we will allocate a new one unless the list has grown to twice
550 	 * the desired size. We are reluctant to recycle vnodes from the
551 	 * vnode_hold_list because we would lose the identity of all their
552 	 * referencing buffers.
553 	 */
554 
555 	vp = NULL;
556 
557 	simple_lock(&vnode_free_list_slock);
558 
559 	toggle ^= 1;
560 	if (numvnodes > 2 * desiredvnodes)
561 		toggle = 0;
562 
563 	tryalloc = numvnodes < desiredvnodes ||
564 	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
565 	     (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));
566 
567 	if (tryalloc &&
568 	    (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
569 		numvnodes++;
570 		simple_unlock(&vnode_free_list_slock);
571 		memset(vp, 0, sizeof(*vp));
572 		simple_lock_init(&vp->v_interlock);
573 		uobj = &vp->v_uobj;
574 		uobj->pgops = &uvm_vnodeops;
575 		TAILQ_INIT(&uobj->memq);
576 		/*
577 		 * done by memset() above.
578 		 *	uobj->uo_npages = 0;
579 		 *	LIST_INIT(&vp->v_nclist);
580 		 *	LIST_INIT(&vp->v_dnclist);
581 		 */
582 	} else {
583 		vp = getcleanvnode(p);
584 		/*
585 		 * Unless this is a bad time of the month, at most
586 		 * the first NCPUS items on the free list are
587 		 * locked, so this is close enough to being empty.
588 		 */
589 		if (vp == NULLVP) {
590 			if (mp && error != EDEADLK)
591 				vfs_unbusy(mp);
592 			if (tryalloc) {
593 				printf("WARNING: unable to allocate new "
594 				    "vnode, retrying...\n");
595 				(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
596 				goto try_again;
597 			}
598 			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
599 			*vpp = 0;
600 			return (ENFILE);
601 		}
602 		vp->v_flag = 0;
603 		vp->v_socket = NULL;
604 #ifdef VERIFIED_EXEC
605 		vp->fp_status = FINGERPRINT_INVALID;
606 #endif
607 	}
608 	vp->v_type = VNON;
609 	vp->v_vnlock = &vp->v_lock;
610 	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
611 	KASSERT(LIST_EMPTY(&vp->v_nclist));
612 	KASSERT(LIST_EMPTY(&vp->v_dnclist));
613 	vp->v_tag = tag;
614 	vp->v_op = vops;
615 	insmntque(vp, mp);
616 	*vpp = vp;
617 	vp->v_usecount = 1;
618 	vp->v_data = 0;
619 	simple_lock_init(&vp->v_uobj.vmobjlock);
620 
621 	/*
622 	 * initialize uvm_object within vnode.
623 	 */
624 
625 	uobj = &vp->v_uobj;
626 	KASSERT(uobj->pgops == &uvm_vnodeops);
627 	KASSERT(uobj->uo_npages == 0);
628 	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
629 	vp->v_size = VSIZENOTSET;
630 
631 	if (mp && error != EDEADLK)
632 		vfs_unbusy(mp);
633 	return (0);
634 }
635 
636 /*
637  * This is really just the reverse of getnewvnode(). Needed for
638  * VFS_VGET functions that may need to push back a vnode in case
639  * of a locking race.
640  */
641 void
642 ungetnewvnode(vp)
643 	struct vnode *vp;
644 {
645 #ifdef DIAGNOSTIC
646 	if (vp->v_usecount != 1)
647 		panic("ungetnewvnode: busy vnode");
648 #endif
649 	vp->v_usecount--;
650 	insmntque(vp, NULL);
651 	vp->v_type = VBAD;
652 
653 	simple_lock(&vp->v_interlock);
654 	/*
655 	 * Insert at head of LRU list
656 	 */
657 	simple_lock(&vnode_free_list_slock);
658 	if (vp->v_holdcnt > 0)
659 		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
660 	else
661 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
662 	simple_unlock(&vnode_free_list_slock);
663 	simple_unlock(&vp->v_interlock);
664 }
665 
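/*
 * Sketch of the intended getnewvnode()/ungetnewvnode() dance in a
 * hypothetical VFS_VGET implementation (VT_MYFS and myfs_vnodeop_p are
 * placeholder names, not real symbols):
 *
 *	error = getnewvnode(VT_MYFS, mp, myfs_vnodeop_p, &vp);
 *	if (error)
 *		return error;
 *	... lock the hash chain, look for an existing vnode ...
 *	if (another thread already created the vnode we wanted) {
 *		ungetnewvnode(vp);
 *		vp = the vnode the other thread created;
 *	}
 */
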
666 /*
667  * Move a vnode from one mount queue to another.
668  */
669 void
670 insmntque(vp, mp)
671 	struct vnode *vp;
672 	struct mount *mp;
673 {
674 
675 #ifdef DIAGNOSTIC
676 	if ((mp != NULL) &&
677 	    (mp->mnt_iflag & IMNT_UNMOUNT) &&
678 	    !(mp->mnt_flag & MNT_SOFTDEP) &&
679 	    vp->v_tag != VT_VFS) {
680 		panic("insmntque into dying filesystem");
681 	}
682 #endif
683 
684 	simple_lock(&mntvnode_slock);
685 	/*
686 	 * Delete from old mount point vnode list, if on one.
687 	 */
688 	if (vp->v_mount != NULL)
689 		LIST_REMOVE(vp, v_mntvnodes);
690 	/*
691 	 * Insert into list of vnodes for the new mount point, if available.
692 	 */
693 	if ((vp->v_mount = mp) != NULL)
694 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
695 	simple_unlock(&mntvnode_slock);
696 }
697 
698 /*
699  * Update outstanding I/O count and do wakeup if requested.
700  */
701 void
702 vwakeup(bp)
703 	struct buf *bp;
704 {
705 	struct vnode *vp;
706 
707 	if ((vp = bp->b_vp) != NULL) {
708 		/* XXX global lock hack
709 		 * can't use v_interlock here since this is called
710 		 * in interrupt context from biodone().
711 		 */
712 		simple_lock(&global_v_numoutput_slock);
713 		if (--vp->v_numoutput < 0)
714 			panic("vwakeup: neg numoutput, vp %p", vp);
715 		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
716 			vp->v_flag &= ~VBWAIT;
717 			wakeup((caddr_t)&vp->v_numoutput);
718 		}
719 		simple_unlock(&global_v_numoutput_slock);
720 	}
721 }
722 
723 /*
724  * Flush out and invalidate all buffers associated with a vnode.
725  * Called with the underlying vnode locked, which should prevent new dirty
726  * buffers from being queued.
727  */
728 int
729 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
730 	struct vnode *vp;
731 	int flags;
732 	struct ucred *cred;
733 	struct proc *p;
734 	int slpflag, slptimeo;
735 {
736 	struct buf *bp, *nbp;
737 	int s, error;
738 	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
739 		(flags & V_SAVE ? PGO_CLEANIT : 0);
740 
741 	/* XXXUBC this doesn't look at flags or slp* */
742 	simple_lock(&vp->v_interlock);
743 	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
744 	if (error) {
745 		return error;
746 	}
747 
748 	if (flags & V_SAVE) {
749 		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
750 		if (error)
751 		        return (error);
752 #ifdef DIAGNOSTIC
753 		s = splbio();
754 		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
755 		        panic("vinvalbuf: dirty bufs, vp %p", vp);
756 		splx(s);
757 #endif
758 	}
759 
760 	s = splbio();
761 
762 restart:
763 	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
764 		nbp = LIST_NEXT(bp, b_vnbufs);
765 		simple_lock(&bp->b_interlock);
766 		if (bp->b_flags & B_BUSY) {
767 			bp->b_flags |= B_WANTED;
768 			error = ltsleep((caddr_t)bp,
769 				    slpflag | (PRIBIO + 1) | PNORELOCK,
770 				    "vinvalbuf", slptimeo, &bp->b_interlock);
771 			if (error) {
772 				splx(s);
773 				return (error);
774 			}
775 			goto restart;
776 		}
777 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
778 		simple_unlock(&bp->b_interlock);
779 		brelse(bp);
780 	}
781 
782 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
783 		nbp = LIST_NEXT(bp, b_vnbufs);
784 		simple_lock(&bp->b_interlock);
785 		if (bp->b_flags & B_BUSY) {
786 			bp->b_flags |= B_WANTED;
787 			error = ltsleep((caddr_t)bp,
788 				    slpflag | (PRIBIO + 1) | PNORELOCK,
789 				    "vinvalbuf", slptimeo, &bp->b_interlock);
790 			if (error) {
791 				splx(s);
792 				return (error);
793 			}
794 			goto restart;
795 		}
796 		/*
797 		 * XXX Since there are no node locks for NFS, I believe
798 		 * there is a slight chance that a delayed write will
799 		 * occur while sleeping just above, so check for it.
800 		 */
801 		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
802 #ifdef DEBUG
803 			printf("buffer still DELWRI\n");
804 #endif
805 			bp->b_flags |= B_BUSY | B_VFLUSH;
806 			simple_unlock(&bp->b_interlock);
807 			VOP_BWRITE(bp);
808 			goto restart;
809 		}
810 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
811 		simple_unlock(&bp->b_interlock);
812 		brelse(bp);
813 	}
814 
815 #ifdef DIAGNOSTIC
816 	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
817 		panic("vinvalbuf: flush failed, vp %p", vp);
818 #endif
819 
820 	splx(s);
821 
822 	return (0);
823 }
824 
825 /*
826  * Destroy any in core blocks past the truncation length.
827  * Called with the underlying vnode locked, which should prevent new dirty
828  * buffers from being queued.
829  */
830 int
831 vtruncbuf(vp, lbn, slpflag, slptimeo)
832 	struct vnode *vp;
833 	daddr_t lbn;
834 	int slpflag, slptimeo;
835 {
836 	struct buf *bp, *nbp;
837 	int s, error;
838 	voff_t off;
839 
840 	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
841 	simple_lock(&vp->v_interlock);
842 	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
843 	if (error) {
844 		return error;
845 	}
846 
847 	s = splbio();
848 
849 restart:
850 	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
851 		nbp = LIST_NEXT(bp, b_vnbufs);
852 		if (bp->b_lblkno < lbn)
853 			continue;
854 		simple_lock(&bp->b_interlock);
855 		if (bp->b_flags & B_BUSY) {
856 			bp->b_flags |= B_WANTED;
857 			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
858 			    "vtruncbuf", slptimeo, &bp->b_interlock);
859 			if (error) {
860 				splx(s);
861 				return (error);
862 			}
863 			goto restart;
864 		}
865 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
866 		simple_unlock(&bp->b_interlock);
867 		brelse(bp);
868 	}
869 
870 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
871 		nbp = LIST_NEXT(bp, b_vnbufs);
872 		if (bp->b_lblkno < lbn)
873 			continue;
874 		simple_lock(&bp->b_interlock);
875 		if (bp->b_flags & B_BUSY) {
876 			bp->b_flags |= B_WANTED;
877 			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
878 			    "vtruncbuf", slptimeo, &bp->b_interlock);
879 			if (error) {
880 				splx(s);
881 				return (error);
882 			}
883 			goto restart;
884 		}
885 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
886 		simple_unlock(&bp->b_interlock);
887 		brelse(bp);
888 	}
889 
890 	splx(s);
891 
892 	return (0);
893 }
894 
895 void
896 vflushbuf(vp, sync)
897 	struct vnode *vp;
898 	int sync;
899 {
900 	struct buf *bp, *nbp;
901 	int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
902 	int s;
903 
904 	simple_lock(&vp->v_interlock);
905 	(void) VOP_PUTPAGES(vp, 0, 0, flags);
906 
907 loop:
908 	s = splbio();
909 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
910 		nbp = LIST_NEXT(bp, b_vnbufs);
911 		simple_lock(&bp->b_interlock);
912 		if ((bp->b_flags & B_BUSY)) {
913 			simple_unlock(&bp->b_interlock);
914 			continue;
915 		}
916 		if ((bp->b_flags & B_DELWRI) == 0)
917 			panic("vflushbuf: not dirty, bp %p", bp);
918 		bp->b_flags |= B_BUSY | B_VFLUSH;
919 		simple_unlock(&bp->b_interlock);
920 		splx(s);
921 		/*
922 		 * Wait for I/O associated with indirect blocks to complete,
923 		 * since there is no way to quickly wait for them below.
924 		 */
925 		if (bp->b_vp == vp || sync == 0)
926 			(void) bawrite(bp);
927 		else
928 			(void) bwrite(bp);
929 		goto loop;
930 	}
931 	if (sync == 0) {
932 		splx(s);
933 		return;
934 	}
935 	simple_lock(&global_v_numoutput_slock);
936 	while (vp->v_numoutput) {
937 		vp->v_flag |= VBWAIT;
938 		ltsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0,
939 			&global_v_numoutput_slock);
940 	}
941 	simple_unlock(&global_v_numoutput_slock);
942 	splx(s);
943 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
944 		vprint("vflushbuf: dirty", vp);
945 		goto loop;
946 	}
947 }
948 
949 /*
950  * Associate a buffer with a vnode.
951  */
952 void
953 bgetvp(vp, bp)
954 	struct vnode *vp;
955 	struct buf *bp;
956 {
957 	int s;
958 
959 	if (bp->b_vp)
960 		panic("bgetvp: not free, bp %p", bp);
961 	VHOLD(vp);
962 	s = splbio();
963 	bp->b_vp = vp;
964 	if (vp->v_type == VBLK || vp->v_type == VCHR)
965 		bp->b_dev = vp->v_rdev;
966 	else
967 		bp->b_dev = NODEV;
968 	/*
969 	 * Insert onto list for new vnode.
970 	 */
971 	bufinsvn(bp, &vp->v_cleanblkhd);
972 	splx(s);
973 }
974 
975 /*
976  * Disassociate a buffer from a vnode.
977  */
978 void
979 brelvp(bp)
980 	struct buf *bp;
981 {
982 	struct vnode *vp;
983 	int s;
984 
985 	if (bp->b_vp == NULL)
986 		panic("brelvp: vp NULL, bp %p", bp);
987 
988 	s = splbio();
989 	vp = bp->b_vp;
990 	/*
991 	 * Delete from old vnode list, if on one.
992 	 */
993 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
994 		bufremvn(bp);
995 
996 	if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
997 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
998 		vp->v_flag &= ~VONWORKLST;
999 		LIST_REMOVE(vp, v_synclist);
1000 	}
1001 
1002 	bp->b_vp = NULL;
1003 	HOLDRELE(vp);
1004 	splx(s);
1005 }
1006 
1007 /*
1008  * Reassign a buffer from one vnode to another.
1009  * Used to assign file specific control information
1010  * (indirect blocks) to the vnode to which they belong.
1011  *
1012  * This function must be called at splbio().
1013  */
1014 void
1015 reassignbuf(bp, newvp)
1016 	struct buf *bp;
1017 	struct vnode *newvp;
1018 {
1019 	struct buflists *listheadp;
1020 	int delay;
1021 
1022 	/*
1023 	 * Delete from old vnode list, if on one.
1024 	 */
1025 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1026 		bufremvn(bp);
1027 	/*
1028 	 * If dirty, put on list of dirty buffers;
1029 	 * otherwise insert onto list of clean buffers.
1030 	 */
1031 	if ((bp->b_flags & B_DELWRI) == 0) {
1032 		listheadp = &newvp->v_cleanblkhd;
1033 		if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
1034 		    (newvp->v_flag & VONWORKLST) &&
1035 		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
1036 			newvp->v_flag &= ~VONWORKLST;
1037 			LIST_REMOVE(newvp, v_synclist);
1038 		}
1039 	} else {
1040 		listheadp = &newvp->v_dirtyblkhd;
1041 		if ((newvp->v_flag & VONWORKLST) == 0) {
1042 			switch (newvp->v_type) {
1043 			case VDIR:
1044 				delay = dirdelay;
1045 				break;
1046 			case VBLK:
1047 				if (newvp->v_specmountpoint != NULL) {
1048 					delay = metadelay;
1049 					break;
1050 				}
1051 				/* fall through */
1052 			default:
1053 				delay = filedelay;
1054 				break;
1055 			}
1056 			if (!newvp->v_mount ||
1057 			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
1058 				vn_syncer_add_to_worklist(newvp, delay);
1059 		}
1060 	}
1061 	bufinsvn(bp, listheadp);
1062 }
1063 
1064 /*
1065  * Create a vnode for a block device.
1066  * Used for root filesystem and swap areas.
1067  * Also used for memory file system special devices.
1068  */
1069 int
1070 bdevvp(dev, vpp)
1071 	dev_t dev;
1072 	struct vnode **vpp;
1073 {
1074 
1075 	return (getdevvp(dev, vpp, VBLK));
1076 }
1077 
1078 /*
1079  * Create a vnode for a character device.
1080  * Used for kernfs and some console handling.
1081  */
1082 int
1083 cdevvp(dev, vpp)
1084 	dev_t dev;
1085 	struct vnode **vpp;
1086 {
1087 
1088 	return (getdevvp(dev, vpp, VCHR));
1089 }
1090 
1091 /*
1092  * Create a vnode for a device.
1093  * Used by bdevvp (block device) for root file system etc.,
1094  * and by cdevvp (character device) for console and kernfs.
1095  */
1096 int
1097 getdevvp(dev, vpp, type)
1098 	dev_t dev;
1099 	struct vnode **vpp;
1100 	enum vtype type;
1101 {
1102 	struct vnode *vp;
1103 	struct vnode *nvp;
1104 	int error;
1105 
1106 	if (dev == NODEV) {
1107 		*vpp = NULLVP;
1108 		return (0);
1109 	}
1110 	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
1111 	if (error) {
1112 		*vpp = NULLVP;
1113 		return (error);
1114 	}
1115 	vp = nvp;
1116 	vp->v_type = type;
1117 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
1118 		vput(vp);
1119 		vp = nvp;
1120 	}
1121 	*vpp = vp;
1122 	return (0);
1123 }
1124 
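/*
 * Illustrative use (an assumption about callers elsewhere in the tree):
 * root-mount and swap configuration code typically create their block
 * device vnodes with something like
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't set up root vnode");
 *
 * and console or kernfs code uses cdevvp() the same way for character
 * devices.
 */
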
1125 /*
1126  * Check to see if the new vnode represents a special device
1127  * for which we already have a vnode (either because of
1128  * bdevvp() or because of a different vnode representing
1129  * the same block device). If such an alias exists, deallocate
1130  * the existing contents and return the aliased vnode. The
1131  * caller is responsible for filling it with its new contents.
1132  */
1133 struct vnode *
1134 checkalias(nvp, nvp_rdev, mp)
1135 	struct vnode *nvp;
1136 	dev_t nvp_rdev;
1137 	struct mount *mp;
1138 {
1139 	struct proc *p = curproc;       /* XXX */
1140 	struct vnode *vp;
1141 	struct vnode **vpp;
1142 
1143 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1144 		return (NULLVP);
1145 
1146 	vpp = &speclisth[SPECHASH(nvp_rdev)];
1147 loop:
1148 	simple_lock(&spechash_slock);
1149 	for (vp = *vpp; vp; vp = vp->v_specnext) {
1150 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
1151 			continue;
1152 		/*
1153 		 * Alias, but not in use, so flush it out.
1154 		 */
1155 		simple_lock(&vp->v_interlock);
1156 		if (vp->v_usecount == 0) {
1157 			simple_unlock(&spechash_slock);
1158 			vgonel(vp, p);
1159 			goto loop;
1160 		}
1161 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT)) {
1162 			simple_unlock(&spechash_slock);
1163 			goto loop;
1164 		}
1165 		break;
1166 	}
1167 	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
1168 		MALLOC(nvp->v_specinfo, struct specinfo *,
1169 			sizeof(struct specinfo), M_VNODE, M_NOWAIT);
1170 		/* XXX Erg. */
1171 		if (nvp->v_specinfo == NULL) {
1172 			simple_unlock(&spechash_slock);
1173 			uvm_wait("checkalias");
1174 			goto loop;
1175 		}
1176 
1177 		nvp->v_rdev = nvp_rdev;
1178 		nvp->v_hashchain = vpp;
1179 		nvp->v_specnext = *vpp;
1180 		nvp->v_specmountpoint = NULL;
1181 		simple_unlock(&spechash_slock);
1182 		nvp->v_speclockf = NULL;
1183 		simple_lock_init(&nvp->v_spec_cow_slock);
1184 		SLIST_INIT(&nvp->v_spec_cow_head);
1185 		nvp->v_spec_cow_req = 0;
1186 		nvp->v_spec_cow_count = 0;
1187 
1188 		*vpp = nvp;
1189 		if (vp != NULLVP) {
1190 			nvp->v_flag |= VALIASED;
1191 			vp->v_flag |= VALIASED;
1192 			vput(vp);
1193 		}
1194 		return (NULLVP);
1195 	}
1196 	simple_unlock(&spechash_slock);
1197 	VOP_UNLOCK(vp, 0);
1198 	simple_lock(&vp->v_interlock);
1199 	vclean(vp, 0, p);
1200 	vp->v_op = nvp->v_op;
1201 	vp->v_tag = nvp->v_tag;
1202 	vp->v_vnlock = &vp->v_lock;
1203 	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
1204 	nvp->v_type = VNON;
1205 	insmntque(vp, mp);
1206 	return (vp);
1207 }
1208 
1209 /*
1210  * Grab a particular vnode from the free list, increment its
1211  * reference count and lock it. If the vnode lock bit is set the
1212  * vnode is being eliminated in vgone. In that case, we can not
1213  * grab the vnode, so the process is awakened when the transition is
1214  * completed, and an error returned to indicate that the vnode is no
1215  * longer usable (possibly having been changed to a new file system type).
1216  */
1217 int
1218 vget(vp, flags)
1219 	struct vnode *vp;
1220 	int flags;
1221 {
1222 	int error;
1223 
1224 	/*
1225 	 * If the vnode is in the process of being cleaned out for
1226 	 * another use, we wait for the cleaning to finish and then
1227 	 * return failure. Cleaning is determined by checking that
1228 	 * the VXLOCK flag is set.
1229 	 */
1230 
1231 	if ((flags & LK_INTERLOCK) == 0)
1232 		simple_lock(&vp->v_interlock);
1233 	if (vp->v_flag & VXLOCK) {
1234 		if (flags & LK_NOWAIT) {
1235 			simple_unlock(&vp->v_interlock);
1236 			return EBUSY;
1237 		}
1238 		vp->v_flag |= VXWANT;
1239 		ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
1240 		return (ENOENT);
1241 	}
1242 	if (vp->v_usecount == 0) {
1243 		simple_lock(&vnode_free_list_slock);
1244 		if (vp->v_holdcnt > 0)
1245 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
1246 		else
1247 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1248 		simple_unlock(&vnode_free_list_slock);
1249 	}
1250 	vp->v_usecount++;
1251 #ifdef DIAGNOSTIC
1252 	if (vp->v_usecount == 0) {
1253 		vprint("vget", vp);
1254 		panic("vget: usecount overflow, vp %p", vp);
1255 	}
1256 #endif
1257 	if (flags & LK_TYPE_MASK) {
1258 		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
1259 			/*
1260 			 * must expand vrele here because we do not want
1261 			 * to call VOP_INACTIVE if the reference count
1262 			 * drops back to zero since it was never really
1263 			 * active. We must remove it from the free list
1264 			 * before sleeping so that multiple processes do
1265 			 * not try to recycle it.
1266 			 */
1267 			simple_lock(&vp->v_interlock);
1268 			vp->v_usecount--;
1269 			if (vp->v_usecount > 0) {
1270 				simple_unlock(&vp->v_interlock);
1271 				return (error);
1272 			}
1273 			/*
1274 			 * insert at tail of LRU list
1275 			 */
1276 			simple_lock(&vnode_free_list_slock);
1277 			if (vp->v_holdcnt > 0)
1278 				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
1279 				    v_freelist);
1280 			else
1281 				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
1282 				    v_freelist);
1283 			simple_unlock(&vnode_free_list_slock);
1284 			simple_unlock(&vp->v_interlock);
1285 		}
1286 		return (error);
1287 	}
1288 	simple_unlock(&vp->v_interlock);
1289 	return (0);
1290 }
1291 
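/*
 * Typical reference pattern (illustrative): a caller that needs both a
 * reference and the vnode lock usually writes
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);		(unlock and drop the reference)
 *	}
 *
 * whereas vget(vp, 0) takes only a reference, to be dropped later with
 * vrele().
 */
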
1292 /*
1293  * vput(), just unlock and vrele()
1294  */
1295 void
1296 vput(vp)
1297 	struct vnode *vp;
1298 {
1299 	struct proc *p = curproc;	/* XXX */
1300 
1301 #ifdef DIAGNOSTIC
1302 	if (vp == NULL)
1303 		panic("vput: null vp");
1304 #endif
1305 	simple_lock(&vp->v_interlock);
1306 	vp->v_usecount--;
1307 	if (vp->v_usecount > 0) {
1308 		simple_unlock(&vp->v_interlock);
1309 		VOP_UNLOCK(vp, 0);
1310 		return;
1311 	}
1312 #ifdef DIAGNOSTIC
1313 	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1314 		vprint("vput: bad ref count", vp);
1315 		panic("vput: ref cnt");
1316 	}
1317 #endif
1318 	/*
1319 	 * Insert at tail of LRU list.
1320 	 */
1321 	simple_lock(&vnode_free_list_slock);
1322 	if (vp->v_holdcnt > 0)
1323 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1324 	else
1325 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1326 	simple_unlock(&vnode_free_list_slock);
1327 	if (vp->v_flag & VEXECMAP) {
1328 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1329 		uvmexp.filepages += vp->v_uobj.uo_npages;
1330 	}
1331 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1332 	simple_unlock(&vp->v_interlock);
1333 	VOP_INACTIVE(vp, p);
1334 }
1335 
1336 /*
1337  * Vnode release.
1338  * If count drops to zero, call inactive routine and return to freelist.
1339  */
1340 void
1341 vrele(vp)
1342 	struct vnode *vp;
1343 {
1344 	struct proc *p = curproc;	/* XXX */
1345 
1346 #ifdef DIAGNOSTIC
1347 	if (vp == NULL)
1348 		panic("vrele: null vp");
1349 #endif
1350 	simple_lock(&vp->v_interlock);
1351 	vp->v_usecount--;
1352 	if (vp->v_usecount > 0) {
1353 		simple_unlock(&vp->v_interlock);
1354 		return;
1355 	}
1356 #ifdef DIAGNOSTIC
1357 	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1358 		vprint("vrele: bad ref count", vp);
1359 		panic("vrele: ref cnt vp %p", vp);
1360 	}
1361 #endif
1362 	/*
1363 	 * Insert at tail of LRU list.
1364 	 */
1365 	simple_lock(&vnode_free_list_slock);
1366 	if (vp->v_holdcnt > 0)
1367 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1368 	else
1369 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1370 	simple_unlock(&vnode_free_list_slock);
1371 	if (vp->v_flag & VEXECMAP) {
1372 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1373 		uvmexp.filepages += vp->v_uobj.uo_npages;
1374 	}
1375 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1376 	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
1377 		VOP_INACTIVE(vp, p);
1378 }
1379 
1380 #ifdef DIAGNOSTIC
1381 /*
1382  * Page or buffer structure gets a reference.
1383  */
1384 void
1385 vholdl(vp)
1386 	struct vnode *vp;
1387 {
1388 
1389 	/*
1390 	 * If it is on the freelist and the hold count is currently
1391 	 * zero, move it to the hold list. The test of the back
1392 	 * pointer and the use reference count of zero is because
1393 	 * it will be removed from a free list by getnewvnode,
1394 	 * but will not have its reference count incremented until
1395 	 * after calling vgone. If the reference count were
1396 	 * incremented first, vgone would (incorrectly) try to
1397 	 * close the previous instance of the underlying object.
1398 	 * So, the back pointer is explicitly set to `0xdeadb' in
1399 	 * getnewvnode after removing it from a freelist to ensure
1400 	 * that we do not try to move it here.
1401 	 */
1402 	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1403 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
1404 		simple_lock(&vnode_free_list_slock);
1405 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1406 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
1407 		simple_unlock(&vnode_free_list_slock);
1408 	}
1409 	vp->v_holdcnt++;
1410 }
1411 
1412 /*
1413  * Page or buffer structure frees a reference.
1414  */
1415 void
1416 holdrelel(vp)
1417 	struct vnode *vp;
1418 {
1419 
1420 	if (vp->v_holdcnt <= 0)
1421 		panic("holdrelel: holdcnt vp %p", vp);
1422 	vp->v_holdcnt--;
1423 
1424 	/*
1425 	 * If it is on the holdlist and the hold count drops to
1426 	 * zero, move it to the free list. The test of the back
1427 	 * pointer and the use reference count of zero is because
1428 	 * it will be removed from a free list by getnewvnode,
1429 	 * but will not have its reference count incremented until
1430 	 * after calling vgone. If the reference count were
1431 	 * incremented first, vgone would (incorrectly) try to
1432 	 * close the previous instance of the underlying object.
1433 	 * So, the back pointer is explicitly set to `0xdeadb' in
1434 	 * getnewvnode after removing it from a freelist to ensure
1435 	 * that we do not try to move it here.
1436 	 */
1437 
1438 	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1439 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
1440 		simple_lock(&vnode_free_list_slock);
1441 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
1442 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1443 		simple_unlock(&vnode_free_list_slock);
1444 	}
1445 }
1446 
1447 /*
1448  * Vnode reference.
1449  */
1450 void
1451 vref(vp)
1452 	struct vnode *vp;
1453 {
1454 
1455 	simple_lock(&vp->v_interlock);
1456 	if (vp->v_usecount <= 0)
1457 		panic("vref used where vget required, vp %p", vp);
1458 	vp->v_usecount++;
1459 #ifdef DIAGNOSTIC
1460 	if (vp->v_usecount == 0) {
1461 		vprint("vref", vp);
1462 		panic("vref: usecount overflow, vp %p", vp);
1463 	}
1464 #endif
1465 	simple_unlock(&vp->v_interlock);
1466 }
1467 #endif /* DIAGNOSTIC */
1468 
1469 /*
1470  * Remove any vnodes in the vnode table belonging to mount point mp.
1471  *
1472  * If FORCECLOSE is not specified, there should not be any active ones,
1473  * return error if any are found (nb: this is a user error, not a
1474  * system error). If FORCECLOSE is specified, detach any active vnodes
1475  * that are found.
1476  *
1477  * If WRITECLOSE is set, only flush out regular file vnodes open for
1478  * writing.
1479  *
1480  * SKIPSYSTEM causes any vnodes marked V_SYSTEM to be skipped.
1481  */
1482 #ifdef DEBUG
1483 int busyprt = 0;	/* print out busy vnodes */
1484 struct ctldebug debug1 = { "busyprt", &busyprt };
1485 #endif
1486 
1487 int
1488 vflush(mp, skipvp, flags)
1489 	struct mount *mp;
1490 	struct vnode *skipvp;
1491 	int flags;
1492 {
1493 	struct proc *p = curproc;	/* XXX */
1494 	struct vnode *vp, *nvp;
1495 	int busy = 0;
1496 
1497 	simple_lock(&mntvnode_slock);
1498 loop:
1499 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1500 		if (vp->v_mount != mp)
1501 			goto loop;
1502 		nvp = LIST_NEXT(vp, v_mntvnodes);
1503 		/*
1504 		 * Skip over a selected vnode.
1505 		 */
1506 		if (vp == skipvp)
1507 			continue;
1508 		simple_lock(&vp->v_interlock);
1509 		/*
1510 		 * Skip over vnodes marked VSYSTEM.
1511 		 */
1512 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1513 			simple_unlock(&vp->v_interlock);
1514 			continue;
1515 		}
1516 		/*
1517 		 * If WRITECLOSE is set, only flush out regular file
1518 		 * vnodes open for writing.
1519 		 */
1520 		if ((flags & WRITECLOSE) &&
1521 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1522 			simple_unlock(&vp->v_interlock);
1523 			continue;
1524 		}
1525 		/*
1526 		 * With v_usecount == 0, all we need to do is clear
1527 		 * out the vnode data structures and we are done.
1528 		 */
1529 		if (vp->v_usecount == 0) {
1530 			simple_unlock(&mntvnode_slock);
1531 			vgonel(vp, p);
1532 			simple_lock(&mntvnode_slock);
1533 			continue;
1534 		}
1535 		/*
1536 		 * If FORCECLOSE is set, forcibly close the vnode.
1537 		 * For block or character devices, revert to an
1538 		 * anonymous device. For all other files, just kill them.
1539 		 */
1540 		if (flags & FORCECLOSE) {
1541 			simple_unlock(&mntvnode_slock);
1542 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1543 				vgonel(vp, p);
1544 			} else {
1545 				vclean(vp, 0, p);
1546 				vp->v_op = spec_vnodeop_p;
1547 				insmntque(vp, (struct mount *)0);
1548 			}
1549 			simple_lock(&mntvnode_slock);
1550 			continue;
1551 		}
1552 #ifdef DEBUG
1553 		if (busyprt)
1554 			vprint("vflush: busy vnode", vp);
1555 #endif
1556 		simple_unlock(&vp->v_interlock);
1557 		busy++;
1558 	}
1559 	simple_unlock(&mntvnode_slock);
1560 	if (busy)
1561 		return (EBUSY);
1562 	return (0);
1563 }
1564 
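/*
 * Illustrative caller (an assumption about filesystem code elsewhere in
 * the tree): an unmount path typically flushes its per-mount vnodes with
 *
 *	error = vflush(mp, NULLVP, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * and fails the unmount with EBUSY if any vnode remains active.
 */
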
1565 /*
1566  * Disassociate the underlying file system from a vnode.
1567  */
1568 void
1569 vclean(vp, flags, p)
1570 	struct vnode *vp;
1571 	int flags;
1572 	struct proc *p;
1573 {
1574 	struct mount *mp;
1575 	int active;
1576 
1577 	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1578 
1579 	/*
1580 	 * Check to see if the vnode is in use.
1581 	 * If so we have to reference it before we clean it out
1582 	 * so that its count cannot fall to zero and generate a
1583 	 * race against ourselves to recycle it.
1584 	 */
1585 
1586 	if ((active = vp->v_usecount) != 0) {
1587 		vp->v_usecount++;
1588 #ifdef DIAGNOSTIC
1589 		if (vp->v_usecount == 0) {
1590 			vprint("vclean", vp);
1591 			panic("vclean: usecount overflow");
1592 		}
1593 #endif
1594 	}
1595 
1596 	/*
1597 	 * Prevent the vnode from being recycled or
1598 	 * brought into use while we clean it out.
1599 	 */
1600 	if (vp->v_flag & VXLOCK)
1601 		panic("vclean: deadlock, vp %p", vp);
1602 	vp->v_flag |= VXLOCK;
1603 	if (vp->v_flag & VEXECMAP) {
1604 		uvmexp.execpages -= vp->v_uobj.uo_npages;
1605 		uvmexp.filepages += vp->v_uobj.uo_npages;
1606 	}
1607 	vp->v_flag &= ~(VTEXT|VEXECMAP);
1608 
1609 	/*
1610 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1611 	 * have the object locked while it cleans it out. The VOP_LOCK
1612 	 * ensures that the VOP_INACTIVE routine is done with its work.
1613 	 * For active vnodes, it ensures that no other activity can
1614 	 * occur while the underlying object is being cleaned out.
1615 	 */
1616 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);
1617 
1618 	/*
1619 	 * Clean out any cached data associated with the vnode.
1620 	 */
1621 	if (flags & DOCLOSE) {
1622 		int error;
1623 		vn_start_write(vp, &mp, V_WAIT | V_LOWER);
1624 		error = vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1625 		vn_finished_write(mp, V_LOWER);
1626 		if (error)
1627 			error = vinvalbuf(vp, 0, NOCRED, p, 0, 0);
1628 		KASSERT(error == 0);
1629 		KASSERT((vp->v_flag & VONWORKLST) == 0);
1630 	}
1631 	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
1632 
1633 	/*
1634 	 * If purging an active vnode, it must be closed and
1635 	 * deactivated before being reclaimed. Note that the
1636 	 * VOP_INACTIVE will unlock the vnode.
1637 	 */
1638 	if (active) {
1639 		if (flags & DOCLOSE)
1640 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
1641 		VOP_INACTIVE(vp, p);
1642 	} else {
1643 		/*
1644 		 * Any other processes trying to obtain this lock must first
1645 		 * wait for VXLOCK to clear, then call the new lock operation.
1646 		 */
1647 		VOP_UNLOCK(vp, 0);
1648 	}
1649 	/*
1650 	 * Reclaim the vnode.
1651 	 */
1652 	if (VOP_RECLAIM(vp, p))
1653 		panic("vclean: cannot reclaim, vp %p", vp);
1654 	if (active) {
1655 		/*
1656 		 * Inline copy of vrele() since VOP_INACTIVE
1657 		 * has already been called.
1658 		 */
1659 		simple_lock(&vp->v_interlock);
1660 		if (--vp->v_usecount <= 0) {
1661 #ifdef DIAGNOSTIC
1662 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1663 				vprint("vclean: bad ref count", vp);
1664 				panic("vclean: ref cnt");
1665 			}
1666 #endif
1667 			/*
1668 			 * Insert at tail of LRU list.
1669 			 */
1670 
1671 			simple_unlock(&vp->v_interlock);
1672 			simple_lock(&vnode_free_list_slock);
1673 #ifdef DIAGNOSTIC
1674 			if (vp->v_holdcnt > 0)
1675 				panic("vclean: not clean, vp %p", vp);
1676 #endif
1677 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1678 			simple_unlock(&vnode_free_list_slock);
1679 		} else
1680 			simple_unlock(&vp->v_interlock);
1681 	}
1682 
1683 	KASSERT(vp->v_uobj.uo_npages == 0);
1684 	cache_purge(vp);
1685 
1686 	/*
1687 	 * Done with purge, notify sleepers of the grim news.
1688 	 */
1689 	vp->v_op = dead_vnodeop_p;
1690 	vp->v_tag = VT_NON;
1691 	simple_lock(&vp->v_interlock);
1692 	VN_KNOTE(vp, NOTE_REVOKE);	/* FreeBSD has this in vn_pollgone() */
1693 	vp->v_flag &= ~VXLOCK;
1694 	if (vp->v_flag & VXWANT) {
1695 		vp->v_flag &= ~VXWANT;
1696 		simple_unlock(&vp->v_interlock);
1697 		wakeup((caddr_t)vp);
1698 	} else
1699 		simple_unlock(&vp->v_interlock);
1700 }
1701 
1702 /*
1703  * Recycle an unused vnode to the front of the free list.
1704  * Release the passed interlock if the vnode will be recycled.
1705  */
1706 int
1707 vrecycle(vp, inter_lkp, p)
1708 	struct vnode *vp;
1709 	struct simplelock *inter_lkp;
1710 	struct proc *p;
1711 {
1712 
1713 	simple_lock(&vp->v_interlock);
1714 	if (vp->v_usecount == 0) {
1715 		if (inter_lkp)
1716 			simple_unlock(inter_lkp);
1717 		vgonel(vp, p);
1718 		return (1);
1719 	}
1720 	simple_unlock(&vp->v_interlock);
1721 	return (0);
1722 }
1723 
1724 /*
1725  * Eliminate all activity associated with a vnode
1726  * in preparation for reuse.
1727  */
1728 void
1729 vgone(vp)
1730 	struct vnode *vp;
1731 {
1732 	struct proc *p = curproc;	/* XXX */
1733 
1734 	simple_lock(&vp->v_interlock);
1735 	vgonel(vp, p);
1736 }
1737 
1738 /*
1739  * vgone, with the vp interlock held.
1740  */
1741 void
1742 vgonel(vp, p)
1743 	struct vnode *vp;
1744 	struct proc *p;
1745 {
1746 	struct vnode *vq;
1747 	struct vnode *vx;
1748 
1749 	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1750 
1751 	/*
1752 	 * If a vgone (or vclean) is already in progress,
1753 	 * wait until it is done and return.
1754 	 */
1755 
1756 	if (vp->v_flag & VXLOCK) {
1757 		vp->v_flag |= VXWANT;
1758 		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
1759 		return;
1760 	}
1761 
1762 	/*
1763 	 * Clean out the filesystem specific data.
1764 	 */
1765 
1766 	vclean(vp, DOCLOSE, p);
1767 	KASSERT((vp->v_flag & VONWORKLST) == 0);
1768 
1769 	/*
1770 	 * Delete from old mount point vnode list, if on one.
1771 	 */
1772 
1773 	if (vp->v_mount != NULL)
1774 		insmntque(vp, (struct mount *)0);
1775 
1776 	/*
1777 	 * If special device, remove it from the special device alias list,
1778 	 * if it is on one.
1779 	 */
1780 
1781 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1782 		simple_lock(&spechash_slock);
1783 		if (vp->v_hashchain != NULL) {
1784 			if (*vp->v_hashchain == vp) {
1785 				*vp->v_hashchain = vp->v_specnext;
1786 			} else {
1787 				for (vq = *vp->v_hashchain; vq;
1788 							vq = vq->v_specnext) {
1789 					if (vq->v_specnext != vp)
1790 						continue;
1791 					vq->v_specnext = vp->v_specnext;
1792 					break;
1793 				}
1794 				if (vq == NULL)
1795 					panic("missing bdev");
1796 			}
1797 			if (vp->v_flag & VALIASED) {
1798 				vx = NULL;
1799 				for (vq = *vp->v_hashchain; vq;
1800 							vq = vq->v_specnext) {
1801 					if (vq->v_rdev != vp->v_rdev ||
1802 					    vq->v_type != vp->v_type)
1803 						continue;
1804 					if (vx)
1805 						break;
1806 					vx = vq;
1807 				}
1808 				if (vx == NULL)
1809 					panic("missing alias");
1810 				if (vq == NULL)
1811 					vx->v_flag &= ~VALIASED;
1812 				vp->v_flag &= ~VALIASED;
1813 			}
1814 		}
1815 		simple_unlock(&spechash_slock);
1816 		FREE(vp->v_specinfo, M_VNODE);
1817 		vp->v_specinfo = NULL;
1818 	}
1819 
1820 	/*
1821 	 * The test of the back pointer and the reference count of
1822 	 * zero is because it will be removed from the free list by
1823 	 * getcleanvnode, but will not have its reference count
1824 	 * incremented until after calling vgone. If the reference
1825 	 * count were incremented first, vgone would (incorrectly)
1826 	 * try to close the previous instance of the underlying object.
1827 	 * So, the back pointer is explicitly set to `0xdeadb' in
1828 	 * getnewvnode after removing it from the freelist to ensure
1829 	 * that we do not try to move it here.
1830 	 */
1831 
1832 	vp->v_type = VBAD;
1833 	if (vp->v_usecount == 0) {
1834 		boolean_t dofree;
1835 
1836 		simple_lock(&vnode_free_list_slock);
1837 		if (vp->v_holdcnt > 0)
1838 			panic("vgonel: not clean, vp %p", vp);
1839 		/*
1840 		 * If it isn't on the freelist, we're called by getcleanvnode
1841 		 * and the vnode is being re-used.  Otherwise, we'll free it.
1842 		 */
1843 		dofree = vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb;
1844 		if (dofree) {
1845 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1846 			numvnodes--;
1847 		}
1848 		simple_unlock(&vnode_free_list_slock);
1849 		if (dofree)
1850 			pool_put(&vnode_pool, vp);
1851 	}
1852 }
1853 
1854 /*
1855  * Lookup a vnode by device number.
1856  */
1857 int
1858 vfinddev(dev, type, vpp)
1859 	dev_t dev;
1860 	enum vtype type;
1861 	struct vnode **vpp;
1862 {
1863 	struct vnode *vp;
1864 	int rc = 0;
1865 
1866 	simple_lock(&spechash_slock);
1867 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1868 		if (dev != vp->v_rdev || type != vp->v_type)
1869 			continue;
1870 		*vpp = vp;
1871 		rc = 1;
1872 		break;
1873 	}
1874 	simple_unlock(&spechash_slock);
1875 	return (rc);
1876 }
1877 
1878 /*
1879  * Revoke all the vnodes corresponding to the specified minor number
1880  * range (endpoints inclusive) of the specified major.
1881  */
1882 void
1883 vdevgone(maj, minl, minh, type)
1884 	int maj, minl, minh;
1885 	enum vtype type;
1886 {
1887 	struct vnode *vp;
1888 	int mn;
1889 
1890 	for (mn = minl; mn <= minh; mn++)
1891 		if (vfinddev(makedev(maj, mn), type, &vp))
1892 			VOP_REVOKE(vp, REVOKEALL);
1893 }
1894 
1895 /*
1896  * Calculate the total number of references to a special device.
1897  */
1898 int
1899 vcount(vp)
1900 	struct vnode *vp;
1901 {
1902 	struct vnode *vq, *vnext;
1903 	int count;
1904 
1905 loop:
1906 	if ((vp->v_flag & VALIASED) == 0)
1907 		return (vp->v_usecount);
1908 	simple_lock(&spechash_slock);
1909 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1910 		vnext = vq->v_specnext;
1911 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1912 			continue;
1913 		/*
1914 		 * Alias, but not in use, so flush it out.
1915 		 */
1916 		if (vq->v_usecount == 0 && vq != vp &&
1917 		    (vq->v_flag & VXLOCK) == 0) {
1918 			simple_unlock(&spechash_slock);
1919 			vgone(vq);
1920 			goto loop;
1921 		}
1922 		count += vq->v_usecount;
1923 	}
1924 	simple_unlock(&spechash_slock);
1925 	return (count);
1926 }
1927 
1928 /*
1929  * Print out a description of a vnode.
1930  */
1931 const char * const vnode_types[] = {
1932 	"VNON",
1933 	"VREG",
1934 	"VDIR",
1935 	"VBLK",
1936 	"VCHR",
1937 	"VLNK",
1938 	"VSOCK",
1939 	"VFIFO",
1940 	"VBAD"
1941 };
1942 
1943 void
1944 vprint(label, vp)
1945 	char *label;
1946 	struct vnode *vp;
1947 {
1948 	char buf[96];
1949 
1950 	if (label != NULL)
1951 		printf("%s: ", label);
1952 	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
1953 	    vp->v_tag, vnode_types[vp->v_type],
1954 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
1955 	buf[0] = '\0';
1956 	if (vp->v_flag & VROOT)
1957 		strlcat(buf, "|VROOT", sizeof(buf));
1958 	if (vp->v_flag & VTEXT)
1959 		strlcat(buf, "|VTEXT", sizeof(buf));
1960 	if (vp->v_flag & VEXECMAP)
1961 		strlcat(buf, "|VEXECMAP", sizeof(buf));
1962 	if (vp->v_flag & VSYSTEM)
1963 		strlcat(buf, "|VSYSTEM", sizeof(buf));
1964 	if (vp->v_flag & VXLOCK)
1965 		strlcat(buf, "|VXLOCK", sizeof(buf));
1966 	if (vp->v_flag & VXWANT)
1967 		strlcat(buf, "|VXWANT", sizeof(buf));
1968 	if (vp->v_flag & VBWAIT)
1969 		strlcat(buf, "|VBWAIT", sizeof(buf));
1970 	if (vp->v_flag & VALIASED)
1971 		strlcat(buf, "|VALIASED", sizeof(buf));
1972 	if (buf[0] != '\0')
1973 		printf(" flags (%s)", &buf[1]);
1974 	if (vp->v_data == NULL) {
1975 		printf("\n");
1976 	} else {
1977 		printf("\n\t");
1978 		VOP_PRINT(vp);
1979 	}
1980 }
1981 
1982 #ifdef DEBUG
1983 /*
1984  * List all of the locked vnodes in the system.
1985  * Called when debugging the kernel.
1986  */
1987 void
1988 printlockedvnodes()
1989 {
1990 	struct mount *mp, *nmp;
1991 	struct vnode *vp;
1992 
1993 	printf("Locked vnodes\n");
1994 	simple_lock(&mountlist_slock);
1995 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
1996 	     mp = nmp) {
1997 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
1998 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1999 			continue;
2000 		}
2001 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2002 			if (VOP_ISLOCKED(vp))
2003 				vprint(NULL, vp);
2004 		}
2005 		simple_lock(&mountlist_slock);
2006 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
2007 		vfs_unbusy(mp);
2008 	}
2009 	simple_unlock(&mountlist_slock);
2010 }
2011 #endif
2012 
2013 /*
2014  * sysctl helper routine for vfs.generic.conf lookups.
2015  */
2016 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2017 static int
2018 sysctl_vfs_generic_conf(SYSCTLFN_ARGS)
2019 {
2020 	struct vfsconf vfc;
2021 	extern const char * const mountcompatnames[];
2022 	extern int nmountcompatnames;
2023 	struct sysctlnode node;
2024 	struct vfsops *vfsp;
2025 	u_int vfsnum;
2026 
2027 	if (namelen != 1)
2028 		return (ENOTDIR);
2029 	vfsnum = name[0];
2030 	if (vfsnum >= nmountcompatnames ||
2031 	    mountcompatnames[vfsnum] == NULL)
2032 		return (EOPNOTSUPP);
2033 	vfsp = vfs_getopsbyname(mountcompatnames[vfsnum]);
2034 	if (vfsp == NULL)
2035 		return (EOPNOTSUPP);
2036 
2037 	vfc.vfc_vfsops = vfsp;
2038 	strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
2039 	vfc.vfc_typenum = vfsnum;
2040 	vfc.vfc_refcount = vfsp->vfs_refcount;
2041 	vfc.vfc_flags = 0;
2042 	vfc.vfc_mountroot = vfsp->vfs_mountroot;
2043 	vfc.vfc_next = NULL;
2044 
2045 	node = *rnode;
2046 	node.sysctl_data = &vfc;
2047 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
2048 }
2049 #endif
2050 
2051 /*
2052  * sysctl helper routine to return list of supported fstypes
2053  */
2054 static int
2055 sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
2056 {
2057 	char buf[MFSNAMELEN];
2058 	char *where = oldp;
2059 	struct vfsops *v;
2060 	size_t needed, left, slen;
2061 	int error, first;
2062 
2063 	if (newp != NULL)
2064 		return (EPERM);
2065 	if (namelen != 0)
2066 		return (EINVAL);
2067 
2068 	first = 1;
2069 	error = 0;
2070 	needed = 0;
2071 	left = *oldlenp;
2072 
2073 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2074 		if (where == NULL)
2075 			needed += strlen(v->vfs_name) + 1;
2076 		else {
2077 			memset(buf, 0, sizeof(buf));
2078 			if (first) {
2079 				strncpy(buf, v->vfs_name, sizeof(buf));
2080 				first = 0;
2081 			} else {
2082 				buf[0] = ' ';
2083 				strncpy(buf + 1, v->vfs_name, sizeof(buf) - 1);
2084 			}
2085 			buf[sizeof(buf)-1] = '\0';
2086 			slen = strlen(buf);
2087 			if (left < slen + 1)
2088 				break;
2089 			/* +1 to copy out the trailing NUL byte */
2090 			error = copyout(buf, where, slen + 1);
2091 			if (error)
2092 				break;
2093 			where += slen;
2094 			needed += slen;
2095 			left -= slen;
2096 		}
2097 	}
2098 	*oldlenp = needed;
2099 	return (error);
2100 }
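
/*
 * Example: from userland the string built above is read with
 * sysctlbyname(3), first sizing the buffer and then fetching the
 * space-separated list.  A minimal standalone sketch, shown only to
 * illustrate the interface; it is not part of this file.
 */
#if 0	/* illustrative userland sketch, not compiled */
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *types;
	size_t len;

	/* First call with a NULL buffer just reports the needed size. */
	if (sysctlbyname("vfs.generic.fstypes", NULL, &len, NULL, 0) == -1)
		return 1;
	if ((types = malloc(len)) == NULL)
		return 1;
	if (sysctlbyname("vfs.generic.fstypes", types, &len, NULL, 0) == -1)
		return 1;
	printf("file systems: %s\n", types);
	free(types);
	return 0;
}
#endif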
2101 
2102 /*
2103  * Top level filesystem related information gathering.
2104  */
2105 SYSCTL_SETUP(sysctl_vfs_setup, "sysctl vfs subtree setup")
2106 {
2107 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2108 	extern int nmountcompatnames;
2109 #endif
2110 
2111 	sysctl_createv(clog, 0, NULL, NULL,
2112 		       CTLFLAG_PERMANENT,
2113 		       CTLTYPE_NODE, "vfs", NULL,
2114 		       NULL, 0, NULL, 0,
2115 		       CTL_VFS, CTL_EOL);
2116 	sysctl_createv(clog, 0, NULL, NULL,
2117 		       CTLFLAG_PERMANENT,
2118 		       CTLTYPE_NODE, "generic",
2119 		       SYSCTL_DESCR("Non-specific vfs related information"),
2120 		       NULL, 0, NULL, 0,
2121 		       CTL_VFS, VFS_GENERIC, CTL_EOL);
2122 
2123 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2124 	sysctl_createv(clog, 0, NULL, NULL,
2125 		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
2126 		       CTLTYPE_INT, "maxtypenum",
2127 		       SYSCTL_DESCR("Highest valid filesystem type number"),
2128 		       NULL, nmountcompatnames, NULL, 0,
2129 		       CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM, CTL_EOL);
2130 #endif
2131 	sysctl_createv(clog, 0, NULL, NULL,
2132 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2133 		       CTLTYPE_INT, "usermount",
2134 		       SYSCTL_DESCR("Whether unprivileged users may mount "
2135 				    "filesystems"),
2136 		       NULL, 0, &dovfsusermount, 0,
2137 		       CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
2138 	sysctl_createv(clog, 0, NULL, NULL,
2139 		       CTLFLAG_PERMANENT,
2140 		       CTLTYPE_STRING, "fstypes",
2141 		       SYSCTL_DESCR("List of file systems present"),
2142 		       sysctl_vfs_generic_fstypes, 0, NULL, 0,
2143 		       CTL_VFS, VFS_GENERIC, CTL_CREATE, CTL_EOL);
2144 #if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
2145 	sysctl_createv(clog, 0, NULL, NULL,
2146 		       CTLFLAG_PERMANENT,
2147 		       CTLTYPE_STRUCT, "conf",
2148 		       SYSCTL_DESCR("Filesystem configuration information"),
2149 		       sysctl_vfs_generic_conf, 0, NULL,
2150 		       sizeof(struct vfsconf),
2151 		       CTL_VFS, VFS_GENERIC, VFS_CONF, CTL_EOL);
2152 #endif
2153 }
2154 
2155 
2156 int kinfo_vdebug = 1;
2157 int kinfo_vgetfailed;
2158 #define KINFO_VNODESLOP	10
2159 /*
2160  * Dump vnode list (via sysctl).
2161  * Copyout address of vnode followed by vnode.
2162  */
2163 /* ARGSUSED */
2164 int
2165 sysctl_kern_vnode(SYSCTLFN_ARGS)
2166 {
2167 	char *where = oldp;
2168 	size_t *sizep = oldlenp;
2169 	struct mount *mp, *nmp;
2170 	struct vnode *nvp, *vp;
2171 	char *bp = where, *savebp;
2172 	char *ewhere;
2173 	int error;
2174 
2175 	if (namelen != 0)
2176 		return (EOPNOTSUPP);
2177 	if (newp != NULL)
2178 		return (EPERM);
2179 
2180 #define VPTRSZ	sizeof(struct vnode *)
2181 #define VNODESZ	sizeof(struct vnode)
2182 	if (where == NULL) {
2183 		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
2184 		return (0);
2185 	}
2186 	ewhere = where + *sizep;
2187 
2188 	simple_lock(&mountlist_slock);
2189 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
2190 	     mp = nmp) {
2191 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
2192 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
2193 			continue;
2194 		}
2195 		savebp = bp;
2196 again:
2197 		simple_lock(&mntvnode_slock);
2198 		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2199 		     vp != NULL;
2200 		     vp = nvp) {
2201 			/*
2202 			 * Check that the vp is still associated with
2203 			 * this filesystem.  RACE: could have been
2204 			 * recycled onto the same filesystem.
2205 			 */
2206 			if (vp->v_mount != mp) {
2207 				simple_unlock(&mntvnode_slock);
2208 				if (kinfo_vdebug)
2209 					printf("kinfo: vp changed\n");
2210 				bp = savebp;
2211 				goto again;
2212 			}
2213 			nvp = LIST_NEXT(vp, v_mntvnodes);
2214 			if (bp + VPTRSZ + VNODESZ > ewhere) {
2215 				simple_unlock(&mntvnode_slock);
2216 				*sizep = bp - where;
2217 				return (ENOMEM);
2218 			}
2219 			simple_unlock(&mntvnode_slock);
2220 			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
2221 			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
2222 				return (error);
2223 			bp += VPTRSZ + VNODESZ;
2224 			simple_lock(&mntvnode_slock);
2225 		}
2226 		simple_unlock(&mntvnode_slock);
2227 		simple_lock(&mountlist_slock);
2228 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
2229 		vfs_unbusy(mp);
2230 	}
2231 	simple_unlock(&mountlist_slock);
2232 
2233 	*sizep = bp - where;
2234 	return (0);
2235 }
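
/*
 * Example: the dump produced above is consumed as a sequence of
 * (struct vnode *, struct vnode) pairs, which is how pstat(8)
 * traditionally reads it.  A hedged userland sketch of the
 * size-then-fetch pattern; it assumes the kernel struct vnode
 * definition is visible to the program, as pstat arranges, and it
 * omits all but the most basic error handling.
 */
#if 0	/* illustrative userland sketch, not compiled */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_VNODE };
	size_t len, step = sizeof(struct vnode *) + sizeof(struct vnode);
	char *buf, *p;

	/* The size estimate includes KINFO_VNODESLOP worth of slack. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		return 1;
	if ((buf = malloc(len)) == NULL)
		return 1;
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return 1;

	/* Walk the (kernel address, vnode contents) pairs. */
	for (p = buf; p + step <= buf + len; p += step) {
		struct vnode *kaddr = *(struct vnode **)p;
		struct vnode *vn = (struct vnode *)(p + sizeof(struct vnode *));

		printf("vnode %p type %d usecount %d\n",
		    kaddr, vn->v_type, vn->v_usecount);
	}
	free(buf);
	return 0;
}
#endif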
2236 
2237 /*
2238  * Check to see if a filesystem is mounted on a block device.
2239  */
2240 int
2241 vfs_mountedon(vp)
2242 	struct vnode *vp;
2243 {
2244 	struct vnode *vq;
2245 	int error = 0;
2246 
2247 	if (vp->v_specmountpoint != NULL)
2248 		return (EBUSY);
2249 	if (vp->v_flag & VALIASED) {
2250 		simple_lock(&spechash_slock);
2251 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2252 			if (vq->v_rdev != vp->v_rdev ||
2253 			    vq->v_type != vp->v_type)
2254 				continue;
2255 			if (vq->v_specmountpoint != NULL) {
2256 				error = EBUSY;
2257 				break;
2258 			}
2259 		}
2260 		simple_unlock(&spechash_slock);
2261 	}
2262 	return (error);
2263 }
2264 
2265 static int
2266 sacheck(struct sockaddr *sa)
2267 {
2268 	switch (sa->sa_family) {
2269 #ifdef INET
2270 	case AF_INET: {
2271 		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
2272 		char *p = (char *)sin->sin_zero;
2273 		size_t i;
2274 
2275 		if (sin->sin_len != sizeof(*sin))
2276 			return -1;
2277 		if (sin->sin_port != 0)
2278 			return -1;
2279 		for (i = 0; i < sizeof(sin->sin_zero); i++)
2280 			if (*p++ != '\0')
2281 				return -1;
2282 		return 0;
2283 	}
2284 #endif
2285 #ifdef INET6
2286 	case AF_INET6: {
2287 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
2288 
2289 		if (sin6->sin6_len != sizeof(*sin6))
2290 			return -1;
2291 		if (sin6->sin6_port != 0)
2292 			return -1;
2293 		return 0;
2294 	}
2295 #endif
2296 	default:
2297 		return -1;
2298 	}
2299 }
2300 
2301 /*
2302  * Build hash lists of net addresses and hang them off the mount point.
2303  * Called by vfs_export() to set up the lists of export addresses.
2304  */
2305 static int
2306 vfs_hang_addrlist(mp, nep, argp)
2307 	struct mount *mp;
2308 	struct netexport *nep;
2309 	struct export_args *argp;
2310 {
2311 	struct netcred *np, *enp;
2312 	struct radix_node_head *rnh;
2313 	int i;
2314 	struct sockaddr *saddr, *smask = 0;
2315 	struct domain *dom;
2316 	int error;
2317 
2318 	if (argp->ex_addrlen == 0) {
2319 		if (mp->mnt_flag & MNT_DEFEXPORTED)
2320 			return (EPERM);
2321 		np = &nep->ne_defexported;
2322 		np->netc_exflags = argp->ex_flags;
2323 		crcvt(&np->netc_anon, &argp->ex_anon);
2324 		np->netc_anon.cr_ref = 1;
2325 		mp->mnt_flag |= MNT_DEFEXPORTED;
2326 		return (0);
2327 	}
2328 
2329 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN)
2330 		return (EINVAL);
2331 
2332 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2333 	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
2334 	memset((caddr_t)np, 0, i);
2335 	saddr = (struct sockaddr *)(np + 1);
2336 	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
2337 	if (error)
2338 		goto out;
2339 	if (saddr->sa_len > argp->ex_addrlen)
2340 		saddr->sa_len = argp->ex_addrlen;
2341 	if (sacheck(saddr) == -1) {
2342 		error = EINVAL;
2343 		goto out;
2344 	}
2343 	if (argp->ex_masklen) {
2344 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
2345 		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
2346 		if (error)
2347 			goto out;
2348 		if (smask->sa_len > argp->ex_masklen)
2349 			smask->sa_len = argp->ex_masklen;
2350 		if (smask->sa_family != saddr->sa_family ||
2351 		    sacheck(smask) == -1) {
2352 			error = EINVAL;
2353 			goto out;
2354 		}
2354 	}
2355 	i = saddr->sa_family;
2356 	if ((rnh = nep->ne_rtable[i]) == 0) {
2357 		/*
2358 		 * It seems silly to initialize every address family when
2359 		 * most are never used, so do it on demand here.
2360 		 */
2361 		for (dom = domains; dom; dom = dom->dom_next)
2362 			if (dom->dom_family == i && dom->dom_rtattach) {
2363 				dom->dom_rtattach((void **)&nep->ne_rtable[i],
2364 					dom->dom_rtoffset);
2365 				break;
2366 			}
2367 		if ((rnh = nep->ne_rtable[i]) == 0) {
2368 			error = ENOBUFS;
2369 			goto out;
2370 		}
2371 	}
2372 
2373 	enp = (struct netcred *)(*rnh->rnh_addaddr)(saddr, smask, rnh,
2374 	    np->netc_rnodes);
2375 	if (enp != np) {
2376 		if (enp == NULL) {
2377 			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
2378 			    smask, rnh);
2379 			if (enp == NULL) {
2380 				error = EPERM;
2381 				goto out;
2382 			}
2383 		} else
2384 			enp->netc_refcnt++;
2385 
2386 		goto check;
2387 	} else
2388 		enp->netc_refcnt = 1;
2389 
2390 	np->netc_exflags = argp->ex_flags;
2391 	crcvt(&np->netc_anon, &argp->ex_anon);
2392 	np->netc_anon.cr_ref = 1;
2393 	return 0;
2394 check:
2395 	if (enp->netc_exflags != argp->ex_flags ||
2396 	    crcmp(&enp->netc_anon, &argp->ex_anon) != 0)
2397 		error = EPERM;
2398 	else
2399 		error = 0;
2400 out:
2401 	free(np, M_NETADDR);
2402 	return error;
2403 }
2404 
2405 /* ARGSUSED */
2406 static int
2407 vfs_free_netcred(rn, w)
2408 	struct radix_node *rn;
2409 	void *w;
2410 {
2411 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2412 	struct netcred *np = (struct netcred *)(void *)rn;
2413 
2414 	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
2415 	if (--(np->netc_refcnt) <= 0)
2416 		free(np, M_NETADDR);
2417 	return (0);
2418 }
2419 
2420 /*
2421  * Free the net address hash lists that are hanging off the mount points.
2422  */
2423 static void
2424 vfs_free_addrlist(nep)
2425 	struct netexport *nep;
2426 {
2427 	int i;
2428 	struct radix_node_head *rnh;
2429 
2430 	for (i = 0; i <= AF_MAX; i++)
2431 		if ((rnh = nep->ne_rtable[i]) != NULL) {
2432 			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
2433 			free((caddr_t)rnh, M_RTABLE);
2434 			nep->ne_rtable[i] = 0;
2435 		}
2436 }
2437 
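/*
 * Update the export information for a mount point: tear down the
 * existing export list on MNT_DELEXPORT, and/or install new export
 * entries (including the WebNFS public filesystem) when MNT_EXPORTED
 * is requested.
 */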
2438 int
2439 vfs_export(mp, nep, argp)
2440 	struct mount *mp;
2441 	struct netexport *nep;
2442 	struct export_args *argp;
2443 {
2444 	int error;
2445 
2446 	if (argp->ex_flags & MNT_DELEXPORT) {
2447 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2448 			vfs_setpublicfs(NULL, NULL, NULL);
2449 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2450 		}
2451 		vfs_free_addrlist(nep);
2452 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2453 	}
2454 	if (argp->ex_flags & MNT_EXPORTED) {
2455 		if (argp->ex_flags & MNT_EXPUBLIC) {
2456 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2457 				return (error);
2458 			mp->mnt_flag |= MNT_EXPUBLIC;
2459 		}
2460 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
2461 			return (error);
2462 		mp->mnt_flag |= MNT_EXPORTED;
2463 	}
2464 	return (0);
2465 }
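
/*
 * Example: individual file systems reach vfs_export from their mount
 * entry point when userland asks for an export update.  A hedged
 * sketch modelled on the ufs-style mount path; "xxfs", "xx_args" and
 * "xx_mount" are placeholders for the real, fs-specific names.
 */
#if 0	/* illustrative sketch, not compiled */
struct xx_args {
	char *fspec;			/* block device, NULL on export-only update */
	struct export_args export;	/* network export information */
};

struct xx_mount {
	struct netexport xm_export;	/* per-mount export address lists */
	/* ... other fs-private state ... */
};

int
xxfs_update_exports(struct mount *mp, struct xx_args *args)
{
	struct xx_mount *xmp = (struct xx_mount *)mp->mnt_data;

	/*
	 * An MNT_UPDATE with no device name only changes the export
	 * information of an already mounted file system.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) && args->fspec == NULL)
		return vfs_export(mp, &xmp->xm_export, &args->export);

	return EINVAL;
}
#endif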
2466 
2467 /*
2468  * Set the publicly exported filesystem (WebNFS). Currently, only
2469  * one public filesystem is possible in the spec (RFC 2054 and 2055)
2470  */
2471 int
2472 vfs_setpublicfs(mp, nep, argp)
2473 	struct mount *mp;
2474 	struct netexport *nep;
2475 	struct export_args *argp;
2476 {
2477 	int error;
2478 	struct vnode *rvp;
2479 	char *cp;
2480 
2481 	/*
2482 	 * mp == NULL -> invalidate the current info, the FS is
2483 	 * no longer exported. May be called from either vfs_export
2484 	 * or unmount, so check if it hasn't already been done.
2485 	 */
2486 	if (mp == NULL) {
2487 		if (nfs_pub.np_valid) {
2488 			nfs_pub.np_valid = 0;
2489 			if (nfs_pub.np_index != NULL) {
2490 				FREE(nfs_pub.np_index, M_TEMP);
2491 				nfs_pub.np_index = NULL;
2492 			}
2493 		}
2494 		return (0);
2495 	}
2496 
2497 	/*
2498 	 * Only one allowed at a time.
2499 	 */
2500 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2501 		return (EBUSY);
2502 
2503 	/*
2504 	 * Get real filehandle for root of exported FS.
2505 	 */
2506 	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
2507 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsidx;
2508 
2509 	if ((error = VFS_ROOT(mp, &rvp)))
2510 		return (error);
2511 
2512 	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
2513 		return (error);
2514 
2515 	vput(rvp);
2516 
2517 	/*
2518 	 * If an indexfile was specified, pull it in.
2519 	 */
2520 	if (argp->ex_indexfile != NULL) {
2521 		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
2522 		    M_WAITOK);
2523 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2524 		    MAXNAMLEN, (size_t *)0);
2525 		if (!error) {
2526 			/*
2527 			 * Check for illegal filenames.
2528 			 */
2529 			for (cp = nfs_pub.np_index; *cp; cp++) {
2530 				if (*cp == '/') {
2531 					error = EINVAL;
2532 					break;
2533 				}
2534 			}
2535 		}
2536 		if (error) {
2537 			FREE(nfs_pub.np_index, M_TEMP);
2538 			return (error);
2539 		}
2540 	}
2541 
2542 	nfs_pub.np_mount = mp;
2543 	nfs_pub.np_valid = 1;
2544 	return (0);
2545 }
2546 
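/*
 * Look up the export permissions that apply to a client address on an
 * exported mount: search the address-family-specific radix tree built
 * by vfs_hang_addrlist first, then fall back to the default export
 * entry if one was set up.
 */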
2547 struct netcred *
2548 vfs_export_lookup(mp, nep, nam)
2549 	struct mount *mp;
2550 	struct netexport *nep;
2551 	struct mbuf *nam;
2552 {
2553 	struct netcred *np;
2554 	struct radix_node_head *rnh;
2555 	struct sockaddr *saddr;
2556 
2557 	np = NULL;
2558 	if (mp->mnt_flag & MNT_EXPORTED) {
2559 		/*
2560 		 * Lookup in the export list first.
2561 		 */
2562 		if (nam != NULL) {
2563 			saddr = mtod(nam, struct sockaddr *);
2564 			rnh = nep->ne_rtable[saddr->sa_family];
2565 			if (rnh != NULL) {
2566 				np = (struct netcred *)
2567 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2568 							      rnh);
2569 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2570 					np = NULL;
2571 			}
2572 		}
2573 		/*
2574 		 * If no address match, use the default if it exists.
2575 		 */
2576 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2577 			np = &nep->ne_defexported;
2578 	}
2579 	return (np);
2580 }
2581 
2582 /*
2583  * Do the usual access checking.
2584  * file_mode, uid and gid are from the vnode in question,
2585  * while acc_mode and cred are from the VOP_ACCESS parameter list
2586  */
2587 int
2588 vaccess(type, file_mode, uid, gid, acc_mode, cred)
2589 	enum vtype type;
2590 	mode_t file_mode;
2591 	uid_t uid;
2592 	gid_t gid;
2593 	mode_t acc_mode;
2594 	struct ucred *cred;
2595 {
2596 	mode_t mask;
2597 
2598 	/*
2599 	 * Super-user always gets read/write access, but execute access depends
2600 	 * on at least one execute bit being set.
2601 	 */
2602 	if (cred->cr_uid == 0) {
2603 		if ((acc_mode & VEXEC) && type != VDIR &&
2604 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
2605 			return (EACCES);
2606 		return (0);
2607 	}
2608 
2609 	mask = 0;
2610 
2611 	/* Otherwise, check the owner. */
2612 	if (cred->cr_uid == uid) {
2613 		if (acc_mode & VEXEC)
2614 			mask |= S_IXUSR;
2615 		if (acc_mode & VREAD)
2616 			mask |= S_IRUSR;
2617 		if (acc_mode & VWRITE)
2618 			mask |= S_IWUSR;
2619 		return ((file_mode & mask) == mask ? 0 : EACCES);
2620 	}
2621 
2622 	/* Otherwise, check the groups. */
2623 	if (cred->cr_gid == gid || groupmember(gid, cred)) {
2624 		if (acc_mode & VEXEC)
2625 			mask |= S_IXGRP;
2626 		if (acc_mode & VREAD)
2627 			mask |= S_IRGRP;
2628 		if (acc_mode & VWRITE)
2629 			mask |= S_IWGRP;
2630 		return ((file_mode & mask) == mask ? 0 : EACCES);
2631 	}
2632 
2633 	/* Otherwise, check everyone else. */
2634 	if (acc_mode & VEXEC)
2635 		mask |= S_IXOTH;
2636 	if (acc_mode & VREAD)
2637 		mask |= S_IROTH;
2638 	if (acc_mode & VWRITE)
2639 		mask |= S_IWOTH;
2640 	return ((file_mode & mask) == mask ? 0 : EACCES);
2641 }
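
/*
 * Example: file systems usually implement VOP_ACCESS as a thin wrapper
 * that feeds the stored ownership and permission bits into vaccess.
 * A hedged sketch in the style of ufs_access; "xx_node" and its xn_*
 * fields are placeholders for the real per-fs inode structure.
 */
#if 0	/* illustrative sketch, not compiled */
int
xxfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct xx_node *xn = vp->v_data;

	/* Refuse writes to a read-only mount before looking at the bits. */
	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return EROFS;

	return vaccess(vp->v_type, xn->xn_mode & ALLPERMS,
	    xn->xn_uid, xn->xn_gid, ap->a_mode, ap->a_cred);
}
#endif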
2642 
2643 /*
2644  * Unmount all file systems.
2645  * We traverse the list in reverse order under the assumption that doing so
2646  * will avoid needing to worry about dependencies.
2647  */
2648 void
2649 vfs_unmountall(p)
2650 	struct proc *p;
2651 {
2652 	struct mount *mp, *nmp;
2653 	int allerror, error;
2654 
2655 	for (allerror = 0,
2656 	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
2657 		nmp = mp->mnt_list.cqe_prev;
2658 #ifdef DEBUG
2659 		printf("unmounting %s (%s)...\n",
2660 		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
2661 #endif
2662 		/*
2663 		 * XXX Freeze syncer.  Must do this before locking the
2664 		 * mount point.  See dounmount() for details.
2665 		 */
2666 		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
2667 		if (vfs_busy(mp, 0, 0)) {
2668 			lockmgr(&syncer_lock, LK_RELEASE, NULL);
2669 			continue;
2670 		}
2671 		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
2672 			printf("unmount of %s failed with error %d\n",
2673 			    mp->mnt_stat.f_mntonname, error);
2674 			allerror = 1;
2675 		}
2676 	}
2677 	if (allerror)
2678 		printf("WARNING: some file systems would not unmount\n");
2679 }
2680 
2681 extern struct simplelock bqueue_slock; /* XXX */
2682 
2683 /*
2684  * Sync and unmount file systems before shutting down.
2685  */
2686 void
2687 vfs_shutdown()
2688 {
2689 	struct lwp *l = curlwp;
2690 	struct proc *p;
2691 
2692 	/* XXX we're certainly not running in proc0's context! */
2693 	if (l == NULL || (p = l->l_proc) == NULL)
2694 		p = &proc0;
2695 
2696 	printf("syncing disks... ");
2697 
2698 	/* remove user process from run queue */
2699 	suspendsched();
2700 	(void) spl0();
2701 
2702 	/* avoid coming back this way again if we panic. */
2703 	doing_shutdown = 1;
2704 
2705 	sys_sync(l, NULL, NULL);
2706 
2707 	/* Wait for sync to finish. */
2708 	if (buf_syncwait() != 0) {
2709 #if defined(DDB) && defined(DEBUG_HALT_BUSY)
2710 		Debugger();
2711 #endif
2712 		printf("giving up\n");
2713 		return;
2714 	} else
2715 		printf("done\n");
2716 
2717 	/*
2718 	 * If we've panic'd, don't make the situation potentially
2719 	 * worse by unmounting the file systems.
2720 	 */
2721 	if (panicstr != NULL)
2722 		return;
2723 
2724 	/* Release inodes held by texts before update. */
2725 #ifdef notdef
2726 	vnshutdown();
2727 #endif
2728 	/* Unmount file systems. */
2729 	vfs_unmountall(p);
2730 }
2731 
2732 /*
2733  * Mount the root file system.  If the operator didn't specify a
2734  * file system to use, try all possible file systems until one
2735  * succeeds.
2736  */
2737 int
2738 vfs_mountroot()
2739 {
2740 	struct vfsops *v;
2741 
2742 	if (root_device == NULL)
2743 		panic("vfs_mountroot: root device unknown");
2744 
2745 	switch (root_device->dv_class) {
2746 	case DV_IFNET:
2747 		if (rootdev != NODEV)
2748 			panic("vfs_mountroot: rootdev set for DV_IFNET "
2749 			    "(0x%08x -> %d,%d)", rootdev,
2750 			    major(rootdev), minor(rootdev));
2751 		break;
2752 
2753 	case DV_DISK:
2754 		if (rootdev == NODEV)
2755 			panic("vfs_mountroot: rootdev not set for DV_DISK");
2756 		break;
2757 
2758 	default:
2759 		printf("%s: inappropriate for root file system\n",
2760 		    root_device->dv_xname);
2761 		return (ENODEV);
2762 	}
2763 
2764 	/*
2765 	 * If user specified a file system, use it.
2766 	 */
2767 	if (mountroot != NULL)
2768 		return ((*mountroot)());
2769 
2770 	/*
2771 	 * Try each file system currently configured into the kernel.
2772 	 */
2773 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2774 		if (v->vfs_mountroot == NULL)
2775 			continue;
2776 #ifdef DEBUG
2777 		aprint_normal("mountroot: trying %s...\n", v->vfs_name);
2778 #endif
2779 		if ((*v->vfs_mountroot)() == 0) {
2780 			aprint_normal("root file system type: %s\n",
2781 			    v->vfs_name);
2782 			break;
2783 		}
2784 	}
2785 
2786 	if (v == NULL) {
2787 		printf("no file system for %s", root_device->dv_xname);
2788 		if (root_device->dv_class == DV_DISK)
2789 			printf(" (dev 0x%x)", rootdev);
2790 		printf("\n");
2791 		return (EFTYPE);
2792 	}
2793 	return (0);
2794 }
2795 
2796 /*
2797  * Given a file system name, look up the vfsops for that
2798  * file system, or return NULL if file system isn't present
2799  * in the kernel.
2800  */
2801 struct vfsops *
2802 vfs_getopsbyname(name)
2803 	const char *name;
2804 {
2805 	struct vfsops *v;
2806 
2807 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2808 		if (strcmp(v->vfs_name, name) == 0)
2809 			break;
2810 	}
2811 
2812 	return (v);
2813 }
2814 
2815 /*
2816  * Establish a file system and initialize it.
2817  */
2818 int
2819 vfs_attach(vfs)
2820 	struct vfsops *vfs;
2821 {
2822 	struct vfsops *v;
2823 	int error = 0;
2824 
2825 
2826 	/*
2827 	 * Make sure this file system doesn't already exist.
2828 	 */
2829 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2830 		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
2831 			error = EEXIST;
2832 			goto out;
2833 		}
2834 	}
2835 
2836 	/*
2837 	 * Initialize the vnode operations for this file system.
2838 	 */
2839 	vfs_opv_init(vfs->vfs_opv_descs);
2840 
2841 	/*
2842 	 * Now initialize the file system itself.
2843 	 */
2844 	(*vfs->vfs_init)();
2845 
2846 	/*
2847 	 * ...and link it into the kernel's list.
2848 	 */
2849 	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);
2850 
2851 	/*
2852 	 * Sanity: make sure the reference count is 0.
2853 	 */
2854 	vfs->vfs_refcount = 0;
2855 
2856  out:
2857 	return (error);
2858 }
2859 
2860 /*
2861  * Remove a file system from the kernel.
2862  */
2863 int
2864 vfs_detach(vfs)
2865 	struct vfsops *vfs;
2866 {
2867 	struct vfsops *v;
2868 
2869 	/*
2870 	 * Make sure no one is using the filesystem.
2871 	 */
2872 	if (vfs->vfs_refcount != 0)
2873 		return (EBUSY);
2874 
2875 	/*
2876 	 * ...and remove it from the kernel's list.
2877 	 */
2878 	LIST_FOREACH(v, &vfs_list, vfs_list) {
2879 		if (v == vfs) {
2880 			LIST_REMOVE(v, vfs_list);
2881 			break;
2882 		}
2883 	}
2884 
2885 	if (v == NULL)
2886 		return (ESRCH);
2887 
2888 	/*
2889 	 * Now run the file system-specific cleanups.
2890 	 */
2891 	(*vfs->vfs_done)();
2892 
2893 	/*
2894 	 * Free the vnode operations vector.
2895 	 */
2896 	vfs_opv_free(vfs->vfs_opv_descs);
2897 	return (0);
2898 }
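
/*
 * Example: a file system brought into the kernel after boot (e.g. as
 * an LKM) registers its vfsops with vfs_attach when it is loaded and
 * withdraws it with vfs_detach before unloading.  A hedged sketch;
 * "xxfs_vfsops" and the load/unload hooks are placeholders for
 * whatever framework actually invokes them.
 */
#if 0	/* illustrative sketch, not compiled */
extern struct vfsops xxfs_vfsops;

int
xxfs_load(void)
{
	/* Fails with EEXIST if a file system of the same name exists. */
	return vfs_attach(&xxfs_vfsops);
}

int
xxfs_unload(void)
{
	/* Fails with EBUSY while any xxfs instance is still mounted. */
	return vfs_detach(&xxfs_vfsops);
}
#endif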
2899 
2900 void
2901 vfs_reinit(void)
2902 {
2903 	struct vfsops *vfs;
2904 
2905 	LIST_FOREACH(vfs, &vfs_list, vfs_list) {
2906 		if (vfs->vfs_reinit) {
2907 			(*vfs->vfs_reinit)();
2908 		}
2909 	}
2910 }
2911 
2912 /*
2913  * Request a filesystem to suspend write operations.
2914  */
2915 int
2916 vfs_write_suspend(struct mount *mp, int slpflag, int slptimeo)
2917 {
2918 	struct proc *p = curproc;	/* XXX */
2919 	int error;
2920 
2921 	while ((mp->mnt_iflag & IMNT_SUSPEND)) {
2922 		if (slptimeo < 0)
2923 			return EWOULDBLOCK;
2924 		error = tsleep(&mp->mnt_flag, slpflag, "suspwt1", slptimeo);
2925 		if (error)
2926 			return error;
2927 	}
2928 	mp->mnt_iflag |= IMNT_SUSPEND;
2929 
2930 	simple_lock(&mp->mnt_slock);
2931 	if (mp->mnt_writeopcountupper > 0)
2932 		ltsleep(&mp->mnt_writeopcountupper, PUSER - 1, "suspwt",
2933 			0, &mp->mnt_slock);
2934 	simple_unlock(&mp->mnt_slock);
2935 
2936 	error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
2937 	if (error) {
2938 		vfs_write_resume(mp);
2939 		return error;
2940 	}
2941 	mp->mnt_iflag |= IMNT_SUSPENDLOW;
2942 
2943 	simple_lock(&mp->mnt_slock);
2944 	if (mp->mnt_writeopcountlower > 0)
2945 		ltsleep(&mp->mnt_writeopcountlower, PUSER - 1, "suspwt",
2946 			0, &mp->mnt_slock);
2947 	mp->mnt_iflag |= IMNT_SUSPENDED;
2948 	simple_unlock(&mp->mnt_slock);
2949 
2950 	return 0;
2951 }
2952 
2953 /*
2954  * Request a filesystem to resume write operations.
2955  */
2956 void
2957 vfs_write_resume(struct mount *mp)
2958 {
2959 
2960 	if ((mp->mnt_iflag & IMNT_SUSPEND) == 0)
2961 		return;
2962 	mp->mnt_iflag &= ~(IMNT_SUSPEND | IMNT_SUSPENDLOW | IMNT_SUSPENDED);
2963 	wakeup(&mp->mnt_flag);
2964 }
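
/*
 * Example: the suspend/resume pair is meant to bracket work that needs
 * a quiescent, fully synced file system, for instance taking a
 * consistent on-disk snapshot.  A minimal sketch only; PUSER here is
 * just the sleep priority used if another suspension is already in
 * progress.
 */
#if 0	/* illustrative sketch, not compiled */
int
example_with_writes_suspended(struct mount *mp)
{
	int error;

	/* Stop new writes, drain in-flight ones, and sync the fs. */
	error = vfs_write_suspend(mp, PUSER, 0);
	if (error)
		return error;

	/* ... operate on the now-quiescent file system ... */

	/* Allow write operations to proceed again. */
	vfs_write_resume(mp);
	return 0;
}
#endif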
2965 
2966 void
2967 copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
2968 {
2969 	const struct statvfs *mbp;
2970 
2971 	if (sbp == (mbp = &mp->mnt_stat))
2972 		return;
2973 
2974 	(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
2975 	sbp->f_fsid = mbp->f_fsid;
2976 	sbp->f_owner = mbp->f_owner;
2977 	sbp->f_flag = mbp->f_flag;
2978 	sbp->f_syncwrites = mbp->f_syncwrites;
2979 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2980 	sbp->f_syncreads = mbp->f_syncreads;
2981 	sbp->f_asyncreads = mbp->f_asyncreads;
2982 	(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
2983 	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
2984 	    sizeof(sbp->f_fstypename));
2985 	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
2986 	    sizeof(sbp->f_mntonname));
2987 	(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
2988 	    sizeof(sbp->f_mntfromname));
2989 }
2990 
2991 int
2992 set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
2993     struct mount *mp, struct proc *p)
2994 {
2995 	int error;
2996 	size_t size;
2997 	struct statvfs *sfs = &mp->mnt_stat;
2998 	int (*fun)(const void *, void *, size_t, size_t *);
2999 
3000 	(void)strncpy(mp->mnt_stat.f_fstypename, mp->mnt_op->vfs_name,
3001 	    sizeof(mp->mnt_stat.f_fstypename));
3002 
3003 	if (onp) {
3004 		struct cwdinfo *cwdi = p->p_cwdi;
3005 		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
3006 		if (cwdi->cwdi_rdir != NULL) {
3007 			size_t len;
3008 			char *bp;
3009 			char *path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3010 
3011 			if (!path) /* XXX can't happen with M_WAITOK */
3012 				return ENOMEM;
3013 
3014 			bp = path + MAXPATHLEN;
3015 			*--bp = '\0';
3016 			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
3017 			    path, MAXPATHLEN / 2, 0, p);
3018 			if (error) {
3019 				free(path, M_TEMP);
3020 				return error;
3021 			}
3022 
3023 			len = strlen(bp);
3024 			if (len > sizeof(sfs->f_mntonname) - 1)
3025 				len = sizeof(sfs->f_mntonname) - 1;
3026 			(void)strncpy(sfs->f_mntonname, bp, len);
3027 			free(path, M_TEMP);
3028 
3029 			if (len < sizeof(sfs->f_mntonname) - 1) {
3030 				error = (*fun)(onp, &sfs->f_mntonname[len],
3031 				    sizeof(sfs->f_mntonname) - len - 1, &size);
3032 				if (error)
3033 					return error;
3034 				size += len;
3035 			} else {
3036 				size = len;
3037 			}
3038 		} else {
3039 			error = (*fun)(onp, &sfs->f_mntonname,
3040 			    sizeof(sfs->f_mntonname) - 1, &size);
3041 			if (error)
3042 				return error;
3043 		}
3044 		(void)memset(sfs->f_mntonname + size, 0,
3045 		    sizeof(sfs->f_mntonname) - size);
3046 	}
3047 
3048 	if (fromp) {
3049 		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
3050 		error = (*fun)(fromp, sfs->f_mntfromname,
3051 		    sizeof(sfs->f_mntfromname) - 1, &size);
3052 		if (error)
3053 			return error;
3054 		(void)memset(sfs->f_mntfromname + size, 0,
3055 		    sizeof(sfs->f_mntfromname) - size);
3056 	}
3057 	return 0;
3058 }
3059 
3060 #ifdef DDB
3061 const char buf_flagbits[] =
3062 	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
3063 	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
3064 	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
3065 	"\32XXX\33VFLUSH";
3066 
3067 void
3068 vfs_buf_print(bp, full, pr)
3069 	struct buf *bp;
3070 	int full;
3071 	void (*pr)(const char *, ...);
3072 {
3073 	char buf[1024];
3074 
3075 	(*pr)("  vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" dev 0x%x\n",
3076 		  bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);
3077 
3078 	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
3079 	(*pr)("  error %d flags 0x%s\n", bp->b_error, buf);
3080 
3081 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
3082 		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
3083 	(*pr)("  data %p saveaddr %p dep %p\n",
3084 		  bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
3085 	(*pr)("  iodone %p\n", bp->b_iodone);
3086 }
3087 
3088 
3089 const char vnode_flagbits[] =
3090 	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\5EXECMAP"
3091 	"\11XLOCK\12XWANT\13BWAIT\14ALIASED"
3092 	"\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";
3093 
3094 const char * const vnode_tags[] = {
3095 	"VT_NON",
3096 	"VT_UFS",
3097 	"VT_NFS",
3098 	"VT_MFS",
3099 	"VT_MSDOSFS",
3100 	"VT_LFS",
3101 	"VT_LOFS",
3102 	"VT_FDESC",
3103 	"VT_PORTAL",
3104 	"VT_NULL",
3105 	"VT_UMAP",
3106 	"VT_KERNFS",
3107 	"VT_PROCFS",
3108 	"VT_AFS",
3109 	"VT_ISOFS",
3110 	"VT_UNION",
3111 	"VT_ADOSFS",
3112 	"VT_EXT2FS",
3113 	"VT_CODA",
3114 	"VT_FILECORE",
3115 	"VT_NTFS",
3116 	"VT_VFS",
3117 	"VT_OVERLAY",
3118 	"VT_SMBFS"
3119 };
3120 
3121 void
3122 vfs_vnode_print(vp, full, pr)
3123 	struct vnode *vp;
3124 	int full;
3125 	void (*pr)(const char *, ...);
3126 {
3127 	char buf[256];
3128 	const char *vtype, *vtag;
3129 
3130 	uvm_object_printit(&vp->v_uobj, full, pr);
3131 	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
3132 	(*pr)("\nVNODE flags %s\n", buf);
3133 	(*pr)("mp %p numoutput %d size 0x%llx\n",
3134 	      vp->v_mount, vp->v_numoutput, vp->v_size);
3135 
3136 	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
3137 	      vp->v_data, vp->v_usecount, vp->v_writecount,
3138 	      vp->v_holdcnt, vp->v_numoutput);
3139 
3140 	vtype = (vp->v_type >= 0 &&
3141 		 vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
3142 		vnode_types[vp->v_type] : "UNKNOWN";
3143 	vtag = (vp->v_tag >= 0 &&
3144 		vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
3145 		vnode_tags[vp->v_tag] : "UNKNOWN";
3146 
3147 	(*pr)("type %s(%d) tag %s(%d) mount %p typedata %p\n",
3148 	      vtype, vp->v_type, vtag, vp->v_tag,
3149 	      vp->v_mount, vp->v_mountedhere);
3150 
3151 	if (full) {
3152 		struct buf *bp;
3153 
3154 		(*pr)("clean bufs:\n");
3155 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
3156 			(*pr)(" bp %p\n", bp);
3157 			vfs_buf_print(bp, full, pr);
3158 		}
3159 
3160 		(*pr)("dirty bufs:\n");
3161 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
3162 			(*pr)(" bp %p\n", bp);
3163 			vfs_buf_print(bp, full, pr);
3164 		}
3165 	}
3166 }
3167 
3168 void
3169 vfs_mount_print(mp, full, pr)
3170 	struct mount *mp;
3171 	int full;
3172 	void (*pr)(const char *, ...);
3173 {
3174 	char sbuf[256];
3175 
3176 	(*pr)("vnodecovered = %p syncer = %p data = %p\n",
3177 			mp->mnt_vnodecovered,mp->mnt_syncer,mp->mnt_data);
3178 
3179 	(*pr)("fs_bshift = %d dev_bshift = %d maxsymlinklen = %d\n",
3180 			mp->mnt_fs_bshift,mp->mnt_dev_bshift,mp->mnt_maxsymlinklen);
3181 
3182 	bitmask_snprintf(mp->mnt_flag, __MNT_FLAG_BITS, sbuf, sizeof(sbuf));
3183 	(*pr)("flag = %s\n", sbuf);
3184 
3185 	bitmask_snprintf(mp->mnt_iflag, __IMNT_FLAG_BITS, sbuf, sizeof(sbuf));
3186 	(*pr)("iflag = %s\n", sbuf);
3187 
3188 	/* XXX use lockmgr_printinfo */
3189 	if (mp->mnt_lock.lk_sharecount)
3190 		(*pr)(" lock type %s: SHARED (count %d)", mp->mnt_lock.lk_wmesg,
3191 		    mp->mnt_lock.lk_sharecount);
3192 	else if (mp->mnt_lock.lk_flags & LK_HAVE_EXCL) {
3193 		(*pr)(" lock type %s: EXCL (count %d) by ",
3194 		    mp->mnt_lock.lk_wmesg, mp->mnt_lock.lk_exclusivecount);
3195 		if (mp->mnt_lock.lk_flags & LK_SPIN)
3196 			(*pr)("processor %lu", mp->mnt_lock.lk_cpu);
3197 		else
3198 			(*pr)("pid %d.%d", mp->mnt_lock.lk_lockholder,
3199 			    mp->mnt_lock.lk_locklwp);
3200 	} else
3201 		(*pr)(" not locked");
3202 	if ((mp->mnt_lock.lk_flags & LK_SPIN) == 0 && mp->mnt_lock.lk_waitcount > 0)
3203 		(*pr)(" with %d pending", mp->mnt_lock.lk_waitcount);
3204 
3205 	(*pr)("\n");
3206 
3207 	if (mp->mnt_unmounter) {
3208 		(*pr)("unmounter pid = %d ",mp->mnt_unmounter->p_pid);
3209 	}
3210 	(*pr)("wcnt = %d, writeopcountupper = %d, writeopcountlower = %d\n",
3211 		mp->mnt_wcnt,mp->mnt_writeopcountupper,mp->mnt_writeopcountlower);
3212 
3213 	(*pr)("statvfs cache:\n");
3214 	(*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
3215 	(*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
3216 	(*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
3217 
3218 	(*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks);
3219 	(*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree);
3220 	(*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail);
3221 	(*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd);
3222 
3223 	(*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files);
3224 	(*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree);
3225 	(*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail);
3226 	(*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd);
3227 
3228 	(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
3229 			mp->mnt_stat.f_fsidx.__fsid_val[0],
3230 			mp->mnt_stat.f_fsidx.__fsid_val[1]);
3231 
3232 	(*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
3233 	(*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
3234 
3235 	bitmask_snprintf(mp->mnt_stat.f_flag, __MNT_FLAG_BITS, sbuf,
3236 	    sizeof(sbuf));
3237 	(*pr)("\tflag = %s\n",sbuf);
3238 	(*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites);
3239 	(*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
3240 	(*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads);
3241 	(*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads);
3242 	(*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
3243 	(*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
3244 	(*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
3245 
3246 	{
3247 		int cnt = 0;
3248 		struct vnode *vp;
3249 		(*pr)("locked vnodes =");
3250 		/* XXX would take mountlist lock, except ddb may not have context */
3251 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
3252 			if (VOP_ISLOCKED(vp)) {
3253 				if ((++cnt % 6) == 0) {
3254 					(*pr)(" %p,\n\t", vp);
3255 				} else {
3256 					(*pr)(" %p,", vp);
3257 				}
3258 			}
3259 		}
3260 		(*pr)("\n");
3261 	}
3262 
3263 	if (full) {
3264 		int cnt = 0;
3265 		struct vnode *vp;
3266 		(*pr)("all vnodes =");
3267 		/* XXX would take mountlist lock, except ddb may not have context */
3268 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
3269 			if (!LIST_NEXT(vp, v_mntvnodes)) {
3270 				(*pr)(" %p", vp);
3271 			} else if ((++cnt % 6) == 0) {
3272 				(*pr)(" %p,\n\t", vp);
3273 			} else {
3274 				(*pr)(" %p,", vp);
3275 			}
3276 		}
3277 		(*pr)("\n");
3278 	}
3279 }
3280 
3281 #endif
3282