/*	$NetBSD: vfs_subr.c,v 1.149 2001/04/16 22:41:10 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
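
/*
 * These tables are normally consulted via the IFTOVT() and VTTOIF()
 * macros in <sys/vnode.h>, which index them by the file type bits of
 * a mode, e.g. (illustrative):
 *
 *	IFTOVT(S_IFDIR) == iftovt_tab[(S_IFDIR & S_IFMT) >> 12] == VDIR
 *	VTTOIF(VREG)    == vttoif_tab[VREG] == S_IFREG
 */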

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;			/* root device */

struct pool vnode_pool;				/* memory pool for vnodes */

/*
 * Local declarations.
 */
void insmntque __P((struct vnode *, struct mount *));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
void vgoneall __P((struct vnode *));

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
				  struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);

	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
{
	int lkflags;

	while (mp->mnt_flag & MNT_UNMOUNT) {
		int gone;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curproc)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * XXX MP: add spinlock protecting mnt_wcnt here once you
		 * can atomically unlock-and-sleep.
		 */
		mp->mnt_wcnt++;
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		mp->mnt_wcnt--;
		gone = mp->mnt_flag & MNT_GONE;

		if (mp->mnt_wcnt == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}
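
/*
 * A typical caller brackets its use of the mount point, e.g. (a
 * minimal sketch; error handling is the caller's own):
 *
 *	if ((error = vfs_busy(mp, LK_NOWAIT, NULL)) != 0)
 *		return (error);
 *	... operate on mp ...
 *	vfs_unbusy(mp);
 */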

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
	struct mount *mp;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	for (vfsp = LIST_FIRST(&vfs_list); vfsp != NULL;
	     vfsp = LIST_NEXT(vfsp, vfs_list))
		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = makefstype(mp->mnt_op->vfs_name);
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	const char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
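
/*
 * For example, makefstype("ffs") folds the name as follows:
 *
 *	rv = (0     << 2) ^ 'f'  ->  0x066
 *	rv = (0x066 << 2) ^ 'f'  ->  0x1fe
 *	rv = (0x1fe << 2) ^ 's'  ->  0x78b
 *
 * Distinct names may still collide; the result is only used as a
 * seed for fsid generation in vfs_getnewfsid() above.
 */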

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	struct vattr *vap;
{

	vap->va_type = VNON;

	/*
	 * Assign individually so that it is safe even if size and
	 * sign of each member are varied.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	extern struct uvm_pagerops uvm_vnodeops;
	struct uvm_object *uobj;
	struct proc *p = curproc;	/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int error = 0;
#ifdef DIAGNOSTIC
	int s;
#endif
	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock).
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will recycle a vnode from the vnode_hold_list, and half the
	 * time we will allocate a new one unless the list has grown to
	 * twice the desired size. We are reluctant to recycle vnodes
	 * from the vnode_hold_list because we will lose the identity
	 * of all its referencing buffers.
	 */

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	if (numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(listhd = &vnode_free_list) == NULL &&
	    (TAILQ_FIRST(listhd = &vnode_hold_list) == NULL || toggle))) {
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		memset(vp, 0, sizeof(*vp));
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0) {
					break;
				}
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			simple_unlock(&vnode_free_list_slock);
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't, vp %p", vp);
		TAILQ_REMOVE(listhd, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		simple_unlock(&vnode_free_list_slock);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data)
			panic("cleaned vnode isn't, vp %p", vp);
		s = splbio();
		if (vp->v_numoutput)
			panic("clean vnode has pending I/O's, vp %p", vp);
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	lockinit(&vp->v_glock, PVFS, "glock", 0, 0);
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);

	/*
	 * initialize uvm_object within vnode.
	 */

	uobj = &vp->v_uvm.u_obj;
	uobj->pgops = &uvm_vnodeops;
	TAILQ_INIT(&uobj->memq);
	vp->v_uvm.u_size = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}
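
/*
 * A typical consumer is a file system's vget/inode-allocation path,
 * which allocates the vnode and then attaches its private data, along
 * the lines of (a sketch; the VT_UFS/ffs names are illustrative):
 *
 *	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;	(attach fs-private data, then finish setup)
 */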

/*
 * This is really just the reverse of getnewvnode(). Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vp)
	struct vnode *vp;
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 1)
		panic("ungetnewvnode: busy vnode");
#endif
	vp->v_usecount--;
	insmntque(vp, NULL);
	vp->v_type = VBAD;

	simple_lock(&vp->v_interlock);
	/*
	 * Insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput, vp %p", vp);
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;
	int flushflags = PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO|
		(flags & V_SAVE ? PGO_CLEANIT : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj, 0, 0, flushflags);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			return EIO;
		}
	}
	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
		if (error)
			return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
			panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	splx(s);

	return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t lbn;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;

	s = splbio();
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj,
		    round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift), 0,
		    PGO_FREE|PGO_SYNCIO);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			splx(s);
			return EIO;
		}
	}

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	splx(s);

	return (0);
}

void
vflushbuf(vp, sync)
	struct vnode *vp;
	int sync;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s;

	if (vp->v_type == VREG) {
		int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);

		simple_lock(&uobj->vmobjlock);
		(uobj->pgops->pgo_flush)(uobj, 0, 0, flags);
		simple_unlock(&uobj->vmobjlock);
	}

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_flags |= B_BUSY | B_VFLUSH;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	struct vnode *vp;
	struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free, bp %p", bp);
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == NULL)
		panic("brelvp: vp NULL, bp %p", bp);

	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);

	if (vp->v_type != VREG && (vp->v_flag & VONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}

	bp->b_vp = NULL;
	HOLDRELE(vp);
	splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		if (newvp->v_type != VREG &&
		    (newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
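
/*
 * For example, the root file system is conventionally mounted on a
 * vnode obtained with (illustrative error message):
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("mountroot: can't set up root vnode");
 */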

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set, the
 * vnode is being eliminated in vgone. In that case, we cannot
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			simple_unlock(&vp->v_interlock);
			return EBUSY;
		}
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD|PNORELOCK,
		    "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow, vp %p", vp);
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}
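
/*
 * Callers typically use vget() when taking a vnode out of a file
 * system hash table; a nonzero return means the vnode was (or is
 * being) reclaimed while we slept, and the lookup must be retried.
 * A sketch of the usual pattern:
 *
 *	loop:
 *		... find vp in the hash ...
 *		if (vget(vp, LK_EXCLUSIVE))
 *			goto loop;
 */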

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
		uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;
	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt vp %p", vp);
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
		uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt vp %p", vp);
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */

	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required, vp %p", vp);
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow, vp %p", vp);
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}
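
/*
 * vflush() does the bulk of the work of unmounting: a file system's
 * VFS_UNMOUNT entry typically runs something like (a sketch; the flag
 * computation follows the FORCECLOSE convention above):
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, NULLVP, flags)) != 0)
 *		return (error);
 */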

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		/* We have the vnode interlock. */
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
		uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */

			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_vnlock) {
				if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
					vprint("vclean: lock not drained", vp);
			}
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD | PNORELOCK,
		    "vgone", 0, &vp->v_interlock);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from the special device alias
	 * list, if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (vp->v_hashchain != NULL) {
			if (*vp->v_hashchain == vp) {
				*vp->v_hashchain = vp->v_specnext;
			} else {
				for (vq = *vp->v_hashchain; vq;
							vq = vq->v_specnext) {
					if (vq->v_specnext != vp)
						continue;
					vq->v_specnext = vp->v_specnext;
					break;
				}
				if (vq == NULL)
					panic("missing bdev");
			}
			if (vp->v_flag & VALIASED) {
				vx = NULL;
				for (vq = *vp->v_hashchain; vq;
							vq = vq->v_specnext) {
					if (vq->v_rdev != vp->v_rdev ||
					    vq->v_type != vp->v_type)
						continue;
					if (vx)
						break;
					vx = vq;
				}
				if (vx == NULL)
					panic("missing alias");
				if (vq == NULL)
					vx->v_flag &= ~VALIASED;
				vp->v_flag &= ~VALIASED;
			}
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean, vp %p", vp);
		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
		    TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}
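
/*
 * Device drivers call this from their detach paths to make sure no
 * stale vnodes reference the departing device, e.g. (illustrative;
 * maj and unit_max stand for the driver's own numbers):
 *
 *	vdevgone(maj, 0, unit_max, VBLK);
 *	vdevgone(maj, 0, unit_max, VCHR);
 */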

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static const char * const typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
	    vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
}
#endif

extern const char *mountcompatnames[];
extern const int nmountcompatnames;

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	struct vfsconf vfc;
#endif
	struct vfsops *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	/* Not generic: goes to file system. */
	if (name[0] != VFS_GENERIC) {
		if (name[0] >= nmountcompatnames || name[0] < 0 ||
		    mountcompatnames[name[0]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[0]]);
		if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	/* The rest are generic vfs sysctls. */
	switch (name[1]) {
	case VFS_USERMOUNT:
		return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	case VFS_MAXTYPENUM:
		/*
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
	case VFS_CONF:
		/*
		 * Special: a node, next is a file system name.
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		if (name[2] >= nmountcompatnames || name[2] < 0 ||
		    mountcompatnames[name[2]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfc.vfc_vfsops = vfsp;
		strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
		vfc.vfc_typenum = name[2];
		vfc.vfc_refcount = vfsp->vfs_refcount;
		vfc.vfc_flags = 0;
		vfc.vfc_mountroot = vfsp->vfs_mountroot;
		vfc.vfc_next = NULL;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
		    sizeof(struct vfsconf)));
#endif
	default:
		break;
	}
	return (EOPNOTSUPP);
}
1942 
1943 int kinfo_vdebug = 1;
1944 int kinfo_vgetfailed;
1945 #define KINFO_VNODESLOP	10
/*
 * Dump the vnode list (via sysctl).
 * Copy out the address of each vnode followed by the vnode itself.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
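	/*
	 * A NULL destination is a size probe: report the space needed,
	 * padded by KINFO_VNODESLOP entries, since vnodes may be created
	 * before the caller comes back with a buffer.
	 */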
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				return (ENOMEM);
			}
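			/*
			 * Drop the list lock around copyout(), which may
			 * fault and sleep; the v_mount check at the top of
			 * the loop catches vnodes recycled in the meantime.
			 */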
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	struct netcred *np, *enp;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
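	/*
	 * The netcred, the address and the (optional) mask live in one
	 * contiguous allocation; saddr and smask point just past the
	 * netcred header.
	 */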
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	memset((caddr_t)np, 0, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It seems silly to initialize every address family
		 * when most are never used; do it on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		if (rn == 0) {
			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
				smask, rnh);
			if (enp == 0) {
				error = EPERM;
				goto out;
			}
		} else
			enp = (struct netcred *)rn;

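		/*
		 * The address is already exported: accept the request
		 * only if it specifies identical flags and anonymous
		 * credentials, rather than silently changing the export.
		 */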
		if (enp->netc_exflags != argp->ex_flags ||
		    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
		    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
		    enp->netc_anon.cr_ngroups != argp->ex_anon.cr_ngroups ||
		    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
			enp->netc_anon.cr_ngroups))
			error = EPERM;
		else
			error = 0;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

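/*
 * Update the export list of a mount point.  MNT_DELEXPORT tears down
 * all existing exports (including a WebNFS public filehandle) before
 * any MNT_EXPORTED entries are installed.
 */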
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL means invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check whether it has already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);	/* don't leak the locked root vnode */
		return (error);
	}

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;	/* avoid a dangling pointer */
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
							      rnh);
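				/*
				 * A hit on the radix tree's own root node
				 * means "no real match".
				 */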
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question, while
 * acc_mode and cred are from the VOP_ACCESS parameter list.
 */
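/*
 * Example: a VREAD|VWRITE request by the owner of a 0644 file builds
 * mask = S_IRUSR|S_IWUSR and succeeds; the same request from an
 * unrelated user builds mask = S_IROTH|S_IWOTH and fails with EACCES.
 */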
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
	enum vtype type;
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (cred->cr_uid == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
	struct proc *p;
{
	struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		/*
		 * XXX Freeze syncer.  Must do this before locking the
		 * mount point.  See dounmount() for details.
		 */
		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
		if (vfs_busy(mp, 0, 0)) {
			lockmgr(&syncer_lock, LK_RELEASE, NULL);
			continue;
		}
		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s;
	struct proc *p = curproc;

	/* XXX we're certainly not running in proc0's context! */
	if (p == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* remove user processes from run queue */
	suspendsched();
	(void) spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
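	/*
	 * Make up to 20 passes over the buffer pool; iter only advances
	 * when a pass fails to shrink nbusy, and dcount bounds the total
	 * number of soft-updates buffers pushed with bawrite(), so a
	 * dependency cycle cannot stall the shutdown forever.
	 */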
	for (iter = 0; iter < 20;) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}
	if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif

#else  /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		return;
	} else
		printf("done\n");

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(p);
}

/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
	extern int (*mountroot) __P((void));
	struct vfsops *v;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET");
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If the user specified a file system, use it.
	 */
	if (mountroot != NULL)
		return ((*mountroot)());

	/*
	 * Try each file system currently configured into the kernel.
	 */
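	/*
	 * vfs_attach() inserts at the head of vfs_list, so this walk
	 * tries file systems in reverse order of attachment.
	 */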
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		printf("mountroot: trying %s...\n", v->vfs_name);
#endif
		if ((*v->vfs_mountroot)() == 0) {
			printf("root file system type: %s\n", v->vfs_name);
			break;
		}
	}

	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		return (EFTYPE);
	}
	return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
	const char *name;
{
	struct vfsops *v;

	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}

	return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

 out:
	return (error);
}

/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}

#ifdef DDB
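/*
 * Old-style kernel bitmask format strings: the first character is the
 * output base (\20 == hexadecimal), and each subsequent \N is a
 * 1-origin bit number followed by that bit's name.
 */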
const char buf_flagbits[] =
	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
	"\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
	struct buf *bp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[1024];

	(*pr)("  vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
		  bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
	(*pr)("  error %d flags 0x%s\n", bp->b_error, buf);

	(*pr)("  bufsize 0x%x bcount 0x%x resid 0x%x\n",
		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)("  data %p saveaddr %p dep %p\n",
		  bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)("  iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\11XLOCK\12XWANT\13BWAIT\14ALIASED"
	"\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";

const char *vnode_types[] = {
	"VNON",
	"VREG",
	"VDIR",
	"VBLK",
	"VCHR",
	"VLNK",
	"VSOCK",
	"VFIFO",
	"VBAD",
};

const char *vnode_tags[] = {
	"VT_NON",
	"VT_UFS",
	"VT_NFS",
	"VT_MFS",
	"VT_MSDOSFS",
	"VT_LFS",
	"VT_LOFS",
	"VT_FDESC",
	"VT_PORTAL",
	"VT_NULL",
	"VT_UMAP",
	"VT_KERNFS",
	"VT_PROCFS",
	"VT_AFS",
	"VT_ISOFS",
	"VT_UNION",
	"VT_ADOSFS",
	"VT_EXT2FS",
	"VT_CODA",
	"VT_FILECORE",
	"VT_NTFS",
	"VT_VFS",
	"VT_OVERLAY"
};

void
vfs_vnode_print(vp, full, pr)
	struct vnode *vp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[256];
	const char *vtype, *vtag;

	uvm_object_printit(&vp->v_uvm.u_obj, full, pr);
	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
	(*pr)("\nVNODE flags %s\n", buf);
	(*pr)("mp %p nio %d size 0x%x rwlock 0x%x glock 0x%x\n",
	      vp->v_mount, vp->v_uvm.u_nio, (int)vp->v_uvm.u_size,
	      vp->v_vnlock ? lockstatus(vp->v_vnlock) : 0x999,
	      lockstatus(&vp->v_glock));

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	vtype = (vp->v_type >= 0 &&
		 vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
		vnode_types[vp->v_type] : "UNKNOWN";
	vtag = (vp->v_tag >= 0 &&
		vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
		vnode_tags[vp->v_tag] : "UNKNOWN";

	(*pr)("type %s(%d) tag %s(%d) id 0x%x mount %p typedata %p\n",
	      vtype, vp->v_type, vtag, vp->v_tag,
	      vp->v_id, vp->v_mount, vp->v_mountedhere);
	(*pr)("lastr 0x%x lastw 0x%x lasta 0x%x\n",
	      vp->v_lastr, vp->v_lastw, vp->v_lasta);
	(*pr)("cstart 0x%x clen 0x%x ralen 0x%x maxra 0x%x\n",
	      vp->v_cstart, vp->v_clen, vp->v_ralen, vp->v_maxra);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
#endif