/*	$NetBSD: vfs_subr.c,v 1.119 2000/03/11 05:00:18 perseant Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm_extern.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock;
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
struct simplelock spechash_slock;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;			/* root device */

struct pool vnode_pool;				/* memory pool for vnodes */

/*
 * Local declarations.
 */
void insmntque __P((struct vnode *, struct mount *));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
void vgoneall __P((struct vnode *));

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
				  struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	simple_lock_init(&vnode_free_list_slock);

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);

	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
{
	int lkflags;

	while (mp->mnt_flag & MNT_UNMOUNT) {
		int gone;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curproc)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * XXX MP: add spinlock protecting mnt_wcnt here once you
		 * can atomically unlock-and-sleep.
		 */
		mp->mnt_wcnt++;
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		mp->mnt_wcnt--;
		gone = mp->mnt_flag & MNT_GONE;

		if (mp->mnt_wcnt == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
	struct mount *mp;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}
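
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the canonical vfs_busy()/vfs_unbusy() caller pattern for
 * walking the mount list, as used by printlockedvnodes() and
 * sysctl_vnode() below.  A failed vfs_busy() means the mount is being
 * unmounted, so the walker just skips it.
 */
#if 0	/* example only */
	struct mount *mp, *nmp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		/* ... operate on the busied mount ... */
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
#endif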

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	for (vfsp = LIST_FIRST(&vfs_list); vfsp != NULL;
	     vfsp = LIST_NEXT(vfsp, vfs_list))
		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp, fstypename)
	struct mount *mp;
	char *fstypename;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = makefstype(fstypename);
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
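
/*
 * Worked example (editorial addition): makefstype() folds each byte of
 * the name into the accumulator with a shift-and-xor.  For "ffs"
 * ('f' == 0x66, 's' == 0x73) the steps are:
 *
 *	rv = (0x000 << 2) ^ 0x66 = 0x066
 *	rv = (0x066 << 2) ^ 0x66 = 0x1fe
 *	rv = (0x1fe << 2) ^ 0x73 = 0x78b
 *
 * so makefstype("ffs") == 0x78b.  The result is only 'unique' enough
 * to seed vfs_getnewfsid() above, which probes for collisions anyway.
 */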


/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;

	/*
	 * Assign individually so that it is safe even if the size and
	 * sign of each member vary.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_vaflags = 0;
}
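
/*
 * Illustrative sketch (editorial addition): a typical VOP_SETATTR()
 * caller clears every attribute to VNOVAL with vattr_null() and then
 * fills in only the fields it intends to change, e.g. to truncate a
 * file to zero length (locking and error handling elided):
 */
#if 0	/* example only */
	struct vattr va;
	int error;

	vattr_null(&va);
	va.va_size = 0;			/* the one attribute to set */
	error = VOP_SETATTR(vp, &va, cred, p);
#endif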

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	struct proc *p = curproc;	/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int error = 0;
#ifdef DIAGNOSTIC
	int s;
#endif
	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock).
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	if (numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(listhd = &vnode_free_list) == NULL &&
	    (TAILQ_FIRST(listhd = &vnode_hold_list) == NULL || toggle))) {
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		memset((char *)vp, 0, sizeof(*vp));
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0) {
					break;
				}
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			simple_unlock(&vnode_free_list_slock);
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		TAILQ_REMOVE(listhd, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		simple_unlock(&vnode_free_list_slock);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data)
			panic("cleaned vnode isn't");
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}
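
/*
 * Illustrative sketch (editorial addition): a file system's own vget
 * routine typically obtains a fresh vnode here and then attaches its
 * per-file private data.  The names `ffs_vnodeop_p' and `ip' are
 * placeholders for whatever the particular file system uses.
 */
#if 0	/* example only */
	struct vnode *vp;
	int error;

	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0)
		return (error);
	vp->v_data = ip;		/* file system private data */
#endif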

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1,
			    "vbwait", 0);
		}
		splx(s);
		if ((error = VOP_FSYNC(vp, cred, FSYNC_WAIT, p)) != 0)
		        return (error);
		s = splbio();
		if (vp->v_numoutput > 0 || vp->v_dirtyblkhd.lh_first != NULL)
		        panic("vinvalbuf: dirty bufs");
		splx(s);
	}

	s = splbio();

	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA)) {
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		}
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bp->b_flags |= B_BUSY | B_VFLUSH;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				VOP_BWRITE(bp);
#ifdef DEBUG
				printf("buffer still DELWRI\n");
#endif
				/* VOP_FSYNC(vp, cred, FSYNC_WAIT, p); */
				continue;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}

	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");

	splx(s);

	return (0);
}
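
/*
 * Illustrative sketch (editorial addition): vclean() below passes
 * V_SAVE to write dirty buffers back before invalidating them, while
 * a caller that is about to discard the data passes 0:
 */
#if 0	/* example only */
	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);	/* flush, then toss */
	error = vinvalbuf(vp, 0, cred, p, 0, 0);	/* just toss */
#endif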

void
vflushbuf(vp, sync)
	register struct vnode *vp;
	int sync;
{
	register struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bp->b_flags |= B_BUSY | B_VFLUSH;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (vp->v_dirtyblkhd.lh_first != NULL) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");

	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	if ((vp->v_flag & VONWORKLST) && LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
	splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		if ((newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
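
/*
 * Illustrative sketch (editorial addition): bdevvp() is how early boot
 * code obtains a vnode for the root device before any file system is
 * mounted; `rootdev' is the device number configured or discovered at
 * boot time.
 */
#if 0	/* example only */
	struct vnode *rootvp;

	if (bdevvp(rootdev, &rootvp))
		panic("cannot obtain root vnode");
#endif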

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;       /* XXX */
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set the
 * vnode is being eliminated in vgone. In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow");
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}
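
/*
 * Illustrative sketch (editorial addition): the usual way to take a
 * new, locked reference on a vnode found on some list.  A non-zero
 * return means the vnode was being recycled, and the caller simply
 * retries its lookup.
 */
#if 0	/* example only */
	if (vget(vp, LK_EXCLUSIVE))
		goto retry;		/* vnode was being cleaned out */
	/* ... use the locked vnode ... */
	vput(vp);			/* unlock and release */
#endif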

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}
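
/*
 * Editorial note: vput() and vrele() differ only in the lock state
 * expected on entry -- vput() takes a locked vnode and unlocks it,
 * vrele() takes an unlocked one -- so these two fragments release a
 * locked reference equivalently:
 */
#if 0	/* example only */
	vput(vp);
	/* -- or -- */
	VOP_UNLOCK(vp, 0);
	vrele(vp);
#endif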

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow");
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	register struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}
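
/*
 * Illustrative sketch (editorial addition): a file system's unmount
 * path flushes every vnode on the mount, skipping none; a non-forced
 * unmount gives up with EBUSY if any vnode is still active.  The
 * `forced' flag is an assumed local.
 */
#if 0	/* example only */
	int error, flags;

	flags = forced ? FORCECLOSE : 0;
	if ((error = vflush(mp, NULLVP, flags)) != 0)
		return (error);		/* EBUSY: active vnodes remain */
#endif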

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		/* We have the vnode interlock. */
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");

	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */
			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_vnlock) {
				if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
					vprint("vclean: lock not drained", vp);
			}
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	register struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from the special device alias list,
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (vp->v_hashchain != NULL) {
			if (*vp->v_hashchain == vp) {
				*vp->v_hashchain = vp->v_specnext;
			} else {
				for (vq = *vp->v_hashchain; vq;
							vq = vq->v_specnext) {
					if (vq->v_specnext != vp)
						continue;
					vq->v_specnext = vp->v_specnext;
					break;
				}
				if (vq == NULL)
					panic("missing bdev");
			}
			if (vp->v_flag & VALIASED) {
				vx = NULL;
				for (vq = *vp->v_hashchain; vq;
							vq = vq->v_specnext) {
					if (vq->v_rdev != vp->v_rdev ||
					    vq->v_type != vp->v_type)
						continue;
					if (vx)
						break;
					vx = vq;
				}
				if (vx == NULL)
					panic("missing alias");
				if (vq == NULL)
					vx->v_flag &= ~VALIASED;
				vp->v_flag &= ~VALIASED;
			}
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");
		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
		    TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("tag %d type %s, usecount %ld, writecount %ld, refcount %ld,",
	    vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
}
#endif

extern const char *mountcompatnames[];
extern const int nmountcompatnames;

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	struct vfsconf vfc;
#endif
	struct vfsops *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	/* Not generic: goes to file system. */
	if (name[0] != VFS_GENERIC) {
		if (name[0] >= nmountcompatnames || name[0] < 0 ||
		    mountcompatnames[name[0]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[0]]);
		if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	/* The rest are generic vfs sysctls. */
	switch (name[1]) {
	case VFS_USERMOUNT:
		return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	case VFS_MAXTYPENUM:
		/*
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
	case VFS_CONF:
		/*
		 * Special: a node, next is a file system name.
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		if (name[2] >= nmountcompatnames || name[2] < 0 ||
		    mountcompatnames[name[2]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfc.vfc_vfsops = vfsp;
		strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
		vfc.vfc_typenum = name[2];
		vfc.vfc_refcount = vfsp->vfs_refcount;
		vfc.vfc_flags = 0;
		vfc.vfc_mountroot = vfsp->vfs_mountroot;
		vfc.vfc_next = NULL;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
		    sizeof(struct vfsconf)));
#endif
	default:
		break;
	}
	return (EOPNOTSUPP);
}
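
/*
 * Illustrative sketch (editorial addition): the VFS_USERMOUNT knob
 * handled above is reachable from userland via sysctl(3) as
 * vfs.generic.usermount.  This fragment is ordinary user code (it
 * needs <sys/param.h>, <sys/sysctl.h>, <stdio.h> and <err.h>), not
 * kernel code.
 */
#if 0	/* example only (userland) */
	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_USERMOUNT };
	int val;
	size_t len = sizeof(val);

	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1)
		err(1, "sysctl");
	printf("vfs.generic.usermount = %d\n", val);
#endif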
1794 
1795 int kinfo_vdebug = 1;
1796 int kinfo_vgetfailed;
1797 #define KINFO_VNODESLOP	10
1798 /*
1799  * Dump vnode list (via sysctl).
1800  * Copyout address of vnode followed by vnode.
1801  */
1802 /* ARGSUSED */
1803 int
1804 sysctl_vnode(where, sizep, p)
1805 	char *where;
1806 	size_t *sizep;
1807 	struct proc *p;
1808 {
1809 	struct mount *mp, *nmp;
1810 	struct vnode *nvp, *vp;
1811 	char *bp = where, *savebp;
1812 	char *ewhere;
1813 	int error;
1814 
1815 #define VPTRSZ	sizeof(struct vnode *)
1816 #define VNODESZ	sizeof(struct vnode)
1817 	if (where == NULL) {
1818 		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
1819 		return (0);
1820 	}
1821 	ewhere = where + *sizep;
1822 
1823 	simple_lock(&mountlist_slock);
1824 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
1825 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
1826 			nmp = mp->mnt_list.cqe_next;
1827 			continue;
1828 		}
1829 		savebp = bp;
1830 again:
1831 		simple_lock(&mntvnode_slock);
1832 		for (vp = mp->mnt_vnodelist.lh_first;
1833 		     vp != NULL;
1834 		     vp = nvp) {
1835 			/*
1836 			 * Check that the vp is still associated with
1837 			 * this filesystem.  RACE: could have been
1838 			 * recycled onto the same filesystem.
1839 			 */
1840 			if (vp->v_mount != mp) {
1841 				simple_unlock(&mntvnode_slock);
1842 				if (kinfo_vdebug)
1843 					printf("kinfo: vp changed\n");
1844 				bp = savebp;
1845 				goto again;
1846 			}
1847 			nvp = vp->v_mntvnodes.le_next;
1848 			if (bp + VPTRSZ + VNODESZ > ewhere) {
1849 				simple_unlock(&mntvnode_slock);
1850 				*sizep = bp - where;
1851 				return (ENOMEM);
1852 			}
1853 			simple_unlock(&mntvnode_slock);
1854 			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
1855 			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
1856 				return (error);
1857 			bp += VPTRSZ + VNODESZ;
1858 			simple_lock(&mntvnode_slock);
1859 		}
1860 		simple_unlock(&mntvnode_slock);
1861 		simple_lock(&mountlist_slock);
1862 		nmp = mp->mnt_list.cqe_next;
1863 		vfs_unbusy(mp);
1864 	}
1865 	simple_unlock(&mountlist_slock);
1866 
1867 	*sizep = bp - where;
1868 	return (0);
1869 }
1870 
1871 /*
1872  * Check to see if a filesystem is mounted on a block device.
1873  */
1874 int
1875 vfs_mountedon(vp)
1876 	struct vnode *vp;
1877 {
1878 	struct vnode *vq;
1879 	int error = 0;
1880 
1881 	if (vp->v_specmountpoint != NULL)
1882 		return (EBUSY);
1883 	if (vp->v_flag & VALIASED) {
1884 		simple_lock(&spechash_slock);
1885 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1886 			if (vq->v_rdev != vp->v_rdev ||
1887 			    vq->v_type != vp->v_type)
1888 				continue;
1889 			if (vq->v_specmountpoint != NULL) {
1890 				error = EBUSY;
1891 				break;
1892 			}
1893 		}
1894 		simple_unlock(&spechash_slock);
1895 	}
1896 	return (error);
1897 }
1898 
1899 /*
1900  * Build hash lists of net addresses and hang them off the mount point.
1901  * Called by ufs_mount() to set up the lists of export addresses.
1902  */
1903 static int
1904 vfs_hang_addrlist(mp, nep, argp)
1905 	struct mount *mp;
1906 	struct netexport *nep;
1907 	struct export_args *argp;
1908 {
1909 	register struct netcred *np, *enp;
1910 	register struct radix_node_head *rnh;
1911 	register int i;
1912 	struct radix_node *rn;
1913 	struct sockaddr *saddr, *smask = 0;
1914 	struct domain *dom;
1915 	int error;
1916 
1917 	if (argp->ex_addrlen == 0) {
1918 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1919 			return (EPERM);
1920 		np = &nep->ne_defexported;
1921 		np->netc_exflags = argp->ex_flags;
1922 		np->netc_anon = argp->ex_anon;
1923 		np->netc_anon.cr_ref = 1;
1924 		mp->mnt_flag |= MNT_DEFEXPORTED;
1925 		return (0);
1926 	}
1927 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1928 	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
1929 	memset((caddr_t)np, 0, i);
1930 	saddr = (struct sockaddr *)(np + 1);
1931 	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
1932 	if (error)
1933 		goto out;
1934 	if (saddr->sa_len > argp->ex_addrlen)
1935 		saddr->sa_len = argp->ex_addrlen;
1936 	if (argp->ex_masklen) {
1937 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1938 		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
1939 		if (error)
1940 			goto out;
1941 		if (smask->sa_len > argp->ex_masklen)
1942 			smask->sa_len = argp->ex_masklen;
1943 	}
1944 	i = saddr->sa_family;
1945 	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It would be wasteful to initialize a routing table
		 * for every address family when most are never used;
		 * attach one on demand here.
		 */
1950 		for (dom = domains; dom; dom = dom->dom_next)
1951 			if (dom->dom_family == i && dom->dom_rtattach) {
1952 				dom->dom_rtattach((void **)&nep->ne_rtable[i],
1953 					dom->dom_rtoffset);
1954 				break;
1955 			}
1956 		if ((rnh = nep->ne_rtable[i]) == 0) {
1957 			error = ENOBUFS;
1958 			goto out;
1959 		}
1960 	}
1961 	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
1962 		np->netc_rnodes);
1963 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1964 		if (rn == 0) {
1965 			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
1966 				smask, rnh);
1967 			if (enp == 0) {
1968 				error = EPERM;
1969 				goto out;
1970 			}
1971 		} else
1972 			enp = (struct netcred *)rn;
1973 
1974 		if (enp->netc_exflags != argp->ex_flags ||
1975 		    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
1976 		    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
1977 		    enp->netc_anon.cr_ngroups != argp->ex_anon.cr_ngroups ||
		    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
			enp->netc_anon.cr_ngroups * sizeof(gid_t)))
			error = EPERM;
1981 		else
1982 			error = 0;
1983 		goto out;
1984 	}
1985 	np->netc_exflags = argp->ex_flags;
1986 	np->netc_anon = argp->ex_anon;
1987 	np->netc_anon.cr_ref = 1;
1988 	return (0);
1989 out:
1990 	free(np, M_NETADDR);
1991 	return (error);
1992 }
1993 
1994 /* ARGSUSED */
1995 static int
1996 vfs_free_netcred(rn, w)
1997 	struct radix_node *rn;
1998 	void *w;
1999 {
2000 	register struct radix_node_head *rnh = (struct radix_node_head *)w;
2001 
2002 	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
2003 	free((caddr_t)rn, M_NETADDR);
2004 	return (0);
2005 }
2006 
2007 /*
2008  * Free the net address hash lists that are hanging off the mount points.
2009  */
2010 static void
2011 vfs_free_addrlist(nep)
2012 	struct netexport *nep;
2013 {
2014 	register int i;
2015 	register struct radix_node_head *rnh;
2016 
2017 	for (i = 0; i <= AF_MAX; i++)
2018 		if ((rnh = nep->ne_rtable[i]) != NULL) {
2019 			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
2020 			free((caddr_t)rnh, M_RTABLE);
2021 			nep->ne_rtable[i] = 0;
2022 		}
2023 }
2024 
2025 int
2026 vfs_export(mp, nep, argp)
2027 	struct mount *mp;
2028 	struct netexport *nep;
2029 	struct export_args *argp;
2030 {
2031 	int error;
2032 
2033 	if (argp->ex_flags & MNT_DELEXPORT) {
2034 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2035 			vfs_setpublicfs(NULL, NULL, NULL);
2036 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2037 		}
2038 		vfs_free_addrlist(nep);
2039 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2040 	}
2041 	if (argp->ex_flags & MNT_EXPORTED) {
2042 		if (argp->ex_flags & MNT_EXPUBLIC) {
2043 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2044 				return (error);
2045 			mp->mnt_flag |= MNT_EXPUBLIC;
2046 		}
2047 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
2048 			return (error);
2049 		mp->mnt_flag |= MNT_EXPORTED;
2050 	}
2051 	return (0);
2052 }
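
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * file system's mount entry point typically forwards export requests
 * to vfs_export() when an update is requested with no device name.
 * example_args and example_mount_update are placeholder names; the
 * netexport structure is per-mount storage supplied by the file system.
 */
#if 0
struct example_args {
	char *fspec;			/* block device name, or NULL */
	struct export_args export;	/* network export information */
};

static int
example_mount_update(struct mount *mp, struct netexport *nep,
    struct example_args *args)
{
	/* A null fspec on update means "change export options only". */
	if (args->fspec == NULL)
		return (vfs_export(mp, nep, &args->export));
	return (EOPNOTSUPP);
}
#endif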
2053 
2054 /*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is allowed by the spec (RFCs 2054 and 2055).
 */
2058 int
2059 vfs_setpublicfs(mp, nep, argp)
2060 	struct mount *mp;
2061 	struct netexport *nep;
2062 	struct export_args *argp;
2063 {
2064 	int error;
2065 	struct vnode *rvp;
2066 	char *cp;
2067 
2068 	/*
2069 	 * mp == NULL -> invalidate the current info, the FS is
2070 	 * no longer exported. May be called from either vfs_export
2071 	 * or unmount, so check if it hasn't already been done.
2072 	 */
2073 	if (mp == NULL) {
2074 		if (nfs_pub.np_valid) {
2075 			nfs_pub.np_valid = 0;
2076 			if (nfs_pub.np_index != NULL) {
2077 				FREE(nfs_pub.np_index, M_TEMP);
2078 				nfs_pub.np_index = NULL;
2079 			}
2080 		}
2081 		return (0);
2082 	}
2083 
2084 	/*
2085 	 * Only one allowed at a time.
2086 	 */
2087 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2088 		return (EBUSY);
2089 
2090 	/*
2091 	 * Get real filehandle for root of exported FS.
2092 	 */
2093 	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
2094 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2095 
	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	/* Release the root vnode even if VFS_VPTOFH() fails. */
	error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid);
	vput(rvp);
	if (error)
		return (error);
2103 
2104 	/*
2105 	 * If an indexfile was specified, pull it in.
2106 	 */
	if (argp->ex_indexfile != NULL) {
		if (nfs_pub.np_index != NULL)	/* replace any earlier index */
			FREE(nfs_pub.np_index, M_TEMP);
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
2110 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2111 		    MAXNAMLEN, (size_t *)0);
2112 		if (!error) {
2113 			/*
2114 			 * Check for illegal filenames.
2115 			 */
2116 			for (cp = nfs_pub.np_index; *cp; cp++) {
2117 				if (*cp == '/') {
2118 					error = EINVAL;
2119 					break;
2120 				}
2121 			}
2122 		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;	/* avoid a dangling pointer */
			return (error);
		}
2127 	}
2128 
2129 	nfs_pub.np_mount = mp;
2130 	nfs_pub.np_valid = 1;
2131 	return (0);
2132 }
2133 
2134 struct netcred *
2135 vfs_export_lookup(mp, nep, nam)
2136 	register struct mount *mp;
2137 	struct netexport *nep;
2138 	struct mbuf *nam;
2139 {
2140 	register struct netcred *np;
2141 	register struct radix_node_head *rnh;
2142 	struct sockaddr *saddr;
2143 
2144 	np = NULL;
2145 	if (mp->mnt_flag & MNT_EXPORTED) {
2146 		/*
2147 		 * Lookup in the export list first.
2148 		 */
2149 		if (nam != NULL) {
2150 			saddr = mtod(nam, struct sockaddr *);
2151 			rnh = nep->ne_rtable[saddr->sa_family];
2152 			if (rnh != NULL) {
2153 				np = (struct netcred *)
2154 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2155 							      rnh);
2156 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2157 					np = NULL;
2158 			}
2159 		}
2160 		/*
2161 		 * If no address match, use the default if it exists.
2162 		 */
2163 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2164 			np = &nep->ne_defexported;
2165 	}
2166 	return (np);
2167 }
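
/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * NFS server would resolve a client's address to export credentials
 * before honouring a request.  example_check_client is a placeholder
 * name; nep is the per-mount netexport filled in by vfs_export().
 */
#if 0
static int
example_check_client(struct mount *mp, struct netexport *nep,
    struct mbuf *nam, struct ucred **credp)
{
	struct netcred *np;

	if ((np = vfs_export_lookup(mp, nep, nam)) == NULL)
		return (EACCES);	/* host not in the export lists */
	*credp = &np->netc_anon;	/* credentials to map the client to */
	return (0);
}
#endif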
2168 
2169 /*
2170  * Do the usual access checking.
 * file_mode, uid, and gid are taken from the vnode in question,
 * while acc_mode and cred come from the VOP_ACCESS() argument list.
 */
2174 int
2175 vaccess(type, file_mode, uid, gid, acc_mode, cred)
2176 	enum vtype type;
2177 	mode_t file_mode;
2178 	uid_t uid;
2179 	gid_t gid;
2180 	mode_t acc_mode;
2181 	struct ucred *cred;
2182 {
2183 	mode_t mask;
2184 
2185 	/*
2186 	 * Super-user always gets read/write access, but execute access depends
2187 	 * on at least one execute bit being set.
2188 	 */
2189 	if (cred->cr_uid == 0) {
2190 		if ((acc_mode & VEXEC) && type != VDIR &&
2191 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
2192 			return (EACCES);
2193 		return (0);
2194 	}
2195 
2196 	mask = 0;
2197 
2198 	/* Otherwise, check the owner. */
2199 	if (cred->cr_uid == uid) {
2200 		if (acc_mode & VEXEC)
2201 			mask |= S_IXUSR;
2202 		if (acc_mode & VREAD)
2203 			mask |= S_IRUSR;
2204 		if (acc_mode & VWRITE)
2205 			mask |= S_IWUSR;
2206 		return ((file_mode & mask) == mask ? 0 : EACCES);
2207 	}
2208 
2209 	/* Otherwise, check the groups. */
2210 	if (cred->cr_gid == gid || groupmember(gid, cred)) {
2211 		if (acc_mode & VEXEC)
2212 			mask |= S_IXGRP;
2213 		if (acc_mode & VREAD)
2214 			mask |= S_IRGRP;
2215 		if (acc_mode & VWRITE)
2216 			mask |= S_IWGRP;
2217 		return ((file_mode & mask) == mask ? 0 : EACCES);
2218 	}
2219 
2220 	/* Otherwise, check everyone else. */
2221 	if (acc_mode & VEXEC)
2222 		mask |= S_IXOTH;
2223 	if (acc_mode & VREAD)
2224 		mask |= S_IROTH;
2225 	if (acc_mode & VWRITE)
2226 		mask |= S_IWOTH;
2227 	return ((file_mode & mask) == mask ? 0 : EACCES);
2228 }
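
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * file system's VOP_ACCESS() routine typically handles fs-specific
 * cases such as read-only mounts and then delegates the mode-bit check
 * to vaccess().  example_inode and example_access are placeholders for
 * the per-fs in-core inode and access routine.
 */
#if 0
struct example_inode {
	mode_t	i_mode;
	uid_t	i_uid;
	gid_t	i_gid;
};

static int
example_access(struct vnode *vp, struct example_inode *ip,
    mode_t acc_mode, struct ucred *cred)
{
	/* Disallow write attempts on read-only file systems first. */
	if ((acc_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
	    acc_mode, cred));
}
#endif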
2229 
2230 /*
2231  * Unmount all file systems.
2232  * We traverse the list in reverse order under the assumption that doing so
2233  * will avoid needing to worry about dependencies.
2234  */
2235 void
2236 vfs_unmountall()
2237 {
2238 	register struct mount *mp, *nmp;
2239 	int allerror, error;
2240 	struct proc *p = curproc;	/* XXX */
2241 
2242 	/*
2243 	 * Unmounting a file system blocks the requesting process.
2244 	 * However, it's possible for this routine to be called when
2245 	 * curproc is NULL (e.g. panic situation, or via the debugger).
2246 	 * If we get stuck in this situation, just abort, since any
2247 	 * attempts to sleep will fault.
2248 	 */
2249 	if (p == NULL) {
2250 		printf("vfs_unmountall: no context, aborting\n");
2251 		return;
2252 	}
2253 
2254 	for (allerror = 0,
2255 	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
2256 		nmp = mp->mnt_list.cqe_prev;
2257 #ifdef DEBUG
2258 		printf("unmounting %s (%s)...\n",
2259 		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
2260 #endif
2261 		if (vfs_busy(mp, 0, 0))
2262 			continue;
2263 		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
2264 			printf("unmount of %s failed with error %d\n",
2265 			    mp->mnt_stat.f_mntonname, error);
2266 			allerror = 1;
2267 		}
2268 	}
2269 	if (allerror)
2270 		printf("WARNING: some file systems would not unmount\n");
2271 }
2272 
2273 /*
2274  * Sync and unmount file systems before shutting down.
2275  */
2276 void
2277 vfs_shutdown()
2278 {
2279 	register struct buf *bp;
2280 	int iter, nbusy, dcount, s;
2281 
2282 	printf("syncing disks... ");
2283 
2284 	/* XXX Should suspend scheduling. */
2285 	(void) spl0();
2286 
2287 	sys_sync(&proc0, (void *)0, (register_t *)0);
2288 
2289 	/* Wait for sync to finish. */
2290 	dcount = 10000;
2291 	for (iter = 0; iter < 20; iter++) {
2292 		nbusy = 0;
2293 		for (bp = &buf[nbuf]; --bp >= buf; ) {
2294 			if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
2295 				nbusy++;
			/*
			 * With soft updates, some buffers that have been
			 * written will be re-marked dirty until the
			 * buffers they depend on are also written.
			 */
2301 			if (bp->b_vp && bp->b_vp->v_mount
2302 			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
2303 			    && (bp->b_flags & B_DELWRI)) {
2304 				s = splbio();
2305 				bremfree(bp);
2306 				bp->b_flags |= B_BUSY;
2307 				splx(s);
2308 				nbusy++;
2309 				bawrite(bp);
2310 				if (dcount-- <= 0) {
2311 					printf("softdep ");
2312 					goto fail;
2313 				}
2314 			}
2315 		}
2316 		if (nbusy == 0)
2317 			break;
2318 		printf("%d ", nbusy);
2319 		DELAY(40000 * iter);
2320 	}
2321 	if (nbusy) {
2322 fail:
2323 #ifdef DEBUG
2324 		printf("giving up\nPrinting vnodes for busy buffers\n");
2325 		for (bp = &buf[nbuf]; --bp >= buf; )
2326 			if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
2327 				vprint(NULL, bp->b_vp);
2328 #else
2329 		printf("giving up\n");
2330 #endif
2331 		return;
2332 	} else
2333 		printf("done\n");
2334 
2335 	/*
2336 	 * If we've panic'd, don't make the situation potentially
2337 	 * worse by unmounting the file systems.
2338 	 */
2339 	if (panicstr != NULL)
2340 		return;
2341 
2342 	/* Release inodes held by texts before update. */
2343 #ifdef notdef
2344 	vnshutdown();
2345 #endif
2346 	/* Unmount file systems. */
2347 	vfs_unmountall();
2348 }
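
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * machine-dependent reboot path is the usual caller of vfs_shutdown(),
 * invoking it before halting unless a dirty reboot was requested.
 * example_cpu_reboot is a placeholder for an MD cpu_reboot() routine.
 */
#if 0
#include <sys/reboot.h>

void
example_cpu_reboot(int howto)
{
	/* Skip the sync/unmount pass if RB_NOSYNC was requested. */
	if ((howto & RB_NOSYNC) == 0)
		vfs_shutdown();
	/* ... machine-dependent halt/reset code ... */
}
#endif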
2349 
2350 /*
2351  * Mount the root file system.  If the operator didn't specify a
2352  * file system to use, try all possible file systems until one
2353  * succeeds.
2354  */
2355 int
2356 vfs_mountroot()
2357 {
2358 	extern int (*mountroot) __P((void));
2359 	struct vfsops *v;
2360 
2361 	if (root_device == NULL)
2362 		panic("vfs_mountroot: root device unknown");
2363 
2364 	switch (root_device->dv_class) {
2365 	case DV_IFNET:
2366 		if (rootdev != NODEV)
2367 			panic("vfs_mountroot: rootdev set for DV_IFNET");
2368 		break;
2369 
2370 	case DV_DISK:
2371 		if (rootdev == NODEV)
2372 			panic("vfs_mountroot: rootdev not set for DV_DISK");
2373 		break;
2374 
2375 	default:
2376 		printf("%s: inappropriate for root file system\n",
2377 		    root_device->dv_xname);
2378 		return (ENODEV);
2379 	}
2380 
2381 	/*
2382 	 * If user specified a file system, use it.
2383 	 */
2384 	if (mountroot != NULL)
2385 		return ((*mountroot)());
2386 
2387 	/*
2388 	 * Try each file system currently configured into the kernel.
2389 	 */
2390 	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
2391 		if (v->vfs_mountroot == NULL)
2392 			continue;
2393 #ifdef DEBUG
2394 		printf("mountroot: trying %s...\n", v->vfs_name);
2395 #endif
2396 		if ((*v->vfs_mountroot)() == 0) {
2397 			printf("root file system type: %s\n", v->vfs_name);
2398 			break;
2399 		}
2400 	}
2401 
2402 	if (v == NULL) {
2403 		printf("no file system for %s", root_device->dv_xname);
2404 		if (root_device->dv_class == DV_DISK)
2405 			printf(" (dev 0x%x)", rootdev);
2406 		printf("\n");
2407 		return (EFTYPE);
2408 	}
2409 	return (0);
2410 }
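
/*
 * Illustrative sketch (hypothetical, not part of this file): machine-
 * dependent autoconfiguration code can pin the root file system type
 * by setting the mountroot hook tested above, e.g. to force an NFS
 * root on a diskless machine.  example_md_setroot is a placeholder
 * name; nfs_mountroot is assumed to be configured into the kernel.
 */
#if 0
extern int (*mountroot) __P((void));
extern int nfs_mountroot __P((void));

static void
example_md_setroot(int diskless)
{
	/* Bypass the probe loop in vfs_mountroot() for diskless boot. */
	if (diskless)
		mountroot = nfs_mountroot;
}
#endif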
2411 
2412 /*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
2416  */
2417 struct vfsops *
2418 vfs_getopsbyname(name)
2419 	const char *name;
2420 {
2421 	struct vfsops *v;
2422 
2423 	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
2424 		if (strcmp(v->vfs_name, name) == 0)
2425 			break;
2426 	}
2427 
2428 	return (v);
2429 }
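
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the mount system call path can resolve a user-supplied file system
 * type string to its vfsops this way.  example_resolve is a
 * placeholder name.
 */
#if 0
static int
example_resolve(const char *fstypename, struct vfsops **vfspp)
{
	struct vfsops *v;

	if ((v = vfs_getopsbyname(fstypename)) == NULL)
		return (ENODEV);	/* type not configured into kernel */
	*vfspp = v;
	return (0);
}
#endif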
2430 
2431 /*
2432  * Establish a file system and initialize it.
2433  */
2434 int
2435 vfs_attach(vfs)
2436 	struct vfsops *vfs;
2437 {
2438 	struct vfsops *v;
2439 	int error = 0;
2442 	/*
2443 	 * Make sure this file system doesn't already exist.
2444 	 */
2445 	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
2446 		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
2447 			error = EEXIST;
2448 			goto out;
2449 		}
2450 	}
2451 
2452 	/*
2453 	 * Initialize the vnode operations for this file system.
2454 	 */
2455 	vfs_opv_init(vfs->vfs_opv_descs);
2456 
2457 	/*
2458 	 * Now initialize the file system itself.
2459 	 */
2460 	(*vfs->vfs_init)();
2461 
2462 	/*
2463 	 * ...and link it into the kernel's list.
2464 	 */
2465 	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);
2466 
2467 	/*
2468 	 * Sanity: make sure the reference count is 0.
2469 	 */
2470 	vfs->vfs_refcount = 0;
2471 
2472  out:
2473 	return (error);
2474 }
2475 
2476 /*
2477  * Remove a file system from the kernel.
2478  */
2479 int
2480 vfs_detach(vfs)
2481 	struct vfsops *vfs;
2482 {
2483 	struct vfsops *v;
2484 
2485 	/*
2486 	 * Make sure no one is using the filesystem.
2487 	 */
2488 	if (vfs->vfs_refcount != 0)
2489 		return (EBUSY);
2490 
2491 	/*
2492 	 * ...and remove it from the kernel's list.
2493 	 */
2494 	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
2495 		if (v == vfs) {
2496 			LIST_REMOVE(v, vfs_list);
2497 			break;
2498 		}
2499 	}
2500 
2501 	if (v == NULL)
2502 		return (ESRCH);
2503 
2504 	/*
2505 	 * Free the vnode operations vector.
2506 	 */
2507 	vfs_opv_free(vfs->vfs_opv_descs);
2508 	return (0);
2509 }
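
/*
 * Illustrative sketch (hypothetical, not part of this file): a loadable
 * file system module would pair vfs_attach() and vfs_detach() in its
 * load and unload handlers.  example_vfsops and the handler names are
 * placeholders; a real module would hook into the lkm(9) machinery.
 */
#if 0
extern struct vfsops example_vfsops;

static int
example_fs_load(void)
{
	/* Registers vnode ops, runs vfs_init, links into vfs_list. */
	return (vfs_attach(&example_vfsops));
}

static int
example_fs_unload(void)
{
	/* Fails with EBUSY while any instance remains mounted. */
	return (vfs_detach(&example_vfsops));
}
#endif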
2510