1 /*	$OpenBSD: vfs_subr.c,v 1.159 2007/11/15 16:50:28 deraadt Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/mount.h>
48 #include <sys/time.h>
49 #include <sys/fcntl.h>
50 #include <sys/kernel.h>
51 #include <sys/vnode.h>
52 #include <sys/stat.h>
53 #include <sys/namei.h>
54 #include <sys/ucred.h>
55 #include <sys/buf.h>
56 #include <sys/errno.h>
57 #include <sys/malloc.h>
58 #include <sys/domain.h>
59 #include <sys/mbuf.h>
60 #include <sys/syscallargs.h>
61 #include <sys/pool.h>
62 
63 #include <uvm/uvm_extern.h>
64 #include <sys/sysctl.h>
65 
66 #include <miscfs/specfs/specdev.h>
67 
68 enum vtype iftovt_tab[16] = {
69 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
70 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
71 };
72 
73 int	vttoif_tab[9] = {
74 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
75 	S_IFSOCK, S_IFIFO, S_IFMT,
76 };
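
/*
 * Editor's sketch: these two tables back the IFTOVT() and VTTOIF()
 * macros in <sys/vnode.h>, which translate between the S_IFMT bits of
 * a mode_t and enum vtype.  A minimal illustration; the example_*
 * names are hypothetical.
 */
static __inline enum vtype
example_mode_to_vtype(mode_t mode)
{
	/* IFTOVT(mode): e.g. (S_IFREG >> 12) == 8, iftovt_tab[8] == VREG */
	return (iftovt_tab[(mode & S_IFMT) >> 12]);
}

static __inline int
example_vtype_to_mode(enum vtype type)
{
	/* VTTOIF(type): vttoif_tab[VREG] == S_IFREG round-trips it */
	return (vttoif_tab[(int)type]);
}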
77 
78 int doforce = 1;		/* 1 => permit forcible unmounting */
79 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
80 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
81 
82 /*
83  * Insq/Remq for the vnode usage lists.
84  */
85 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
86 #define	bufremvn(bp) {							\
87 	LIST_REMOVE(bp, b_vnbufs);					\
88 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
89 }
90 
91 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
92 struct freelst vnode_free_list;	/* vnode free list */
93 
94 struct mntlist mountlist;	/* mounted filesystem list */
95 
96 void	vclean(struct vnode *, int, struct proc *);
97 
98 void insmntque(struct vnode *, struct mount *);
99 int getdevvp(dev_t, struct vnode **, enum vtype);
100 
101 int vfs_hang_addrlist(struct mount *, struct netexport *,
102 				  struct export_args *);
103 int vfs_free_netcred(struct radix_node *, void *);
104 void vfs_free_addrlist(struct netexport *);
105 void vputonfreelist(struct vnode *);
106 
107 int vflush_vnode(struct vnode *, void *);
108 int maxvnodes;
109 
110 #ifdef DEBUG
111 void printlockedvnodes(void);
112 #endif
113 
114 struct pool vnode_pool;
115 
116 /*
117  * Initialize the vnode management data structures.
118  */
119 void
120 vntblinit(void)
121 {
122 	/* buffer cache may need a vnode for each buffer */
123 	maxvnodes = desiredvnodes;
124 	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
125 	    &pool_allocator_nointr);
126 	TAILQ_INIT(&vnode_hold_list);
127 	TAILQ_INIT(&vnode_free_list);
128 	CIRCLEQ_INIT(&mountlist);
129 	/*
130 	 * Initialize the filesystem syncer.
131 	 */
132 	vn_initialize_syncerd();
133 }
134 
135 /*
136  * Mark a mount point as busy. Used to synchronize access and to delay
137  * unmounting.
138  *
139  * The default behaviour is to attempt a read lock; if an unmount is
140  * in progress, wait for it to finish and then return failure.
141  */
142 int
143 vfs_busy(struct mount *mp, int flags)
144 {
145 	int rwflags = 0;
146 
147 	/* new mountpoints need their lock initialised */
148 	if (mp->mnt_lock.rwl_name == NULL)
149 		rw_init(&mp->mnt_lock, "vfslock");
150 
151 	if (flags & VB_WRITE)
152 		rwflags |= RW_WRITE;
153 	else
154 		rwflags |= RW_READ;
155 
156 	if (flags & VB_WAIT)
157 		rwflags |= RW_SLEEPFAIL;
158 	else
159 		rwflags |= RW_NOSLEEP;
160 
161 	if (rw_enter(&mp->mnt_lock, rwflags))
162 		return (EBUSY);
163 
164 	return (0);
165 }
166 
167 /*
168  * Free a busy file system
169  */
170 void
171 vfs_unbusy(struct mount *mp)
172 {
173 	rw_exit(&mp->mnt_lock);
174 }
175 
176 int
177 vfs_isbusy(struct mount *mp)
178 {
179 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
180 		return (1);
181 	else
182 		return (0);
183 }
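
/*
 * Editor's sketch of the usual vfs_busy()/vfs_unbusy() pairing, as in
 * callers such as printlockedvnodes() and sysctl_vnode() below: take
 * a read hold, skip the mount point if an unmount is in progress, and
 * release the hold once done.  (The example_* name is hypothetical.)
 */
static int
example_walk_mount(struct mount *mp)
{
	struct vnode *vp;

	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
		return (EBUSY);		/* unmount in progress; skip it */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* ... inspect vp ... */
	}
	vfs_unbusy(mp);
	return (0);
}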
184 
185 /*
186  * Lookup a filesystem type, and if found allocate and initialize
187  * a mount structure for it.
188  *
189  * Devname is usually updated by mount(8) after booting.
190  */
191 int
192 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
193 {
194 	struct vfsconf *vfsp;
195 	struct mount *mp;
196 
197 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
198 		if (!strcmp(vfsp->vfc_name, fstypename))
199 			break;
200 	if (vfsp == NULL)
201 		return (ENODEV);
202 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
203 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
204 	LIST_INIT(&mp->mnt_vnodelist);
205 	mp->mnt_vfc = vfsp;
206 	mp->mnt_op = vfsp->vfc_vfsops;
207 	mp->mnt_flag = MNT_RDONLY;
208 	mp->mnt_vnodecovered = NULLVP;
209 	vfsp->vfc_refcount++;
210 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
211 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
212 	mp->mnt_stat.f_mntonname[0] = '/';
213 	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
214 	*mpp = mp;
215  	return (0);
216  }
217 
218 /*
219  * Find an appropriate filesystem to use for the root. If a filesystem
220  * has not been preselected, walk through the list of known filesystems
221  * trying those that have mountroot routines, and try them until one
222  * works or we have tried them all.
223  */
224 int
225 vfs_mountroot(void)
226 {
227 	struct vfsconf *vfsp;
228 	int error;
229 
230 	if (mountroot != NULL)
231 		return ((*mountroot)());
232 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
233 		if (vfsp->vfc_mountroot == NULL)
234 			continue;
235 		if ((error = (*vfsp->vfc_mountroot)()) == 0)
236 			return (0);
237 		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
238  	}
239 	return (ENODEV);
240 }
241 
242 /*
243  * Lookup a mount point by filesystem identifier.
244  */
245 struct mount *
246 vfs_getvfs(fsid_t *fsid)
247 {
248 	struct mount *mp;
249 
250 	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
251 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
252 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
253 			return (mp);
254 		}
255 	}
256 
257 	return (NULL);
258 }
259 
260 
261 /*
262  * Get a new unique fsid
263  */
264 void
265 vfs_getnewfsid(struct mount *mp)
266 {
267 	static u_short xxxfs_mntid;
268 
269 	fsid_t tfsid;
270 	int mtype;
271 
272 	mtype = mp->mnt_vfc->vfc_typenum;
273 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
274 	mp->mnt_stat.f_fsid.val[1] = mtype;
275 	if (xxxfs_mntid == 0)
276 		++xxxfs_mntid;
277 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
278 	tfsid.val[1] = mtype;
279 	if (!CIRCLEQ_EMPTY(&mountlist)) {
280 		while (vfs_getvfs(&tfsid)) {
281 			tfsid.val[0]++;
282 			xxxfs_mntid++;
283 		}
284 	}
285 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
286 }
287 
288 /*
289  * Make a 'unique' number from a mount type name.
290  * Note that this is no longer used for ffs, which
291  * now has an on-disk filesystem id.
292  */
293 long
294 makefstype(char *type)
295 {
296 	long rv;
297 
298 	for (rv = 0; *type; type++) {
299 		rv <<= 2;
300 		rv ^= *type;
301 	}
302 	return rv;
303 }
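
/*
 * Editor's worked example: the hash shifts the accumulator left two
 * bits and XORs in each byte, so
 *	makefstype("nfs") == ((('n' << 2) ^ 'f') << 2) ^ 's' == 1803
 * yielding a stable, mostly-unique long for short type names.
 */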
304 
305 /*
306  * Set vnode attributes to VNOVAL
307  */
308 void
309 vattr_null(struct vattr *vap)
310 {
311 
312 	vap->va_type = VNON;
313 	/* XXX These next two were split onto two lines to work around a GCC bug. */
314 	vap->va_size = VNOVAL;
315 	vap->va_bytes = VNOVAL;
316 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
317 		vap->va_fsid = vap->va_fileid =
318 		vap->va_blocksize = vap->va_rdev =
319 		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
320 		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
321 		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
322 		vap->va_flags = vap->va_gen = VNOVAL;
323 	vap->va_vaflags = 0;
324 }
325 
326 /*
327  * Routines having to do with the management of the vnode table.
328  */
329 extern int (**dead_vnodeop_p)(void *);
330 long numvnodes;
331 
332 /*
333  * Return the next vnode from the free list.
334  */
335 int
336 getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
337     struct vnode **vpp)
338 {
339 	struct proc *p = curproc;
340 	struct freelst *listhd;
341 	static int toggle;
342 	struct vnode *vp;
343 	int s;
344 
345 	/*
346 	 * We must choose whether to allocate a new vnode or recycle an
347 	 * existing one. The criterion for allocating a new one is that
348 	 * the total number of vnodes is less than the number desired or
349 	 * there are no vnodes on either free list. Generally we only
350 	 * want to recycle vnodes that have no buffers associated with
351 	 * them, so we look first on the vnode_free_list. If it is empty,
352 	 * we next consider vnodes with referencing buffers on the
353 	 * vnode_hold_list. The toggle ensures that half the time we
354 	 * will use a buffer from the vnode_hold_list, and half the time
355 	 * we will allocate a new one unless the list has grown to twice
356 	 * the desired size. We are reluctant to recycle vnodes from the
357 	 * vnode_hold_list because we will lose the identity of all its
358 	 * referencing buffers.
359 	 */
360 	toggle ^= 1;
361 	if (numvnodes > 2 * maxvnodes)
362 		toggle = 0;
363 
364 	s = splbio();
365 	if ((numvnodes < maxvnodes) ||
366 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
367 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
368 		splx(s);
369 		vp = pool_get(&vnode_pool, PR_WAITOK);
370 		bzero((char *)vp, sizeof *vp);
371 		numvnodes++;
372 	} else {
373 		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
374 		    vp = TAILQ_NEXT(vp, v_freelist)) {
375 			if (VOP_ISLOCKED(vp) == 0)
376 				break;
377 		}
378 		/*
379 		 * Unless this is a bad time of the month, at most
380 		 * the first NCPUS items on the free list are
381 		 * locked, so this is close enough to being empty.
382 		 */
383 		if (vp == NULL) {
384 			splx(s);
385 			tablefull("vnode");
386 			*vpp = 0;
387 			return (ENFILE);
388 		}
389 
390 #ifdef DIAGNOSTIC
391 		if (vp->v_usecount) {
392 			vprint("free vnode", vp);
393 			panic("free vnode isn't");
394 		}
395 #endif
396 
397 		TAILQ_REMOVE(listhd, vp, v_freelist);
398 		vp->v_bioflag &= ~VBIOONFREELIST;
399 		splx(s);
400 
401 		if (vp->v_type != VBAD)
402 			vgonel(vp, p);
403 #ifdef DIAGNOSTIC
404 		if (vp->v_data) {
405 			vprint("cleaned vnode", vp);
406 			panic("cleaned vnode isn't");
407 		}
408 		s = splbio();
409 		if (vp->v_numoutput)
410 			panic("Clean vnode has pending I/O's");
411 		splx(s);
412 #endif
413 		vp->v_flag = 0;
414 		vp->v_socket = 0;
415 	}
416 	vp->v_type = VNON;
417 	cache_purge(vp);
418 	vp->v_tag = tag;
419 	vp->v_op = vops;
420 	insmntque(vp, mp);
421 	*vpp = vp;
422 	vp->v_usecount = 1;
423 	vp->v_data = 0;
424 	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
425 	return (0);
426 }
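
/*
 * Editor's sketch of a typical getnewvnode() caller: a filesystem's
 * vget routine passes its tag, mount point and vnode-ops vector, then
 * fills in the type and hangs its per-file data off v_data (compare
 * ffs_vget() in ufs/ffs/ffs_vfsops.c).  The example_* name and the
 * use of VT_NON/VREG here are illustrative only.
 */
static int
example_alloc_vnode(struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if ((error = getnewvnode(VT_NON, mp, vops, &vp)) != 0)
		return (error);
	vp->v_type = VREG;	/* the caller sets the real type */
	vp->v_data = NULL;	/* and attaches its inode here */
	*vpp = vp;
	return (0);
}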
427 
428 /*
429  * Move a vnode from one mount queue to another.
430  */
431 void
432 insmntque(struct vnode *vp, struct mount *mp)
433 {
434 	/*
435 	 * Delete from old mount point vnode list, if on one.
436 	 */
437 	if (vp->v_mount != NULL)
438 		LIST_REMOVE(vp, v_mntvnodes);
439 	/*
440 	 * Insert into list of vnodes for the new mount point, if available.
441 	 */
442 	if ((vp->v_mount = mp) != NULL)
443 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
444 }
445 
446 /*
447  * Create a vnode for a block device.
448  * Used for root filesystem, argdev, and swap areas.
449  * Also used for memory file system special devices.
450  */
451 int
452 bdevvp(dev_t dev, struct vnode **vpp)
453 {
454 	return (getdevvp(dev, vpp, VBLK));
455 }
456 
457 /*
458  * Create a vnode for a character device.
459  * Used for console handling.
460  */
461 int
462 cdevvp(dev_t dev, struct vnode **vpp)
463 {
464 	return (getdevvp(dev, vpp, VCHR));
465 }
466 
467 /*
468  * Create a vnode for a device.
469  * Used by bdevvp (block device) for root file system etc.,
470  * and by cdevvp (character device) for console.
471  */
472 int
473 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
474 {
475 	struct vnode *vp;
476 	struct vnode *nvp;
477 	int error;
478 
479 	if (dev == NODEV) {
480 		*vpp = NULLVP;
481 		return (0);
482 	}
483 	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
484 	if (error) {
485 		*vpp = NULLVP;
486 		return (error);
487 	}
488 	vp = nvp;
489 	vp->v_type = type;
490 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
491 		vput(vp);
492 		vp = nvp;
493 	}
494 	*vpp = vp;
495 	return (0);
496 }
497 
498 /*
499  * Check to see if the new vnode represents a special device
500  * for which we already have a vnode (either because of
501  * bdevvp() or because of a different vnode representing
502  * the same block device). If such an alias exists, deallocate
503  * the existing contents and return the aliased vnode. The
504  * caller is responsible for filling it with its new contents.
505  */
506 struct vnode *
507 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
508 {
509 	struct proc *p = curproc;
510 	struct vnode *vp;
511 	struct vnode **vpp;
512 
513 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
514 		return (NULLVP);
515 
516 	vpp = &speclisth[SPECHASH(nvp_rdev)];
517 loop:
518 	for (vp = *vpp; vp; vp = vp->v_specnext) {
519 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
520 			continue;
521 		}
522 		/*
523 		 * Alias, but not in use, so flush it out.
524 		 */
525 		if (vp->v_usecount == 0) {
526 			vgonel(vp, p);
527 			goto loop;
528 		}
529 		if (vget(vp, LK_EXCLUSIVE, p)) {
530 			goto loop;
531 		}
532 		break;
533 	}
534 
535 	/*
536 	 * The common case is handled by the if statement below.
537 	 */
538 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
539 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
540 			M_WAITOK);
541 		nvp->v_rdev = nvp_rdev;
542 		nvp->v_hashchain = vpp;
543 		nvp->v_specnext = *vpp;
544 		nvp->v_specmountpoint = NULL;
545 		nvp->v_speclockf = NULL;
546 		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
547 		*vpp = nvp;
548 		if (vp != NULLVP) {
549 			nvp->v_flag |= VALIASED;
550 			vp->v_flag |= VALIASED;
551 			vput(vp);
552 		}
553 		return (NULLVP);
554 	}
555 
556 	/*
557 	 * This code handles the uncommon case: we found an alias whose
558 	 * tag is VT_NON and whose type is VBLK, which means it is a
559 	 * block device vnode that was created with bdevvp() rather
560 	 * than by a filesystem.
561 	 * An example of such a vnode is the root partition device vnode
562 	 * created in ffs_mountroot.
563 	 *
564 	 * The vnodes created by bdevvp should not be aliased (why?).
565 	 */
566 
567 	VOP_UNLOCK(vp, 0, p);
568 	vclean(vp, 0, p);
569 	vp->v_op = nvp->v_op;
570 	vp->v_tag = nvp->v_tag;
571 	nvp->v_type = VNON;
572 	insmntque(vp, mp);
573 	return (vp);
574 }
575 
576 /*
577  * Grab a particular vnode from the free list, increment its
578  * reference count and lock it. If the vnode lock bit is set,
579  * the vnode is being eliminated in vgone. In that case, we
580  * cannot grab it, so the process is awakened when the
581  * transition is completed, and an error code is returned to
582  * indicate that the vnode is no longer usable, possibly
583  * having been changed to a new file system type.
584  */
585 int
586 vget(struct vnode *vp, int flags, struct proc *p)
587 {
588 	int error, s, onfreelist;
589 
590 	/*
591 	 * If the vnode is in the process of being cleaned out for
592 	 * another use, we wait for the cleaning to finish and then
593 	 * return failure. Cleaning is determined by checking that
594 	 * the VXLOCK flag is set.
595 	 */
596 
597 	if (vp->v_flag & VXLOCK) {
598 		if (flags & LK_NOWAIT) {
599 			return (EBUSY);
600 		}
601 
602 		vp->v_flag |= VXWANT;
603 		ltsleep(vp, PINOD | PNORELOCK, "vget", 0, NULL);
604 		return (ENOENT);
605 	}
606 
607 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
608 	if (vp->v_usecount == 0 && onfreelist) {
609 		s = splbio();
610 		if (vp->v_holdcnt > 0)
611 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
612 		else
613 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
614 		vp->v_bioflag &= ~VBIOONFREELIST;
615 		splx(s);
616 	}
617 
618  	vp->v_usecount++;
619 	if (flags & LK_TYPE_MASK) {
620 		if ((error = vn_lock(vp, flags, p)) != 0) {
621 			vp->v_usecount--;
622 			if (vp->v_usecount == 0 && onfreelist)
623 				vputonfreelist(vp);
624 		}
625 		return (error);
626 	}
627 
628 	return (0);
629 }
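
/*
 * Editor's sketch of the common vget()/vput() pattern: take a locked
 * reference, back off if the vnode was reclaimed underneath us, and
 * drop lock and reference together when done.  (The example_* name
 * is hypothetical.)
 */
static int
example_use_vnode(struct vnode *vp, struct proc *p)
{
	int error;

	if ((error = vget(vp, LK_EXCLUSIVE, p)) != 0)
		return (error);	/* e.g. ENOENT if being cleaned */
	/* ... operate on the locked, referenced vnode ... */
	vput(vp);		/* unlock + vrele in one step */
	return (0);
}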
630 
631 
632 /* Vnode reference. */
633 void
634 vref(struct vnode *vp)
635 {
636 #ifdef DIAGNOSTIC
637 	if (vp->v_usecount == 0)
638 		panic("vref used where vget required");
639 #endif
640 	vp->v_usecount++;
641 }
642 
643 void
644 vputonfreelist(struct vnode *vp)
645 {
646 	int s;
647 	struct freelst *lst;
648 
649 	s = splbio();
650 #ifdef DIAGNOSTIC
651 	if (vp->v_usecount != 0)
652 		panic("Use count is not zero!");
653 
654 	if (vp->v_bioflag & VBIOONFREELIST) {
655 		vprint("vnode already on free list: ", vp);
656 		panic("vnode already on free list");
657 	}
658 #endif
659 
660 	vp->v_bioflag |= VBIOONFREELIST;
661 
662 	if (vp->v_holdcnt > 0)
663 		lst = &vnode_hold_list;
664 	else
665 		lst = &vnode_free_list;
666 
667 	if (vp->v_type == VBAD)
668 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
669 	else
670 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
671 
672 	splx(s);
673 }
674 
675 /*
676  * vput(), just unlock and vrele()
677  */
678 void
679 vput(struct vnode *vp)
680 {
681 	struct proc *p = curproc;
682 
683 #ifdef DIAGNOSTIC
684 	if (vp == NULL)
685 		panic("vput: null vp");
686 #endif
687 
688 #ifdef DIAGNOSTIC
689 	if (vp->v_usecount == 0) {
690 		vprint("vput: bad ref count", vp);
691 		panic("vput: ref cnt");
692 	}
693 #endif
694 	vp->v_usecount--;
695 	if (vp->v_usecount > 0) {
696 		VOP_UNLOCK(vp, 0, p);
697 		return;
698 	}
699 
700 #ifdef DIAGNOSTIC
701 	if (vp->v_writecount != 0) {
702 		vprint("vput: bad writecount", vp);
703 		panic("vput: v_writecount != 0");
704 	}
705 #endif
706 
707 	VOP_INACTIVE(vp, p);
708 
709 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
710 		vputonfreelist(vp);
711 }
712 
713 /*
714  * Vnode release - use for active VNODES.
715  * If count drops to zero, call inactive routine and return to freelist.
716  */
717 void
718 vrele(struct vnode *vp)
719 {
720 	struct proc *p = curproc;
721 
722 #ifdef DIAGNOSTIC
723 	if (vp == NULL)
724 		panic("vrele: null vp");
725 #endif
726 #ifdef DIAGNOSTIC
727 	if (vp->v_usecount == 0) {
728 		vprint("vrele: bad ref count", vp);
729 		panic("vrele: ref cnt");
730 	}
731 #endif
732 	vp->v_usecount--;
733 	if (vp->v_usecount > 0) {
734 		return;
735 	}
736 
737 #ifdef DIAGNOSTIC
738 	if (vp->v_writecount != 0) {
739 		vprint("vrele: bad writecount", vp);
740 		panic("vrele: v_writecount != 0");
741 	}
742 #endif
743 
744 	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
745 #ifdef DIAGNOSTIC
746 		vprint("vrele: cannot lock", vp);
747 #endif
748 		return;
749 	}
750 
751 	VOP_INACTIVE(vp, p);
752 
753 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
754 		vputonfreelist(vp);
755 }
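
/*
 * Editor's note contrasting the two release paths: vput() expects the
 * vnode locked and both unlocks and unreferences it, while vrele() is
 * for a reference held without the lock:
 *
 *	vget(vp, LK_EXCLUSIVE, p);	...	vput(vp);
 *	vref(vp);			...	vrele(vp);
 */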
756 
757 void vhold(struct vnode *vp);
758 
759 /*
760  * Page or buffer structure gets a reference.
761  */
762 void
763 vhold(struct vnode *vp)
764 {
765 	/*
766 	 * If it is on the freelist and the hold count is currently
767 	 * zero, move it to the hold list.
768 	 */
769 	if ((vp->v_bioflag & VBIOONFREELIST) &&
770 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
771 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
772 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
773 	}
774 	vp->v_holdcnt++;
775 }
776 
777 /*
778  * Remove any vnodes in the vnode table belonging to mount point mp.
779  *
780  * If MNT_NOFORCE is specified, there should not be any active ones,
781  * return error if any are found (nb: this is a user error, not a
782  * system error). If MNT_FORCE is specified, detach any active vnodes
783  * that are found.
784  */
785 #ifdef DEBUG
786 int busyprt = 0;	/* print out busy vnodes */
787 struct ctldebug debug1 = { "busyprt", &busyprt };
788 #endif
789 
790 int
791 vfs_mount_foreach_vnode(struct mount *mp,
792     int (*func)(struct vnode *, void *), void *arg) {
793 	struct vnode *vp, *nvp;
794 	int error = 0;
795 
796 loop:
797 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
798 		if (vp->v_mount != mp)
799 			goto loop;
800 		nvp = LIST_NEXT(vp, v_mntvnodes);
801 
802 		error = func(vp, arg);
803 
804 		if (error != 0)
805 			break;
806 	}
807 
808 	return (error);
809 }
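
/*
 * Editor's sketch of a vfs_mount_foreach_vnode() callback in the
 * style of vflush_vnode() below: a hypothetical counter of in-use
 * vnodes.  Returning non-zero from the callback aborts the walk.
 *
 *	int n = 0;
 *	vfs_mount_foreach_vnode(mp, example_count_used, &n);
 */
static int
example_count_used(struct vnode *vp, void *arg)
{
	int *count = arg;

	if (vp->v_usecount > 0)
		(*count)++;
	return (0);
}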
810 
811 struct vflush_args {
812 	struct vnode *skipvp;
813 	int busy;
814 	int flags;
815 };
816 
817 int
818 vflush_vnode(struct vnode *vp, void *arg) {
819 	struct vflush_args *va = arg;
820 	struct proc *p = curproc;
821 
822 	if (vp == va->skipvp) {
823 		return (0);
824 	}
825 
826 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
827 		return (0);
828 	}
829 
830 	/*
831 	 * If WRITECLOSE is set, only flush out regular file
832 	 * vnodes open for writing.
833 	 */
834 	if ((va->flags & WRITECLOSE) &&
835 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
836 		return (0);
837 	}
838 
839 	/*
840 	 * With v_usecount == 0, all we need to do is clear
841 	 * out the vnode data structures and we are done.
842 	 */
843 	if (vp->v_usecount == 0) {
844 		vgonel(vp, p);
845 		return (0);
846 	}
847 
848 	/*
849 	 * If FORCECLOSE is set, forcibly close the vnode.
850 	 * For block or character devices, revert to an
851 	 * anonymous device. For all other files, just kill them.
852 	 */
853 	if (va->flags & FORCECLOSE) {
854 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
855 			vgonel(vp, p);
856 		} else {
857 			vclean(vp, 0, p);
858 			vp->v_op = spec_vnodeop_p;
859 			insmntque(vp, (struct mount *)0);
860 		}
861 		return (0);
862 	}
863 
864 #ifdef DEBUG
865 	if (busyprt)
866 		vprint("vflush: busy vnode", vp);
867 #endif
868 	va->busy++;
869 	return (0);
870 }
871 
872 int
873 vflush(struct mount *mp, struct vnode *skipvp, int flags)
874 {
875 	struct vflush_args va;
876 	va.skipvp = skipvp;
877 	va.busy = 0;
878 	va.flags = flags;
879 
880 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
881 
882 	if (va.busy)
883 		return (EBUSY);
884 	return (0);
885 }
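
/*
 * Editor's sketch of a typical vflush() caller, modelled on the
 * filesystem *_unmount() routines: flush every vnode, forcibly if
 * MNT_FORCE was given, and fail the unmount with EBUSY while vnodes
 * remain active.  (The example_* name is hypothetical.)
 */
static int
example_unmount_flush(struct mount *mp, int mntflags)
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, NULLVP, flags));
}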
886 
887 /*
888  * Disassociate the underlying file system from a vnode.
889  */
890 void
891 vclean(struct vnode *vp, int flags, struct proc *p)
892 {
893 	int active;
894 
895 	/*
896 	 * Check to see if the vnode is in use.
897 	 * If so we have to reference it before we clean it out
898 	 * so that its count cannot fall to zero and generate a
899 	 * race against ourselves to recycle it.
900 	 */
901 	if ((active = vp->v_usecount) != 0)
902 		vp->v_usecount++;
903 
904 	/*
905 	 * Prevent the vnode from being recycled or
906 	 * brought into use while we clean it out.
907 	 */
908 	if (vp->v_flag & VXLOCK)
909 		panic("vclean: deadlock");
910 	vp->v_flag |= VXLOCK;
911 	/*
912 	 * Even if the count is zero, the VOP_INACTIVE routine may still
913 	 * have the object locked while it cleans it out. The VOP_LOCK
914 	 * ensures that the VOP_INACTIVE routine is done with its work.
915 	 * For active vnodes, it ensures that no other activity can
916 	 * occur while the underlying object is being cleaned out.
917 	 */
918 	VOP_LOCK(vp, LK_DRAIN, p);
919 
920 	/*
921 	 * Clean out any VM data associated with the vnode.
922 	 */
923 	uvm_vnp_terminate(vp);
924 	/*
925 	 * Clean out any buffers associated with the vnode.
926 	 */
927 	if (flags & DOCLOSE)
928 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
929 	/*
930 	 * If purging an active vnode, it must be closed and
931 	 * deactivated before being reclaimed. Note that the
932 	 * VOP_INACTIVE will unlock the vnode.
933 	 */
934 	if (active) {
935 		if (flags & DOCLOSE)
936 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
937 		VOP_INACTIVE(vp, p);
938 	} else {
939 		/*
940 		 * Any other processes trying to obtain this lock must first
941 		 * wait for VXLOCK to clear, then call the new lock operation.
942 		 */
943 		VOP_UNLOCK(vp, 0, p);
944 	}
945 
946 	/*
947 	 * Reclaim the vnode.
948 	 */
949 	if (VOP_RECLAIM(vp, p))
950 		panic("vclean: cannot reclaim");
951 	if (active) {
952 		vp->v_usecount--;
953 		if (vp->v_usecount == 0) {
954 			if (vp->v_holdcnt > 0)
955 				panic("vclean: not clean");
956 			vputonfreelist(vp);
957 		}
958 	}
959 	cache_purge(vp);
960 
961 	/*
962 	 * Done with purge, notify sleepers of the grim news.
963 	 */
964 	vp->v_op = dead_vnodeop_p;
965 	VN_KNOTE(vp, NOTE_REVOKE);
966 	vp->v_tag = VT_NON;
967 	vp->v_flag &= ~VXLOCK;
968 #ifdef VFSDEBUG
969 	vp->v_flag &= ~VLOCKSWORK;
970 #endif
971 	if (vp->v_flag & VXWANT) {
972 		vp->v_flag &= ~VXWANT;
973 		wakeup(vp);
974 	}
975 }
976 
977 /*
978  * Recycle an unused vnode to the front of the free list.
979  */
980 int
981 vrecycle(struct vnode *vp, struct proc *p)
982 {
983 	if (vp->v_usecount == 0) {
984 		vgonel(vp, p);
985 		return (1);
986 	}
987 	return (0);
988 }
989 
990 /*
991  * Eliminate all activity associated with a vnode
992  * in preparation for reuse.
993  */
994 void
995 vgone(struct vnode *vp)
996 {
997 	struct proc *p = curproc;
998 	vgonel(vp, p);
999 }
1000 
1001 /*
1002  * vgone, with struct proc.
1003  */
1004 void
1005 vgonel(struct vnode *vp, struct proc *p)
1006 {
1007 	struct vnode *vq;
1008 	struct vnode *vx;
1009 
1010 	/*
1011 	 * If a vgone (or vclean) is already in progress,
1012 	 * wait until it is done and return.
1013 	 */
1014 	if (vp->v_flag & VXLOCK) {
1015 		vp->v_flag |= VXWANT;
1016 		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, NULL);
1017 		return;
1018 	}
1019 
1020 	/*
1021 	 * Clean out the filesystem specific data.
1022 	 */
1023 	vclean(vp, DOCLOSE, p);
1024 	/*
1025 	 * Delete from old mount point vnode list, if on one.
1026 	 */
1027 	if (vp->v_mount != NULL)
1028 		insmntque(vp, (struct mount *)0);
1029 	/*
1030 	 * If special device, remove it from special device alias list
1031 	 * if it is on one.
1032 	 */
1033 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1034 		if (*vp->v_hashchain == vp) {
1035 			*vp->v_hashchain = vp->v_specnext;
1036 		} else {
1037 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1038 				if (vq->v_specnext != vp)
1039 					continue;
1040 				vq->v_specnext = vp->v_specnext;
1041 				break;
1042 			}
1043 			if (vq == NULL)
1044 				panic("missing bdev");
1045 		}
1046 		if (vp->v_flag & VALIASED) {
1047 			vx = NULL;
1048 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1049 				if (vq->v_rdev != vp->v_rdev ||
1050 				    vq->v_type != vp->v_type)
1051 					continue;
1052 				if (vx)
1053 					break;
1054 				vx = vq;
1055 			}
1056 			if (vx == NULL)
1057 				panic("missing alias");
1058 			if (vq == NULL)
1059 				vx->v_flag &= ~VALIASED;
1060 			vp->v_flag &= ~VALIASED;
1061 		}
1062 		free(vp->v_specinfo, M_VNODE);
1063 		vp->v_specinfo = NULL;
1064 	}
1065 	/*
1066 	 * If it is on the freelist and not already at the head,
1067 	 * move it to the head of the list.
1068 	 */
1069 	vp->v_type = VBAD;
1070 
1071 	/*
1072 	 * Move onto the free list, unless we were called from
1073 	 * getnewvnode and we're not on any free list
1074 	 */
1075 	if (vp->v_usecount == 0 &&
1076 	    (vp->v_bioflag & VBIOONFREELIST)) {
1077 		int s;
1078 
1079 		s = splbio();
1080 
1081 		if (vp->v_holdcnt > 0)
1082 			panic("vgonel: not clean");
1083 
1084 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1085 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1086 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1087 		}
1088 		splx(s);
1089 	}
1090 }
1091 
1092 /*
1093  * Lookup a vnode by device number.
1094  */
1095 int
1096 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1097 {
1098 	struct vnode *vp;
1099 	int rc = 0;
1100 
1101 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1102 		if (dev != vp->v_rdev || type != vp->v_type)
1103 			continue;
1104 		*vpp = vp;
1105 		rc = 1;
1106 		break;
1107 	}
1108 	return (rc);
1109 }
1110 
1111 /*
1112  * Revoke all the vnodes corresponding to the specified minor number
1113  * range (endpoints inclusive) of the specified major.
1114  */
1115 void
1116 vdevgone(int maj, int minl, int minh, enum vtype type)
1117 {
1118 	struct vnode *vp;
1119 	int mn;
1120 
1121 	for (mn = minl; mn <= minh; mn++)
1122 		if (vfinddev(makedev(maj, mn), type, &vp))
1123 			VOP_REVOKE(vp, REVOKEALL);
1124 }
1125 
1126 /*
1127  * Calculate the total number of references to a special device.
1128  */
1129 int
1130 vcount(struct vnode *vp)
1131 {
1132 	struct vnode *vq, *vnext;
1133 	int count;
1134 
1135 loop:
1136 	if ((vp->v_flag & VALIASED) == 0)
1137 		return (vp->v_usecount);
1138 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1139 		vnext = vq->v_specnext;
1140 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1141 			continue;
1142 		/*
1143 		 * Alias, but not in use, so flush it out.
1144 		 */
1145 		if (vq->v_usecount == 0 && vq != vp) {
1146 			vgone(vq);
1147 			goto loop;
1148 		}
1149 		count += vq->v_usecount;
1150 	}
1151 	return (count);
1152 }
1153 
1154 #if defined(DEBUG) || defined(DIAGNOSTIC)
1155 /*
1156  * Print out a description of a vnode.
1157  */
1158 static char *typename[] =
1159    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1160 
1161 void
1162 vprint(char *label, struct vnode *vp)
1163 {
1164 	char buf[64];
1165 
1166 	if (label != NULL)
1167 		printf("%s: ", label);
1168 	printf("%p, type %s, use %u, write %u, hold %u,",
1169 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1170 		vp->v_holdcnt);
1171 	buf[0] = '\0';
1172 	if (vp->v_flag & VROOT)
1173 		strlcat(buf, "|VROOT", sizeof buf);
1174 	if (vp->v_flag & VTEXT)
1175 		strlcat(buf, "|VTEXT", sizeof buf);
1176 	if (vp->v_flag & VSYSTEM)
1177 		strlcat(buf, "|VSYSTEM", sizeof buf);
1178 	if (vp->v_flag & VXLOCK)
1179 		strlcat(buf, "|VXLOCK", sizeof buf);
1180 	if (vp->v_flag & VXWANT)
1181 		strlcat(buf, "|VXWANT", sizeof buf);
1182 	if (vp->v_bioflag & VBIOWAIT)
1183 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1184 	if (vp->v_bioflag & VBIOONFREELIST)
1185 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1186 	if (vp->v_bioflag & VBIOONSYNCLIST)
1187 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1188 	if (vp->v_flag & VALIASED)
1189 		strlcat(buf, "|VALIASED", sizeof buf);
1190 	if (buf[0] != '\0')
1191 		printf(" flags (%s)", &buf[1]);
1192 	if (vp->v_data == NULL) {
1193 		printf("\n");
1194 	} else {
1195 		printf("\n\t");
1196 		VOP_PRINT(vp);
1197 	}
1198 }
1199 #endif /* DEBUG || DIAGNOSTIC */
1200 
1201 #ifdef DEBUG
1202 /*
1203  * List all of the locked vnodes in the system.
1204  * Called when debugging the kernel.
1205  */
1206 void
1207 printlockedvnodes(void)
1208 {
1209 	struct mount *mp, *nmp;
1210 	struct vnode *vp;
1211 
1212 	printf("Locked vnodes\n");
1213 
1214 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1215 	    mp = nmp) {
1216 		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
1217 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1218 			continue;
1219 		}
1220 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1221 			if (VOP_ISLOCKED(vp))
1222 				vprint((char *)0, vp);
1223 		}
1224 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
1225 		vfs_unbusy(mp);
1226  	}
1227 
1228 }
1229 #endif
1230 
1231 /*
1232  * Top level filesystem related information gathering.
1233  */
1234 int
1235 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1236     size_t newlen, struct proc *p)
1237 {
1238 	struct vfsconf *vfsp, *tmpvfsp;
1239 	int ret;
1240 
1241 	/* all sysctl names at this level are at least name and field */
1242 	if (namelen < 2)
1243 		return (ENOTDIR);		/* overloaded */
1244 
1245 	if (name[0] != VFS_GENERIC) {
1246 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1247 			if (vfsp->vfc_typenum == name[0])
1248 				break;
1249 
1250 		if (vfsp == NULL)
1251 			return (EOPNOTSUPP);
1252 
1253 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1254 		    oldp, oldlenp, newp, newlen, p));
1255 	}
1256 
1257 	switch (name[1]) {
1258 	case VFS_MAXTYPENUM:
1259 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1260 
1261 	case VFS_CONF:
1262 		if (namelen < 3)
1263 			return (ENOTDIR);	/* overloaded */
1264 
1265 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1266 			if (vfsp->vfc_typenum == name[2])
1267 				break;
1268 
1269 		if (vfsp == NULL)
1270 			return (EOPNOTSUPP);
1271 
1272 		/* Make a copy, clear out kernel pointers */
1273 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
1274 		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
1275 		tmpvfsp->vfc_vfsops = NULL;
1276 		tmpvfsp->vfc_mountroot = NULL;
1277 		tmpvfsp->vfc_next = NULL;
1278 
1279 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1280 		    sizeof(struct vfsconf));
1281 
1282 		free(tmpvfsp, M_TEMP);
1283 		return (ret);
1284 	}
1285 
1286 	return (EOPNOTSUPP);
1287 }
1288 
1289 int kinfo_vdebug = 1;
1290 #define KINFO_VNODESLOP	10
1291 /*
1292  * Dump vnode list (via sysctl).
1293  * Copyout address of vnode followed by vnode.
1294  */
1295 /* ARGSUSED */
1296 int
1297 sysctl_vnode(char *where, size_t *sizep, struct proc *p)
1298 {
1299 	struct mount *mp, *nmp;
1300 	struct vnode *vp, *nvp;
1301 	char *bp = where, *savebp;
1302 	char *ewhere;
1303 	int error;
1304 
1305 	if (where == NULL) {
1306 		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
1307 		return (0);
1308 	}
1309 	ewhere = where + *sizep;
1310 
1311 	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1312 	    mp = nmp) {
1313 		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
1314 			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1315 			continue;
1316 		}
1317 		savebp = bp;
1318 again:
1319 		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
1320 		    vp = nvp) {
1321 			/*
1322 			 * Check that the vp is still associated with
1323 			 * this filesystem.  RACE: could have been
1324 			 * recycled onto the same filesystem.
1325 			 */
1326 			if (vp->v_mount != mp) {
1327 				if (kinfo_vdebug)
1328 					printf("kinfo: vp changed\n");
1329 				bp = savebp;
1330 				goto again;
1331 			}
1332 			nvp = LIST_NEXT(vp, v_mntvnodes);
1333 			if (bp + sizeof(struct e_vnode) > ewhere) {
1334 				*sizep = bp - where;
1335 				vfs_unbusy(mp);
1336 				return (ENOMEM);
1337 			}
1338 			if ((error = copyout(&vp,
1339 			    &((struct e_vnode *)bp)->vptr,
1340 			    sizeof(struct vnode *))) ||
1341 			   (error = copyout(vp,
1342 			    &((struct e_vnode *)bp)->vnode,
1343 			    sizeof(struct vnode)))) {
1344 				vfs_unbusy(mp);
1345 				return (error);
1346 			}
1347 			bp += sizeof(struct e_vnode);
1348 		}
1349 
1350 		nmp = CIRCLEQ_NEXT(mp, mnt_list);
1351 		vfs_unbusy(mp);
1352 	}
1353 
1354 	*sizep = bp - where;
1355 
1356 	return (0);
1357 }
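
/*
 * Editor's sketch of the matching userland consumer (assuming the
 * struct e_vnode layout exported through <sys/sysctl.h>): size the
 * buffer with a NULL oldp first, then fetch the dump.
 *
 *	int mib[2] = { CTL_KERN, KERN_VNODE };
 *	size_t len;
 *	char *buf;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((buf = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	(len / sizeof(struct e_vnode) entries are now in buf)
 */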
1358 
1359 /*
1360  * Check to see if a filesystem is mounted on a block device.
1361  */
1362 int
1363 vfs_mountedon(struct vnode *vp)
1364 {
1365 	struct vnode *vq;
1366 	int error = 0;
1367 
1368  	if (vp->v_specmountpoint != NULL)
1369 		return (EBUSY);
1370 	if (vp->v_flag & VALIASED) {
1371 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1372 			if (vq->v_rdev != vp->v_rdev ||
1373 			    vq->v_type != vp->v_type)
1374 				continue;
1375 			if (vq->v_specmountpoint != NULL) {
1376 				error = EBUSY;
1377 				break;
1378 			}
1379  		}
1380 	}
1381 	return (error);
1382 }
1383 
1384 /*
1385  * Build hash lists of net addresses and hang them off the mount point.
1386  * Called by ufs_mount() to set up the lists of export addresses.
1387  */
1388 int
1389 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1390     struct export_args *argp)
1391 {
1392 	struct netcred *np;
1393 	struct radix_node_head *rnh;
1394 	int i;
1395 	struct radix_node *rn;
1396 	struct sockaddr *saddr, *smask = 0;
1397 	struct domain *dom;
1398 	int error;
1399 
1400 	if (argp->ex_addrlen == 0) {
1401 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1402 			return (EPERM);
1403 		np = &nep->ne_defexported;
1404 		np->netc_exflags = argp->ex_flags;
1405 		np->netc_anon = argp->ex_anon;
1406 		np->netc_anon.cr_ref = 1;
1407 		mp->mnt_flag |= MNT_DEFEXPORTED;
1408 		return (0);
1409 	}
1410 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1411 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1412 		return (EINVAL);
1413 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1414 	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
1415 	saddr = (struct sockaddr *)(np + 1);
1416 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1417 	if (error)
1418 		goto out;
1419 	if (saddr->sa_len > argp->ex_addrlen)
1420 		saddr->sa_len = argp->ex_addrlen;
1421 	if (argp->ex_masklen) {
1422 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1423 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1424 		if (error)
1425 			goto out;
1426 		if (smask->sa_len > argp->ex_masklen)
1427 			smask->sa_len = argp->ex_masklen;
1428 	}
1429 	i = saddr->sa_family;
1430 	if (i < 0 || i > AF_MAX) {
1431 		error = EINVAL;
1432 		goto out;
1433 	}
1434 	if ((rnh = nep->ne_rtable[i]) == 0) {
1435 		/*
1436 		 * Seems silly to initialize every AF when most are not
1437 		 * used; do so on demand here.
1438 		 */
1439 		for (dom = domains; dom; dom = dom->dom_next)
1440 			if (dom->dom_family == i && dom->dom_rtattach) {
1441 				dom->dom_rtattach((void **)&nep->ne_rtable[i],
1442 					dom->dom_rtoffset);
1443 				break;
1444 			}
1445 		if ((rnh = nep->ne_rtable[i]) == 0) {
1446 			error = ENOBUFS;
1447 			goto out;
1448 		}
1449 	}
1450 	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
1451 		np->netc_rnodes);
1452 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1453 		error = EPERM;
1454 		goto out;
1455 	}
1456 	np->netc_exflags = argp->ex_flags;
1457 	np->netc_anon = argp->ex_anon;
1458 	np->netc_anon.cr_ref = 1;
1459 	return (0);
1460 out:
1461 	free(np, M_NETADDR);
1462 	return (error);
1463 }
1464 
1465 /* ARGSUSED */
1466 int
1467 vfs_free_netcred(struct radix_node *rn, void *w)
1468 {
1469 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1470 
1471 	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
1472 	free(rn, M_NETADDR);
1473 	return (0);
1474 }
1475 
1476 /*
1477  * Free the net address hash lists that are hanging off the mount points.
1478  */
1479 void
1480 vfs_free_addrlist(struct netexport *nep)
1481 {
1482 	int i;
1483 	struct radix_node_head *rnh;
1484 
1485 	for (i = 0; i <= AF_MAX; i++)
1486 		if ((rnh = nep->ne_rtable[i]) != NULL) {
1487 			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
1488 			free(rnh, M_RTABLE);
1489 			nep->ne_rtable[i] = 0;
1490 		}
1491 }
1492 
1493 int
1494 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1495 {
1496 	int error;
1497 
1498 	if (argp->ex_flags & MNT_DELEXPORT) {
1499 		vfs_free_addrlist(nep);
1500 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1501 	}
1502 	if (argp->ex_flags & MNT_EXPORTED) {
1503 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1504 			return (error);
1505 		mp->mnt_flag |= MNT_EXPORTED;
1506 	}
1507 	return (0);
1508 }
1509 
1510 struct netcred *
1511 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1512 {
1513 	struct netcred *np;
1514 	struct radix_node_head *rnh;
1515 	struct sockaddr *saddr;
1516 
1517 	np = NULL;
1518 	if (mp->mnt_flag & MNT_EXPORTED) {
1519 		/*
1520 		 * Lookup in the export list first.
1521 		 */
1522 		if (nam != NULL) {
1523 			saddr = mtod(nam, struct sockaddr *);
1524 			rnh = nep->ne_rtable[saddr->sa_family];
1525 			if (rnh != NULL) {
1526 				np = (struct netcred *)
1527 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
1528 					    rnh);
1529 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
1530 					np = NULL;
1531 			}
1532 		}
1533 		/*
1534 		 * If no address match, use the default if it exists.
1535 		 */
1536 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1537 			np = &nep->ne_defexported;
1538 	}
1539 	return (np);
1540 }
1541 
1542 /*
1543  * Do the usual access checking.
1544  * file_mode, uid and gid are from the vnode in question,
1545  * while acc_mode and cred are from the VOP_ACCESS parameter list
1546  */
1547 int
1548 vaccess(mode_t file_mode, uid_t uid, gid_t gid, mode_t acc_mode,
1549     struct ucred *cred)
1550 {
1551 	mode_t mask;
1552 
1553 	/* User id 0 always gets access. */
1554 	if (cred->cr_uid == 0)
1555 		return 0;
1556 
1557 	mask = 0;
1558 
1559 	/* Otherwise, check the owner. */
1560 	if (cred->cr_uid == uid) {
1561 		if (acc_mode & VEXEC)
1562 			mask |= S_IXUSR;
1563 		if (acc_mode & VREAD)
1564 			mask |= S_IRUSR;
1565 		if (acc_mode & VWRITE)
1566 			mask |= S_IWUSR;
1567 		return (file_mode & mask) == mask ? 0 : EACCES;
1568 	}
1569 
1570 	/* Otherwise, check the groups. */
1571 	if (cred->cr_gid == gid || groupmember(gid, cred)) {
1572 		if (acc_mode & VEXEC)
1573 			mask |= S_IXGRP;
1574 		if (acc_mode & VREAD)
1575 			mask |= S_IRGRP;
1576 		if (acc_mode & VWRITE)
1577 			mask |= S_IWGRP;
1578 		return (file_mode & mask) == mask ? 0 : EACCES;
1579 	}
1580 
1581 	/* Otherwise, check everyone else. */
1582 	if (acc_mode & VEXEC)
1583 		mask |= S_IXOTH;
1584 	if (acc_mode & VREAD)
1585 		mask |= S_IROTH;
1586 	if (acc_mode & VWRITE)
1587 		mask |= S_IWOTH;
1588 	return (file_mode & mask) == mask ? 0 : EACCES;
1589 }
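
/*
 * Editor's worked example: for a 0644 file owned by uid 100/gid 10,
 * a credential with cr_uid == 100 asking for VREAD|VWRITE builds
 * mask = S_IRUSR|S_IWUSR = 0600, and (0644 & 0600) == 0600, so access
 * is granted.  Any other non-root, non-group-member asking for VWRITE
 * builds mask = S_IWOTH = 0002, and (0644 & 0002) == 0, so EACCES.
 */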
1590 
1591 /*
1592  * Unmount all file systems.
1593  * We traverse the list in reverse order under the assumption that doing so
1594  * will avoid needing to worry about dependencies.
1595  */
1596 void
1597 vfs_unmountall(void)
1598 {
1599 	struct mount *mp, *nmp;
1600 	int allerror, error, again = 1;
1601 
1602  retry:
1603 	allerror = 0;
1604 	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1605 	    mp = nmp) {
1606 		nmp = CIRCLEQ_PREV(mp, mnt_list);
1607 		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
1608 			continue;
1609 		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
1610 			printf("unmount of %s failed with error %d\n",
1611 			    mp->mnt_stat.f_mntonname, error);
1612 			allerror = 1;
1613 		}
1614 	}
1615 
1616 	if (allerror) {
1617 		printf("WARNING: some file systems would not unmount\n");
1618 		if (again) {
1619 			printf("retrying\n");
1620 			again = 0;
1621 			goto retry;
1622 		}
1623 	}
1624 }
1625 
1626 /*
1627  * Sync and unmount file systems before shutting down.
1628  */
1629 void
1630 vfs_shutdown(void)
1631 {
1632 #ifdef ACCOUNTING
1633 	extern void acct_shutdown(void);
1634 
1635 	acct_shutdown();
1636 #endif
1637 
1638 	/* XXX Should suspend scheduling. */
1639 	(void) spl0();
1640 
1641 	printf("\nsyncing disks... ");
1642 
1643 	if (panicstr == 0) {
1644 		/* Sync before unmount, in case we hang on something. */
1645 		sys_sync(&proc0, (void *)0, (register_t *)0);
1646 
1647 		/* Unmount file systems. */
1648 		vfs_unmountall();
1649 	}
1650 
1651 	if (vfs_syncwait(1))
1652 		printf("giving up\n");
1653 	else
1654 		printf("done\n");
1655 }
1656 
1657 /*
1658  * Perform the sync() operation and wait for buffers to flush.
1659  * Assumptions: called with the scheduler disabled and physical I/O
1660  * enabled; for now called at spl0().  XXX
1661  */
1662 int
1663 vfs_syncwait(int verbose)
1664 {
1665 	struct buf *bp;
1666 	int iter, nbusy, dcount, s;
1667 	struct proc *p;
1668 
1669 	p = curproc ? curproc : &proc0;
1670 	sys_sync(p, (void *)0, (register_t *)0);
1671 
1672 	/* Wait for sync to finish. */
1673 	dcount = 10000;
1674 	for (iter = 0; iter < 20; iter++) {
1675 		nbusy = 0;
1676 		LIST_FOREACH(bp, &bufhead, b_list) {
1677 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1678 				nbusy++;
1679 			/*
1680 			 * With soft updates, some buffers that are
1681 			 * written will be remarked as dirty until other
1682 			 * buffers are written.
1683 			 */
1684 			if (bp->b_flags & B_DELWRI) {
1685 				s = splbio();
1686 				bremfree(bp);
1687 				bp->b_flags |= B_BUSY;
1688 				splx(s);
1689 				nbusy++;
1690 				bawrite(bp);
1691 				if (dcount-- <= 0) {
1692 					if (verbose)
1693 						printf("softdep ");
1694 					return 1;
1695 				}
1696 			}
1697 		}
1698 		if (nbusy == 0)
1699 			break;
1700 		if (verbose)
1701 			printf("%d ", nbusy);
1702 		DELAY(40000 * iter);
1703 	}
1704 
1705 	return nbusy;
1706 }
1707 
1708 /*
1709  * posix file system related system variables.
1710  */
1711 int
1712 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1713     void *newp, size_t newlen, struct proc *p)
1714 {
1715 	/* all sysctl names at this level are terminal */
1716 	if (namelen != 1)
1717 		return (ENOTDIR);
1718 
1719 	switch (name[0]) {
1720 	case FS_POSIX_SETUID:
1721 		if (newp && securelevel > 0)
1722 			return (EPERM);
1723 		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1724 	default:
1725 		return (EOPNOTSUPP);
1726 	}
1727 	/* NOTREACHED */
1728 }
1729 
1730 /*
1731  * file system related system variables.
1732  */
1733 int
1734 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1735     size_t newlen, struct proc *p)
1736 {
1737 	sysctlfn *fn;
1738 
1739 	switch (name[0]) {
1740 	case FS_POSIX:
1741 		fn = fs_posix_sysctl;
1742 		break;
1743 	default:
1744 		return (EOPNOTSUPP);
1745 	}
1746 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1747 }
1748 
1749 
1750 /*
1751  * Routines dealing with vnodes and buffers
1752  */
1753 
1754 /*
1755  * Wait for all outstanding I/Os to complete
1756  *
1757  * Manipulates v_numoutput. Must be called at splbio()
1758  */
1759 int
1760 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1761 {
1762 	int error = 0;
1763 
1764 	splassert(IPL_BIO);
1765 
1766 	while (vp->v_numoutput) {
1767 		vp->v_bioflag |= VBIOWAIT;
1768 		error = tsleep(&vp->v_numoutput,
1769 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1770 		if (error)
1771 			break;
1772 	}
1773 
1774 	return (error);
1775 }
1776 
1777 /*
1778  * Update outstanding I/O count and do wakeup if requested.
1779  *
1780  * Manipulates v_numoutput. Must be called at splbio()
1781  */
1782 void
1783 vwakeup(struct vnode *vp)
1784 {
1785 	splassert(IPL_BIO);
1786 
1787 	if (vp != NULL) {
1788 		if (vp->v_numoutput-- == 0)
1789 			panic("vwakeup: neg numoutput");
1790 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1791 			vp->v_bioflag &= ~VBIOWAIT;
1792 			wakeup(&vp->v_numoutput);
1793 		}
1794 	}
1795 }
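
/*
 * Editor's sketch of the v_numoutput protocol the two routines above
 * implement: a writer raises the count at splbio() before issuing
 * each write, and I/O completion (biodone()) calls vwakeup() to drop
 * it and rouse any vwaitforio() sleeper.  (The example_* name is
 * hypothetical.)
 */
static void
example_start_write(struct vnode *vp, struct buf *bp)
{
	int s;

	s = splbio();
	vp->v_numoutput++;	/* balanced by vwakeup() at completion */
	splx(s);
	VOP_STRATEGY(bp);
}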
1796 
1797 /*
1798  * Flush out and invalidate all buffers associated with a vnode.
1799  * Called with the underlying object locked.
1800  */
1801 int
1802 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1803     int slpflag, int slptimeo)
1804 {
1805 	struct buf *bp;
1806 	struct buf *nbp, *blist;
1807 	int s, error;
1808 
1809 #ifdef VFSDEBUG
1810 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1811 		panic("vinvalbuf(): vp isn't locked");
1812 #endif
1813 
1814 	if (flags & V_SAVE) {
1815 		s = splbio();
1816 		vwaitforio(vp, 0, "vinvalbuf", 0);
1817 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1818 			splx(s);
1819 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1820 				return (error);
1821 			s = splbio();
1822 			if (vp->v_numoutput > 0 ||
1823 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1824 				panic("vinvalbuf: dirty bufs");
1825 		}
1826 		splx(s);
1827 	}
1828 loop:
1829 	s = splbio();
1830 	for (;;) {
1831 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1832 		    (flags & V_SAVEMETA))
1833 			while (blist && blist->b_lblkno < 0)
1834 				blist = LIST_NEXT(blist, b_vnbufs);
1835 		if (blist == NULL &&
1836 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1837 		    (flags & V_SAVEMETA))
1838 			while (blist && blist->b_lblkno < 0)
1839 				blist = LIST_NEXT(blist, b_vnbufs);
1840 		if (!blist)
1841 			break;
1842 
1843 		for (bp = blist; bp; bp = nbp) {
1844 			nbp = LIST_NEXT(bp, b_vnbufs);
1845 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1846 				continue;
1847 			if (bp->b_flags & B_BUSY) {
1848 				bp->b_flags |= B_WANTED;
1849 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1850 				    "vinvalbuf", slptimeo);
1851 				if (error) {
1852 					splx(s);
1853 					return (error);
1854 				}
1855 				break;
1856 			}
1857 			bremfree(bp);
1858 			bp->b_flags |= B_BUSY;
1859 			/*
1860 			 * XXX Since there are no node locks for NFS, I believe
1861 			 * there is a slight chance that a delayed write will
1862 			 * occur while sleeping just above, so check for it.
1863 			 */
1864 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1865 				splx(s);
1866 				(void) VOP_BWRITE(bp);
1867 				goto loop;
1868 			}
1869 			bp->b_flags |= B_INVAL;
1870 			brelse(bp);
1871 		}
1872 	}
1873 	if (!(flags & V_SAVEMETA) &&
1874 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1875 		panic("vinvalbuf: flush failed");
1876 	splx(s);
1877 	return (0);
1878 }
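
/*
 * Editor's note on the common vinvalbuf() call sites: V_SAVE from
 * paths that must preserve the data (as vclean() does above before a
 * close), V_SAVEMETA when indirect blocks must survive, and flags 0
 * when the contents may simply be discarded:
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);
 *	error = vinvalbuf(vp, 0, cred, p, 0, 0);
 */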
1879 
1880 void
1881 vflushbuf(struct vnode *vp, int sync)
1882 {
1883 	struct buf *bp, *nbp;
1884 	int s;
1885 
1886 loop:
1887 	s = splbio();
1888 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
1889 	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
1890 		nbp = LIST_NEXT(bp, b_vnbufs);
1891 		if ((bp->b_flags & B_BUSY))
1892 			continue;
1893 		if ((bp->b_flags & B_DELWRI) == 0)
1894 			panic("vflushbuf: not dirty");
1895 		bremfree(bp);
1896 		bp->b_flags |= B_BUSY;
1897 		splx(s);
1898 		/*
1899 		 * Wait for I/O associated with indirect blocks to complete,
1900 		 * since there is no way to quickly wait for them below.
1901 		 */
1902 		if (bp->b_vp == vp || sync == 0)
1903 			(void) bawrite(bp);
1904 		else
1905 			(void) bwrite(bp);
1906 		goto loop;
1907 	}
1908 	if (sync == 0) {
1909 		splx(s);
1910 		return;
1911 	}
1912 	vwaitforio(vp, 0, "vflushbuf", 0);
1913 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1914 		splx(s);
1915 #ifdef DIAGNOSTIC
1916 		vprint("vflushbuf: dirty", vp);
1917 #endif
1918 		goto loop;
1919 	}
1920 	splx(s);
1921 }
1922 
1923 /*
1924  * Associate a buffer with a vnode.
1925  *
1926  * Manipulates buffer vnode queues. Must be called at splbio().
1927  */
1928 void
1929 bgetvp(struct vnode *vp, struct buf *bp)
1930 {
1931 	splassert(IPL_BIO);
1932 
1933 
1934 	if (bp->b_vp)
1935 		panic("bgetvp: not free");
1936 	vhold(vp);
1937 	bp->b_vp = vp;
1938 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1939 		bp->b_dev = vp->v_rdev;
1940 	else
1941 		bp->b_dev = NODEV;
1942 	/*
1943 	 * Insert onto list for new vnode.
1944 	 */
1945 	bufinsvn(bp, &vp->v_cleanblkhd);
1946 }
1947 
1948 /*
1949  * Disassociate a buffer from a vnode.
1950  *
1951  * Manipulates vnode buffer queues. Must be called at splbio().
1952  */
1953 void
1954 brelvp(struct buf *bp)
1955 {
1956 	struct vnode *vp;
1957 
1958 	splassert(IPL_BIO);
1959 
1960 	if ((vp = bp->b_vp) == (struct vnode *) 0)
1961 		panic("brelvp: NULL");
1962 	/*
1963 	 * Delete from old vnode list, if on one.
1964 	 */
1965 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1966 		bufremvn(bp);
1967 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1968 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1969 		vp->v_bioflag &= ~VBIOONSYNCLIST;
1970 		LIST_REMOVE(vp, v_synclist);
1971 	}
1972 	bp->b_vp = (struct vnode *) 0;
1973 
1974 #ifdef DIAGNOSTIC
1975 	if (vp->v_holdcnt == 0)
1976 		panic("brelvp: holdcnt");
1977 #endif
1978 	vp->v_holdcnt--;
1979 
1980 	/*
1981 	 * If it is on the holdlist and the hold count drops to
1982 	 * zero, move it to the free list.
1983 	 */
1984 	if ((vp->v_bioflag & VBIOONFREELIST) &&
1985 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
1986 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
1987 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1988 	}
1989 }
1990 
1991 /*
1992  * Replaces the current vnode associated with the buffer, if any,
1993  * with a new vnode.
1994  *
1995  * If an output I/O is pending on the buffer, the old vnode
1996  * I/O count is adjusted.
1997  *
1998  * Ignores vnode buffer queues. Must be called at splbio().
1999  */
2000 void
2001 buf_replacevnode(struct buf *bp, struct vnode *newvp)
2002 {
2003 	struct vnode *oldvp = bp->b_vp;
2004 
2005 	splassert(IPL_BIO);
2006 
2007 	if (oldvp)
2008 		brelvp(bp);
2009 
2010 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2011 		newvp->v_numoutput++;	/* put it on swapdev */
2012 		vwakeup(oldvp);
2013 	}
2014 
2015 	bgetvp(newvp, bp);
2016 	bufremvn(bp);
2017 }
2018 
2019 /*
2020  * Used to assign buffers to the appropriate clean or dirty list on
2021  * the vnode and to add newly dirty vnodes to the appropriate
2022  * filesystem syncer list.
2023  *
2024  * Manipulates vnode buffer queues. Must be called at splbio().
2025  */
2026 void
2027 reassignbuf(struct buf *bp)
2028 {
2029 	struct buflists *listheadp;
2030 	int delay;
2031 	struct vnode *vp = bp->b_vp;
2032 
2033 	splassert(IPL_BIO);
2034 
2035 	/*
2036 	 * Delete from old vnode list, if on one.
2037 	 */
2038 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2039 		bufremvn(bp);
2040 
2041 	/*
2042 	 * If dirty, put on list of dirty buffers;
2043 	 * otherwise insert onto list of clean buffers.
2044 	 */
2045 	if ((bp->b_flags & B_DELWRI) == 0) {
2046 		listheadp = &vp->v_cleanblkhd;
2047 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2048 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2049 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2050 			LIST_REMOVE(vp, v_synclist);
2051 		}
2052 	} else {
2053 		listheadp = &vp->v_dirtyblkhd;
2054 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2055 			switch (vp->v_type) {
2056 			case VDIR:
2057 				delay = syncdelay / 2;
2058 				break;
2059 			case VBLK:
2060 				if (vp->v_specmountpoint != NULL) {
2061 					delay = syncdelay / 3;
2062 					break;
2063 				}
2064 				/* FALLTHROUGH */
2065 			default:
2066 				delay = syncdelay;
2067 			}
2068 			vn_syncer_add_to_worklist(vp, delay);
2069 		}
2070 	}
2071 	bufinsvn(bp, listheadp);
2072 }
2073 
2074 int
2075 vfs_register(struct vfsconf *vfs)
2076 {
2077 	struct vfsconf *vfsp;
2078 	struct vfsconf **vfspp;
2079 
2080 #ifdef DIAGNOSTIC
2081 	/* Paranoia? */
2082 	if (vfs->vfc_refcount != 0)
2083 		printf("vfs_register called with vfc_refcount > 0\n");
2084 #endif
2085 
2086 	/* Check if filesystem already known */
2087 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2088 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2089 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2090 			return (EEXIST);
2091 
2092 	if (vfs->vfc_typenum > maxvfsconf)
2093 		maxvfsconf = vfs->vfc_typenum;
2094 
2095 	vfs->vfc_next = NULL;
2096 
2097 	/* Add to the end of the list */
2098 	*vfspp = vfs;
2099 
2100 	/* Call vfs_init() */
2101 	if (vfs->vfc_vfsops->vfs_init)
2102 		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2103 
2104 	return 0;
2105 }
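
/*
 * Editor's sketch of a vfs_register() caller, e.g. a loadable
 * filesystem's attach routine.  The ops vector, name and typenum
 * choice below are hypothetical.
 */
static int
example_register_fs(struct vfsops *ops)
{
	static struct vfsconf conf;

	conf.vfc_vfsops = ops;
	strlcpy(conf.vfc_name, "example", sizeof(conf.vfc_name));
	conf.vfc_typenum = maxvfsconf + 1;
	return (vfs_register(&conf));
}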
2106 
2107 int
2108 vfs_unregister(struct vfsconf *vfs)
2109 {
2110 	struct vfsconf *vfsp;
2111 	struct vfsconf **vfspp;
2112 	int maxtypenum;
2113 
2114 	/* Find our vfsconf struct */
2115 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2116 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2117 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2118 			break;
2119 	}
2120 
2121 	if (!vfsp)			/* Not found */
2122 		return (ENOENT);
2123 
2124 	if (vfsp->vfc_refcount)		/* In use */
2125 		return (EBUSY);
2126 
2127 	/* Remove from list and free */
2128 	*vfspp = vfsp->vfc_next;
2129 
2130 	maxtypenum = 0;
2131 
2132 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2133 		if (vfsp->vfc_typenum > maxtypenum)
2134 			maxtypenum = vfsp->vfc_typenum;
2135 
2136 	maxvfsconf = maxtypenum;
2137 	return 0;
2138 }
2139 
2140 /*
2141  * Check if vnode represents a disk device
2142  */
2143 int
2144 vn_isdisk(struct vnode *vp, int *errp)
2145 {
2146 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2147 		return (0);
2148 
2149 	return (1);
2150 }
2151 
2152 #ifdef DDB
2153 #include <machine/db_machdep.h>
2154 #include <ddb/db_interface.h>
2155 #include <ddb/db_output.h>
2156 
2157 void
2158 vfs_buf_print(struct buf *bp, int full, int (*pr)(const char *, ...))
2159 {
2160 
2161 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2162 	      "  proc %p error %d flags %b\n",
2163 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2164 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2165 
2166 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
2167 	      "  data %p saveaddr %p dep %p iodone %p\n",
2168 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
2169 	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);
2170 
2171 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2172 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2173 
2174 #ifdef FFS_SOFTUPDATES
2175 	if (full)
2176 		softdep_print(bp, full, pr);
2177 #endif
2178 }
2179 
2180 const char *vtypes[] = { VTYPE_NAMES };
2181 const char *vtags[] = { VTAG_NAMES };
2182 
2183 void
2184 vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
2185 {
2186 
2187 #define	NENTS(n)	(sizeof n / sizeof(n[0]))
2188 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2189 	      vp->v_tag >= NENTS(vtags) ? "<unk>" : vtags[vp->v_tag], vp->v_tag,
2190 	      vp->v_type >= NENTS(vtypes) ? "<unk>" : vtypes[vp->v_type],
2191 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2192 
2193 	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
2194 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2195 	      vp->v_holdcnt, vp->v_numoutput);
2196 
2197 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2198 
2199 	if (full) {
2200 		struct buf *bp;
2201 
2202 		(*pr)("clean bufs:\n");
2203 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2204 			(*pr)(" bp %p\n", bp);
2205 			vfs_buf_print(bp, full, pr);
2206 		}
2207 
2208 		(*pr)("dirty bufs:\n");
2209 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2210 			(*pr)(" bp %p\n", bp);
2211 			vfs_buf_print(bp, full, pr);
2212 		}
2213 	}
2214 }
2215 
2216 void
2217 vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
2218 {
2219 	struct vfsconf *vfc = mp->mnt_vfc;
2220 	struct vnode *vp;
2221 	int cnt = 0;
2222 
2223 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2224 	    mp->mnt_flag, MNT_BITS,
2225 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2226 
2227 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2228             vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2229 	    vfc->vfc_refcount, vfc->vfc_flags);
2230 
2231 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %u free %u avail %u\n",
2232 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2233 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2234 
2235 	(*pr)("  files %u ffiles %u\n", mp->mnt_stat.f_files,
2236 	    mp->mnt_stat.f_ffree);
2237 
2238 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
2239 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2240 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2241 
2242  	(*pr)("  syncwrites %lu asyncwrites = %lu\n",
2243 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2244 
2245 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
2246 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2247 	    mp->mnt_stat.f_mntfromname);
2248 
2249 	(*pr)("locked vnodes:");
2250 	/* XXX would take mountlist lock, except ddb has no context */
2251 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2252 		if (VOP_ISLOCKED(vp)) {
2253 			if (!LIST_NEXT(vp, v_mntvnodes))
2254 				(*pr)(" %p", vp);
2255 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2256 				(*pr)("\n\t%p", vp);
2257 			else
2258 				(*pr)(", %p", vp);
2259 		}
2260 	(*pr)("\n");
2261 
2262 	if (full) {
2263 		(*pr)("all vnodes:\n\t");
2264 		/* XXX would take mountlist lock, except ddb has no context */
2265 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2266 			if (!LIST_NEXT(vp, v_mntvnodes))
2267 				(*pr)(" %p", vp);
2268 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2269 				(*pr)(" %p,\n\t", vp);
2270 			else
2271 				(*pr)(" %p,", vp);
2272 		(*pr)("\n", vp);
2273 	}
2274 }
2275 #endif /* DDB */
2276