1 /*	$OpenBSD: vfs_subr.c,v 1.257 2017/01/15 23:18:05 bluhm Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/time.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/conf.h>
53 #include <sys/vnode.h>
54 #include <sys/lock.h>
55 #include <sys/stat.h>
56 #include <sys/acct.h>
57 #include <sys/namei.h>
58 #include <sys/ucred.h>
59 #include <sys/buf.h>
60 #include <sys/errno.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/syscallargs.h>
64 #include <sys/pool.h>
65 #include <sys/tree.h>
66 #include <sys/specdev.h>
67 
68 #include <netinet/in.h>
69 
70 #include <uvm/uvm_extern.h>
71 #include <uvm/uvm_vnode.h>
72 
73 #include "softraid.h"
74 
75 void sr_shutdown(void);
76 
77 enum vtype iftovt_tab[16] = {
78 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80 };
81 
82 int	vttoif_tab[9] = {
83 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84 	S_IFSOCK, S_IFIFO, S_IFMT,
85 };
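
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * which convert between stat-style S_IF* mode bits and vnode types.
 * A minimal usage sketch ("mode" and "perms" are illustrative):
 *
 *	vp->v_type = IFTOVT(mode);
 *	mode = VTTOIF(vp->v_type) | perms;
 */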
86 
87 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89 
90 /*
91  * Insq/Remq for the vnode usage lists.
92  */
93 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94 #define	bufremvn(bp) {							\
95 	LIST_REMOVE(bp, b_vnbufs);					\
96 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97 }
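
/*
 * Note that bufremvn() leaves NOLIST in the b_vnbufs link; brelvp()
 * and reassignbuf() below rely on this, testing LIST_NEXT(bp,
 * b_vnbufs) against NOLIST to tell whether a buffer is still on a
 * vnode queue.
 */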
98 
99 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100 struct freelst vnode_free_list;	/* vnode free list */
101 
102 struct mntlist mountlist;	/* mounted filesystem list */
103 
104 void	vclean(struct vnode *, int, struct proc *);
105 
106 void insmntque(struct vnode *, struct mount *);
107 int getdevvp(dev_t, struct vnode **, enum vtype);
108 
109 int vfs_hang_addrlist(struct mount *, struct netexport *,
110 				  struct export_args *);
111 int vfs_free_netcred(struct radix_node *, void *, u_int);
112 void vfs_free_addrlist(struct netexport *);
113 void vputonfreelist(struct vnode *);
114 
115 int vflush_vnode(struct vnode *, void *);
116 int maxvnodes;
117 
118 #ifdef DEBUG
119 void printlockedvnodes(void);
120 #endif
121 
122 struct pool vnode_pool;
123 struct pool uvm_vnode_pool;
124 
125 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
126 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
127 
128 static inline int
129 rb_buf_compare(const struct buf *b1, const struct buf *b2)
130 {
131 	if (b1->b_lblkno < b2->b_lblkno)
132 		return (-1);
133 	if (b1->b_lblkno > b2->b_lblkno)
134 		return (1);
135 	return (0);
136 }
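
/*
 * Example (sketch): with the comparator above, looking up a logical
 * block in a vnode's buffer tree only needs a key buffer carrying
 * the block number ("lblkno" is illustrative):
 *
 *	struct buf key, *bp;
 *	key.b_lblkno = lblkno;
 *	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key);
 */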
137 
138 /*
139  * Initialize the vnode management data structures.
140  */
141 void
142 vntblinit(void)
143 {
144 	/* buffer cache may need a vnode for each buffer */
145 	maxvnodes = 2 * initialvnodes;
146 	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
147 	    PR_WAITOK, "vnodes", NULL);
148 	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
149 	    PR_WAITOK, "uvmvnodes", NULL);
150 	TAILQ_INIT(&vnode_hold_list);
151 	TAILQ_INIT(&vnode_free_list);
152 	TAILQ_INIT(&mountlist);
153 	/*
154 	 * Initialize the filesystem syncer.
155 	 */
156 	vn_initialize_syncerd();
157 
158 	rn_init(sizeof(struct sockaddr_in));
159 }
160 
161 /*
162  * Mark a mount point as busy. Used to synchronize access and to delay
163  * unmounting.
164  *
165  * The default behaviour is to attempt getting a READ lock; if an
166  * unmount is in progress, wait for it to finish and then return failure.
167  */
168 int
169 vfs_busy(struct mount *mp, int flags)
170 {
171 	int rwflags = 0;
172 
173 	/* new mountpoints need their lock initialised */
174 	if (mp->mnt_lock.rwl_name == NULL)
175 		rw_init(&mp->mnt_lock, "vfslock");
176 
177 	if (flags & VB_WRITE)
178 		rwflags |= RW_WRITE;
179 	else
180 		rwflags |= RW_READ;
181 
182 	if (flags & VB_WAIT)
183 		rwflags |= RW_SLEEPFAIL;
184 	else
185 		rwflags |= RW_NOSLEEP;
186 
187 	if (rw_enter(&mp->mnt_lock, rwflags))
188 		return (EBUSY);
189 
190 	return (0);
191 }
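
/*
 * Example (sketch): the usual traversal pattern, as in
 * printlockedvnodes() below; mount points that are being unmounted
 * are skipped rather than slept on:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *		continue;
 *	...examine mp...
 *	vfs_unbusy(mp);
 */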
192 
193 /*
194  * Free a busy file system
195  */
196 void
197 vfs_unbusy(struct mount *mp)
198 {
199 	rw_exit(&mp->mnt_lock);
200 }
201 
202 int
203 vfs_isbusy(struct mount *mp)
204 {
205 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
206 		return (1);
207 	else
208 		return (0);
209 }
210 
211 /*
212  * Look up a filesystem type and, if found, allocate and initialize
213  * a mount structure for it.
214  *
215  * Devname is usually updated by mount(8) after booting.
216  */
217 int
218 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
219 {
220 	struct vfsconf *vfsp;
221 	struct mount *mp;
222 
223 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
224 		if (!strcmp(vfsp->vfc_name, fstypename))
225 			break;
226 	if (vfsp == NULL)
227 		return (ENODEV);
228 	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
229 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
230 	LIST_INIT(&mp->mnt_vnodelist);
231 	mp->mnt_vfc = vfsp;
232 	mp->mnt_op = vfsp->vfc_vfsops;
233 	mp->mnt_flag = MNT_RDONLY;
234 	mp->mnt_vnodecovered = NULLVP;
235 	vfsp->vfc_refcount++;
236 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
237 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
238 	mp->mnt_stat.f_mntonname[0] = '/';
239 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
240 	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
241 	*mpp = mp;
242 	return (0);
243 }
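
/*
 * Example (sketch): how early boot code might use this; the
 * filesystem and device names are illustrative only:
 *
 *	struct mount *mp;
 *	error = vfs_rootmountalloc("ffs", "sd0a", &mp);
 *
 * On success mp comes back busied (VB_READ) and marked MNT_RDONLY.
 */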
244 
245 /*
246  * Look up a mount point by filesystem identifier.
247  */
248 struct mount *
249 vfs_getvfs(fsid_t *fsid)
250 {
251 	struct mount *mp;
252 
253 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
254 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
255 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
256 			return (mp);
257 		}
258 	}
259 
260 	return (NULL);
261 }
262 
263 
264 /*
265  * Get a new unique fsid
266  */
267 void
268 vfs_getnewfsid(struct mount *mp)
269 {
270 	static u_short xxxfs_mntid;
271 
272 	fsid_t tfsid;
273 	int mtype;
274 
275 	mtype = mp->mnt_vfc->vfc_typenum;
276 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
277 	mp->mnt_stat.f_fsid.val[1] = mtype;
278 	if (xxxfs_mntid == 0)
279 		++xxxfs_mntid;
280 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
281 	tfsid.val[1] = mtype;
282 	if (!TAILQ_EMPTY(&mountlist)) {
283 		while (vfs_getvfs(&tfsid)) {
284 			tfsid.val[0]++;
285 			xxxfs_mntid++;
286 		}
287 	}
288 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
289 }
290 
291 /*
292  * Set vnode attributes to VNOVAL
293  */
294 void
295 vattr_null(struct vattr *vap)
296 {
297 
298 	vap->va_type = VNON;
299 	/*
300 	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
301 	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
302 	 * the compiler do its job.
303 	 */
304 	vap->va_mode = VNOVAL;
305 	vap->va_nlink = VNOVAL;
306 	vap->va_uid = VNOVAL;
307 	vap->va_gid = VNOVAL;
308 	vap->va_fsid = VNOVAL;
309 	vap->va_fileid = VNOVAL;
310 	vap->va_size = VNOVAL;
311 	vap->va_blocksize = VNOVAL;
312 	vap->va_atime.tv_sec = VNOVAL;
313 	vap->va_atime.tv_nsec = VNOVAL;
314 	vap->va_mtime.tv_sec = VNOVAL;
315 	vap->va_mtime.tv_nsec = VNOVAL;
316 	vap->va_ctime.tv_sec = VNOVAL;
317 	vap->va_ctime.tv_nsec = VNOVAL;
318 	vap->va_gen = VNOVAL;
319 	vap->va_flags = VNOVAL;
320 	vap->va_rdev = VNOVAL;
321 	vap->va_bytes = VNOVAL;
322 	vap->va_filerev = VNOVAL;
323 	vap->va_vaflags = 0;
324 }
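
/*
 * Example (sketch): callers reset a struct vattr and then fill in
 * only the attributes they intend to change, e.g. truncating a file
 * before a VOP_SETATTR():
 *
 *	struct vattr va;
 *	VATTR_NULL(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 *
 * VATTR_NULL() is the <sys/vnode.h> wrapper around vattr_null().
 */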
325 
326 /*
327  * Routines having to do with the management of the vnode table.
328  */
329 long numvnodes;
330 
331 /*
332  * Return the next vnode from the free list.
333  */
334 int
335 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
336     struct vnode **vpp)
337 {
338 	struct proc *p = curproc;
339 	struct freelst *listhd;
340 	static int toggle;
341 	struct vnode *vp;
342 	int s;
343 
344 	/*
345 	 * allow maxvnodes to increase if the buffer cache itself
346 	 * is big enough to justify it. (we don't shrink it ever)
347 	 */
348 	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
349 	    : maxvnodes;
350 
351 	/*
352 	 * We must choose whether to allocate a new vnode or recycle an
353 	 * existing one. The criterion for allocating a new one is that
354 	 * the total number of vnodes is less than the number desired or
355 	 * there are no vnodes on either free list. Generally we only
356 	 * want to recycle vnodes that have no buffers associated with
357 	 * them, so we look first on the vnode_free_list. If it is empty,
358 	 * we next consider vnodes with referencing buffers on the
359 	 * vnode_hold_list. The toggle ensures that half the time we
360 	 * will use a buffer from the vnode_hold_list, and half the time
361 	 * we will allocate a new one unless the list has grown to twice
362 	 * the desired size. We are reluctant to recycle vnodes from the
363 	 * vnode_hold_list because we would lose the identity of all their
364 	 * referencing buffers.
365 	 */
366 	toggle ^= 1;
367 	if (numvnodes / 2 > maxvnodes)
368 		toggle = 0;
369 
370 	s = splbio();
371 	if ((numvnodes < maxvnodes) ||
372 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
373 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
374 		splx(s);
375 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
376 		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
377 		vp->v_uvm->u_vnode = vp;
378 		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
379 		cache_tree_init(&vp->v_nc_tree);
380 		TAILQ_INIT(&vp->v_cache_dst);
381 		numvnodes++;
382 	} else {
383 		TAILQ_FOREACH(vp, listhd, v_freelist) {
384 			if (VOP_ISLOCKED(vp) == 0)
385 				break;
386 		}
387 		/*
388 		 * Unless this is a bad time of the month, at most
389 		 * the first NCPUS items on the free list are
390 		 * locked, so this is close enough to being empty.
391 		 */
392 		if (vp == NULL) {
393 			splx(s);
394 			tablefull("vnode");
395 			*vpp = NULL;
396 			return (ENFILE);
397 		}
398 
399 #ifdef DIAGNOSTIC
400 		if (vp->v_usecount) {
401 			vprint("free vnode", vp);
402 			panic("free vnode isn't");
403 		}
404 #endif
405 
406 		TAILQ_REMOVE(listhd, vp, v_freelist);
407 		vp->v_bioflag &= ~VBIOONFREELIST;
408 		splx(s);
409 
410 		if (vp->v_type != VBAD)
411 			vgonel(vp, p);
412 #ifdef DIAGNOSTIC
413 		if (vp->v_data) {
414 			vprint("cleaned vnode", vp);
415 			panic("cleaned vnode isn't");
416 		}
417 		s = splbio();
418 		if (vp->v_numoutput)
419 			panic("Clean vnode has pending I/O's");
420 		splx(s);
421 #endif
422 		vp->v_flag = 0;
423 		vp->v_socket = NULL;
424 	}
425 	cache_purge(vp);
426 	vp->v_type = VNON;
427 	vp->v_tag = tag;
428 	vp->v_op = vops;
429 	insmntque(vp, mp);
430 	*vpp = vp;
431 	vp->v_usecount = 1;
432 	vp->v_data = NULL;
433 	return (0);
434 }
435 
436 /*
437  * Move a vnode from one mount queue to another.
438  */
439 void
440 insmntque(struct vnode *vp, struct mount *mp)
441 {
442 	/*
443 	 * Delete from old mount point vnode list, if on one.
444 	 */
445 	if (vp->v_mount != NULL)
446 		LIST_REMOVE(vp, v_mntvnodes);
447 	/*
448 	 * Insert into list of vnodes for the new mount point, if available.
449 	 */
450 	if ((vp->v_mount = mp) != NULL)
451 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
452 }
453 
454 /*
455  * Create a vnode for a block device.
456  * Used for root filesystem, argdev, and swap areas.
457  * Also used for memory file system special devices.
458  */
459 int
460 bdevvp(dev_t dev, struct vnode **vpp)
461 {
462 	return (getdevvp(dev, vpp, VBLK));
463 }
464 
465 /*
466  * Create a vnode for a character device.
467  * Used for console handling.
468  */
469 int
470 cdevvp(dev_t dev, struct vnode **vpp)
471 {
472 	return (getdevvp(dev, vpp, VCHR));
473 }
474 
475 /*
476  * Create a vnode for a device.
477  * Used by bdevvp (block device) for root file system etc.,
478  * and by cdevvp (character device) for console.
479  */
480 int
481 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
482 {
483 	struct vnode *vp;
484 	struct vnode *nvp;
485 	int error;
486 
487 	if (dev == NODEV) {
488 		*vpp = NULLVP;
489 		return (0);
490 	}
491 	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
492 	if (error) {
493 		*vpp = NULLVP;
494 		return (error);
495 	}
496 	vp = nvp;
497 	vp->v_type = type;
498 	if ((nvp = checkalias(vp, dev, NULL)) != NULL) {
499 		vput(vp);
500 		vp = nvp;
501 	}
502 	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
503 		vp->v_flag |= VISTTY;
504 	*vpp = vp;
505 	return (0);
506 }
507 
508 /*
509  * Check to see if the new vnode represents a special device
510  * for which we already have a vnode (either because of
511  * bdevvp() or because of a different vnode representing
512  * the same block device). If such an alias exists, deallocate
513  * the existing contents and return the aliased vnode. The
514  * caller is responsible for filling it with its new contents.
515  */
516 struct vnode *
517 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
518 {
519 	struct proc *p = curproc;
520 	struct vnode *vp;
521 	struct vnode **vpp;
522 
523 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
524 		return (NULLVP);
525 
526 	vpp = &speclisth[SPECHASH(nvp_rdev)];
527 loop:
528 	for (vp = *vpp; vp; vp = vp->v_specnext) {
529 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
530 			continue;
531 		}
532 		/*
533 		 * Alias, but not in use, so flush it out.
534 		 */
535 		if (vp->v_usecount == 0) {
536 			vgonel(vp, p);
537 			goto loop;
538 		}
539 		if (vget(vp, LK_EXCLUSIVE, p)) {
540 			goto loop;
541 		}
542 		break;
543 	}
544 
545 	/*
546 	 * Common case is actually in the if statement
547 	 */
548 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
549 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
550 			M_WAITOK);
551 		nvp->v_rdev = nvp_rdev;
552 		nvp->v_hashchain = vpp;
553 		nvp->v_specnext = *vpp;
554 		nvp->v_specmountpoint = NULL;
555 		nvp->v_speclockf = NULL;
556 		nvp->v_specbitmap = NULL;
557 		if (nvp->v_type == VCHR &&
558 		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
559 		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
560 			if (vp != NULLVP)
561 				nvp->v_specbitmap = vp->v_specbitmap;
562 			else
563 				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
564 				    M_VNODE, M_WAITOK | M_ZERO);
565 		}
566 		*vpp = nvp;
567 		if (vp != NULLVP) {
568 			nvp->v_flag |= VALIASED;
569 			vp->v_flag |= VALIASED;
570 			vput(vp);
571 		}
572 		return (NULLVP);
573 	}
574 
575 	/*
576 	 * This code is the uncommon case. It is called in case
577 	 * we found an alias that was VT_NON && vtype of VBLK
578 	 * This means we found a block device that was created
579 	 * using bdevvp.
580 	 * An example of such a vnode is the root partition device vnode
581 	 * created in ffs_mountroot.
582 	 *
583 	 * The vnodes created by bdevvp should not be aliased (why?).
584 	 */
585 
586 	VOP_UNLOCK(vp, p);
587 	vclean(vp, 0, p);
588 	vp->v_op = nvp->v_op;
589 	vp->v_tag = nvp->v_tag;
590 	nvp->v_type = VNON;
591 	insmntque(vp, mp);
592 	return (vp);
593 }
594 
595 /*
596  * Grab a particular vnode from the free list, increment its
597  * reference count and lock it. If the vnode lock bit is set,
598  * the vnode is being eliminated in vgone. In that case, we
599  * cannot grab it; instead we sleep until the transition is
600  * completed, and then an error code is returned to
601  * indicate that the vnode is no longer usable, possibly
602  * having been changed to a new file system type.
603  */
604 int
605 vget(struct vnode *vp, int flags, struct proc *p)
606 {
607 	int error, s, onfreelist;
608 
609 	/*
610 	 * If the vnode is in the process of being cleaned out for
611 	 * another use, we wait for the cleaning to finish and then
612 	 * return failure. Cleaning is determined by checking that
613 	 * the VXLOCK flag is set.
614 	 */
615 
616 	if (vp->v_flag & VXLOCK) {
617 		if (flags & LK_NOWAIT) {
618 			return (EBUSY);
619 		}
620 
621 		vp->v_flag |= VXWANT;
622 		tsleep(vp, PINOD, "vget", 0);
623 		return (ENOENT);
624 	}
625 
626 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
627 	if (vp->v_usecount == 0 && onfreelist) {
628 		s = splbio();
629 		if (vp->v_holdcnt > 0)
630 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
631 		else
632 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
633 		vp->v_bioflag &= ~VBIOONFREELIST;
634 		splx(s);
635 	}
636 
637 	vp->v_usecount++;
638 	if (flags & LK_TYPE_MASK) {
639 		if ((error = vn_lock(vp, flags, p)) != 0) {
640 			vp->v_usecount--;
641 			if (vp->v_usecount == 0 && onfreelist)
642 				vputonfreelist(vp);
643 		}
644 		return (error);
645 	}
646 
647 	return (0);
648 }
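
/*
 * Example (sketch): the common calling pattern is to take a locked
 * reference, use the vnode, and then drop lock and reference
 * together:
 *
 *	if (vget(vp, LK_EXCLUSIVE, p) == 0) {
 *		...use vp...
 *		vput(vp);
 *	}
 */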
649 
650 
651 /* Vnode reference. */
652 void
653 vref(struct vnode *vp)
654 {
655 #ifdef DIAGNOSTIC
656 	if (vp->v_usecount == 0)
657 		panic("vref used where vget required");
658 	if (vp->v_type == VNON)
659 		panic("vref on a VNON vnode");
660 #endif
661 	vp->v_usecount++;
662 }
663 
664 void
665 vputonfreelist(struct vnode *vp)
666 {
667 	int s;
668 	struct freelst *lst;
669 
670 	s = splbio();
671 #ifdef DIAGNOSTIC
672 	if (vp->v_usecount != 0)
673 		panic("Use count is not zero!");
674 
675 	if (vp->v_bioflag & VBIOONFREELIST) {
676 		vprint("vnode already on free list: ", vp);
677 		panic("vnode already on free list");
678 	}
679 #endif
680 
681 	vp->v_bioflag |= VBIOONFREELIST;
682 
683 	if (vp->v_holdcnt > 0)
684 		lst = &vnode_hold_list;
685 	else
686 		lst = &vnode_free_list;
687 
688 	if (vp->v_type == VBAD)
689 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
690 	else
691 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
692 
693 	splx(s);
694 }
695 
696 /*
697  * vput(), just unlock and vrele()
698  */
699 void
700 vput(struct vnode *vp)
701 {
702 	struct proc *p = curproc;
703 
704 #ifdef DIAGNOSTIC
705 	if (vp == NULL)
706 		panic("vput: null vp");
707 #endif
708 
709 #ifdef DIAGNOSTIC
710 	if (vp->v_usecount == 0) {
711 		vprint("vput: bad ref count", vp);
712 		panic("vput: ref cnt");
713 	}
714 #endif
715 	vp->v_usecount--;
716 	if (vp->v_usecount > 0) {
717 		VOP_UNLOCK(vp, p);
718 		return;
719 	}
720 
721 #ifdef DIAGNOSTIC
722 	if (vp->v_writecount != 0) {
723 		vprint("vput: bad writecount", vp);
724 		panic("vput: v_writecount != 0");
725 	}
726 #endif
727 
728 	VOP_INACTIVE(vp, p);
729 
730 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
731 		vputonfreelist(vp);
732 }
733 
734 /*
735  * Vnode release - use for active vnodes.
736  * If count drops to zero, call inactive routine and return to freelist.
737  * Returns 0 if it did not sleep.
738  */
739 int
740 vrele(struct vnode *vp)
741 {
742 	struct proc *p = curproc;
743 
744 #ifdef DIAGNOSTIC
745 	if (vp == NULL)
746 		panic("vrele: null vp");
747 #endif
748 #ifdef DIAGNOSTIC
749 	if (vp->v_usecount == 0) {
750 		vprint("vrele: bad ref count", vp);
751 		panic("vrele: ref cnt");
752 	}
753 #endif
754 	vp->v_usecount--;
755 	if (vp->v_usecount > 0) {
756 		return (0);
757 	}
758 
759 #ifdef DIAGNOSTIC
760 	if (vp->v_writecount != 0) {
761 		vprint("vrele: bad writecount", vp);
762 		panic("vrele: v_writecount != 0");
763 	}
764 #endif
765 
766 	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
767 #ifdef DIAGNOSTIC
768 		vprint("vrele: cannot lock", vp);
769 #endif
770 		return (1);
771 	}
772 
773 	VOP_INACTIVE(vp, p);
774 
775 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
776 		vputonfreelist(vp);
777 	return (1);
778 }
779 
780 /* Page or buffer structure gets a reference. */
781 void
782 vhold(struct vnode *vp)
783 {
784 	/*
785 	 * If it is on the freelist and the hold count is currently
786 	 * zero, move it to the hold list.
787 	 */
788 	if ((vp->v_bioflag & VBIOONFREELIST) &&
789 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
790 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
791 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
792 	}
793 	vp->v_holdcnt++;
794 }
795 
796 /* Lose interest in a vnode. */
797 void
798 vdrop(struct vnode *vp)
799 {
800 #ifdef DIAGNOSTIC
801 	if (vp->v_holdcnt == 0)
802 		panic("vdrop: zero holdcnt");
803 #endif
804 
805 	vp->v_holdcnt--;
806 
807 	/*
808 	 * If it is on the holdlist and the hold count drops to
809 	 * zero, move it to the free list.
810 	 */
811 	if ((vp->v_bioflag & VBIOONFREELIST) &&
812 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
813 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
814 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
815 	}
816 }
817 
818 /*
819  * Remove any vnodes in the vnode table belonging to mount point mp.
820  *
821  * If MNT_NOFORCE is specified, there should not be any active ones,
822  * return error if any are found (nb: this is a user error, not a
823  * system error). If MNT_FORCE is specified, detach any active vnodes
824  * that are found.
825  */
826 #ifdef DEBUG
827 int busyprt = 0;	/* print out busy vnodes */
828 struct ctldebug debug1 = { "busyprt", &busyprt };
829 #endif
830 
831 int
832 vfs_mount_foreach_vnode(struct mount *mp,
833     int (*func)(struct vnode *, void *), void *arg) {
834 	struct vnode *vp, *nvp;
835 	int error = 0;
836 
837 loop:
838 	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
839 		if (vp->v_mount != mp)
840 			goto loop;
841 
842 		error = func(vp, arg);
843 
844 		if (error != 0)
845 			break;
846 	}
847 
848 	return (error);
849 }
850 
851 struct vflush_args {
852 	struct vnode *skipvp;
853 	int busy;
854 	int flags;
855 };
856 
857 int
858 vflush_vnode(struct vnode *vp, void *arg) {
859 	struct vflush_args *va = arg;
860 	struct proc *p = curproc;
861 
862 	if (vp == va->skipvp) {
863 		return (0);
864 	}
865 
866 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
867 		return (0);
868 	}
869 
870 	/*
871 	 * If WRITECLOSE is set, only flush out regular file
872 	 * vnodes open for writing.
873 	 */
874 	if ((va->flags & WRITECLOSE) &&
875 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
876 		return (0);
877 	}
878 
879 	/*
880 	 * With v_usecount == 0, all we need to do is clear
881 	 * out the vnode data structures and we are done.
882 	 */
883 	if (vp->v_usecount == 0) {
884 		vgonel(vp, p);
885 		return (0);
886 	}
887 
888 	/*
889 	 * If FORCECLOSE is set, forcibly close the vnode.
890 	 * For block or character devices, revert to an
891 	 * anonymous device. For all other files, just kill them.
892 	 */
893 	if (va->flags & FORCECLOSE) {
894 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
895 			vgonel(vp, p);
896 		} else {
897 			vclean(vp, 0, p);
898 			vp->v_op = &spec_vops;
899 			insmntque(vp, NULL);
900 		}
901 		return (0);
902 	}
903 
904 #ifdef DEBUG
905 	if (busyprt)
906 		vprint("vflush: busy vnode", vp);
907 #endif
908 	va->busy++;
909 	return (0);
910 }
911 
912 int
913 vflush(struct mount *mp, struct vnode *skipvp, int flags)
914 {
915 	struct vflush_args va;
916 	va.skipvp = skipvp;
917 	va.busy = 0;
918 	va.flags = flags;
919 
920 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
921 
922 	if (va.busy)
923 		return (EBUSY);
924 	return (0);
925 }
926 
927 /*
928  * Disassociate the underlying file system from a vnode.
929  */
930 void
931 vclean(struct vnode *vp, int flags, struct proc *p)
932 {
933 	int active;
934 
935 	/*
936 	 * Check to see if the vnode is in use.
937 	 * If so we have to reference it before we clean it out
938 	 * so that its count cannot fall to zero and generate a
939 	 * race against ourselves to recycle it.
940 	 */
941 	if ((active = vp->v_usecount) != 0)
942 		vp->v_usecount++;
943 
944 	/*
945 	 * Prevent the vnode from being recycled or
946 	 * brought into use while we clean it out.
947 	 */
948 	if (vp->v_flag & VXLOCK)
949 		panic("vclean: deadlock");
950 	vp->v_flag |= VXLOCK;
951 	/*
952 	 * Even if the count is zero, the VOP_INACTIVE routine may still
953 	 * have the object locked while it cleans it out. The VOP_LOCK
954 	 * ensures that the VOP_INACTIVE routine is done with its work.
955 	 * For active vnodes, it ensures that no other activity can
956 	 * occur while the underlying object is being cleaned out.
957 	 */
958 	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p);
959 
960 	/*
961 	 * Clean out any VM data associated with the vnode.
962 	 */
963 	uvm_vnp_terminate(vp);
964 	/*
965 	 * Clean out any buffers associated with the vnode.
966 	 */
967 	if (flags & DOCLOSE)
968 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
969 	/*
970 	 * If purging an active vnode, it must be closed and
971 	 * deactivated before being reclaimed. Note that the
972 	 * VOP_INACTIVE will unlock the vnode
973 	 */
974 	if (active) {
975 		if (flags & DOCLOSE)
976 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
977 		VOP_INACTIVE(vp, p);
978 	} else {
979 		/*
980 		 * Any other processes trying to obtain this lock must first
981 		 * wait for VXLOCK to clear, then call the new lock operation.
982 		 */
983 		VOP_UNLOCK(vp, p);
984 	}
985 
986 	/*
987 	 * Reclaim the vnode.
988 	 */
989 	if (VOP_RECLAIM(vp, p))
990 		panic("vclean: cannot reclaim");
991 	if (active) {
992 		vp->v_usecount--;
993 		if (vp->v_usecount == 0) {
994 			if (vp->v_holdcnt > 0)
995 				panic("vclean: not clean");
996 			vputonfreelist(vp);
997 		}
998 	}
999 	cache_purge(vp);
1000 
1001 	/*
1002 	 * Done with purge, notify sleepers of the grim news.
1003 	 */
1004 	vp->v_op = &dead_vops;
1005 	VN_KNOTE(vp, NOTE_REVOKE);
1006 	vp->v_tag = VT_NON;
1007 	vp->v_flag &= ~VXLOCK;
1008 #ifdef VFSLCKDEBUG
1009 	vp->v_flag &= ~VLOCKSWORK;
1010 #endif
1011 	if (vp->v_flag & VXWANT) {
1012 		vp->v_flag &= ~VXWANT;
1013 		wakeup(vp);
1014 	}
1015 }
1016 
1017 /*
1018  * Recycle an unused vnode to the front of the free list.
1019  */
1020 int
1021 vrecycle(struct vnode *vp, struct proc *p)
1022 {
1023 	if (vp->v_usecount == 0) {
1024 		vgonel(vp, p);
1025 		return (1);
1026 	}
1027 	return (0);
1028 }
1029 
1030 /*
1031  * Eliminate all activity associated with a vnode
1032  * in preparation for reuse.
1033  */
1034 void
1035 vgone(struct vnode *vp)
1036 {
1037 	struct proc *p = curproc;
1038 	vgonel(vp, p);
1039 }
1040 
1041 /*
1042  * vgone, with struct proc.
1043  */
1044 void
1045 vgonel(struct vnode *vp, struct proc *p)
1046 {
1047 	struct vnode *vq;
1048 	struct vnode *vx;
1049 
1050 	/*
1051 	 * If a vgone (or vclean) is already in progress,
1052 	 * wait until it is done and return.
1053 	 */
1054 	if (vp->v_flag & VXLOCK) {
1055 		vp->v_flag |= VXWANT;
1056 		tsleep(vp, PINOD, "vgone", 0);
1057 		return;
1058 	}
1059 
1060 	/*
1061 	 * Clean out the filesystem specific data.
1062 	 */
1063 	vclean(vp, DOCLOSE, p);
1064 	/*
1065 	 * Delete from old mount point vnode list, if on one.
1066 	 */
1067 	if (vp->v_mount != NULL)
1068 		insmntque(vp, NULL);
1069 	/*
1070 	 * If special device, remove it from special device alias list
1071 	 * if it is on one.
1072 	 */
1073 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1074 		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1075 		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1076 		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1077 			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1078 		}
1079 		if (*vp->v_hashchain == vp) {
1080 			*vp->v_hashchain = vp->v_specnext;
1081 		} else {
1082 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1083 				if (vq->v_specnext != vp)
1084 					continue;
1085 				vq->v_specnext = vp->v_specnext;
1086 				break;
1087 			}
1088 			if (vq == NULL)
1089 				panic("missing bdev");
1090 		}
1091 		if (vp->v_flag & VALIASED) {
1092 			vx = NULL;
1093 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1094 				if (vq->v_rdev != vp->v_rdev ||
1095 				    vq->v_type != vp->v_type)
1096 					continue;
1097 				if (vx)
1098 					break;
1099 				vx = vq;
1100 			}
1101 			if (vx == NULL)
1102 				panic("missing alias");
1103 			if (vq == NULL)
1104 				vx->v_flag &= ~VALIASED;
1105 			vp->v_flag &= ~VALIASED;
1106 		}
1107 		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1108 		vp->v_specinfo = NULL;
1109 	}
1110 	/*
1111 	 * If it is on the freelist and not already at the head,
1112 	 * move it to the head of the list.
1113 	 */
1114 	vp->v_type = VBAD;
1115 
1116 	/*
1117 	 * Move onto the free list, unless we were called from
1118 	 * getnewvnode and we're not on any free list
1119 	 */
1120 	if (vp->v_usecount == 0 &&
1121 	    (vp->v_bioflag & VBIOONFREELIST)) {
1122 		int s;
1123 
1124 		s = splbio();
1125 
1126 		if (vp->v_holdcnt > 0)
1127 			panic("vgonel: not clean");
1128 
1129 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1130 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1131 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1132 		}
1133 		splx(s);
1134 	}
1135 }
1136 
1137 /*
1138  * Look up a vnode by device number.
1139  */
1140 int
1141 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1142 {
1143 	struct vnode *vp;
1144 	int rc = 0;
1145 
1146 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1147 		if (dev != vp->v_rdev || type != vp->v_type)
1148 			continue;
1149 		*vpp = vp;
1150 		rc = 1;
1151 		break;
1152 	}
1153 	return (rc);
1154 }
1155 
1156 /*
1157  * Revoke all the vnodes corresponding to the specified minor number
1158  * range (endpoints inclusive) of the specified major.
1159  */
1160 void
1161 vdevgone(int maj, int minl, int minh, enum vtype type)
1162 {
1163 	struct vnode *vp;
1164 	int mn;
1165 
1166 	for (mn = minl; mn <= minh; mn++)
1167 		if (vfinddev(makedev(maj, mn), type, &vp))
1168 			VOP_REVOKE(vp, REVOKEALL);
1169 }
1170 
1171 /*
1172  * Calculate the total number of references to a special device.
1173  */
1174 int
1175 vcount(struct vnode *vp)
1176 {
1177 	struct vnode *vq, *vnext;
1178 	int count;
1179 
1180 loop:
1181 	if ((vp->v_flag & VALIASED) == 0)
1182 		return (vp->v_usecount);
1183 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1184 		vnext = vq->v_specnext;
1185 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1186 			continue;
1187 		/*
1188 		 * Alias, but not in use, so flush it out.
1189 		 */
1190 		if (vq->v_usecount == 0 && vq != vp) {
1191 			vgone(vq);
1192 			goto loop;
1193 		}
1194 		count += vq->v_usecount;
1195 	}
1196 	return (count);
1197 }
1198 
1199 #if defined(DEBUG) || defined(DIAGNOSTIC)
1200 /*
1201  * Print out a description of a vnode.
1202  */
1203 static char *typename[] =
1204    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1205 
1206 void
1207 vprint(char *label, struct vnode *vp)
1208 {
1209 	char buf[64];
1210 
1211 	if (label != NULL)
1212 		printf("%s: ", label);
1213 	printf("%p, type %s, use %u, write %u, hold %u,",
1214 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1215 		vp->v_holdcnt);
1216 	buf[0] = '\0';
1217 	if (vp->v_flag & VROOT)
1218 		strlcat(buf, "|VROOT", sizeof buf);
1219 	if (vp->v_flag & VTEXT)
1220 		strlcat(buf, "|VTEXT", sizeof buf);
1221 	if (vp->v_flag & VSYSTEM)
1222 		strlcat(buf, "|VSYSTEM", sizeof buf);
1223 	if (vp->v_flag & VXLOCK)
1224 		strlcat(buf, "|VXLOCK", sizeof buf);
1225 	if (vp->v_flag & VXWANT)
1226 		strlcat(buf, "|VXWANT", sizeof buf);
1227 	if (vp->v_bioflag & VBIOWAIT)
1228 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1229 	if (vp->v_bioflag & VBIOONFREELIST)
1230 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1231 	if (vp->v_bioflag & VBIOONSYNCLIST)
1232 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1233 	if (vp->v_flag & VALIASED)
1234 		strlcat(buf, "|VALIASED", sizeof buf);
1235 	if (buf[0] != '\0')
1236 		printf(" flags (%s)", &buf[1]);
1237 	if (vp->v_data == NULL) {
1238 		printf("\n");
1239 	} else {
1240 		printf("\n\t");
1241 		VOP_PRINT(vp);
1242 	}
1243 }
1244 #endif /* DEBUG || DIAGNOSTIC */
1245 
1246 #ifdef DEBUG
1247 /*
1248  * List all of the locked vnodes in the system.
1249  * Called when debugging the kernel.
1250  */
1251 void
1252 printlockedvnodes(void)
1253 {
1254 	struct mount *mp;
1255 	struct vnode *vp;
1256 
1257 	printf("Locked vnodes\n");
1258 
1259 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1260 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1261 			continue;
1262 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1263 			if (VOP_ISLOCKED(vp))
1264 				vprint(NULL, vp);
1265 		}
1266 		vfs_unbusy(mp);
1267 	}
1268 
1269 }
1270 #endif
1271 
1272 /*
1273  * Top level filesystem related information gathering.
1274  */
1275 int
1276 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1277     size_t newlen, struct proc *p)
1278 {
1279 	struct vfsconf *vfsp, *tmpvfsp;
1280 	int ret;
1281 
1282 	/* all sysctl names at this level are at least name and field */
1283 	if (namelen < 2)
1284 		return (ENOTDIR);		/* overloaded */
1285 
1286 	if (name[0] != VFS_GENERIC) {
1287 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1288 			if (vfsp->vfc_typenum == name[0])
1289 				break;
1290 
1291 		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1292 			return (EOPNOTSUPP);
1293 
1294 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1295 		    oldp, oldlenp, newp, newlen, p));
1296 	}
1297 
1298 	switch (name[1]) {
1299 	case VFS_MAXTYPENUM:
1300 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1301 
1302 	case VFS_CONF:
1303 		if (namelen < 3)
1304 			return (ENOTDIR);	/* overloaded */
1305 
1306 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1307 			if (vfsp->vfc_typenum == name[2])
1308 				break;
1309 
1310 		if (vfsp == NULL)
1311 			return (EOPNOTSUPP);
1312 
1313 		/* Make a copy, clear out kernel pointers */
1314 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
1315 		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1316 		tmpvfsp->vfc_vfsops = NULL;
1317 		tmpvfsp->vfc_next = NULL;
1318 
1319 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1320 		    sizeof(struct vfsconf));
1321 
1322 		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1323 		return (ret);
1324 	case VFS_BCACHESTAT:	/* buffer cache statistics */
1325 		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1326 		    sizeof(struct bcachestats));
1327 		return (ret);
1328 	}
1329 	return (EOPNOTSUPP);
1330 }
1331 
1332 /*
1333  * Check to see if a filesystem is mounted on a block device.
1334  */
1335 int
1336 vfs_mountedon(struct vnode *vp)
1337 {
1338 	struct vnode *vq;
1339 	int error = 0;
1340 
1341 	if (vp->v_specmountpoint != NULL)
1342 		return (EBUSY);
1343 	if (vp->v_flag & VALIASED) {
1344 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1345 			if (vq->v_rdev != vp->v_rdev ||
1346 			    vq->v_type != vp->v_type)
1347 				continue;
1348 			if (vq->v_specmountpoint != NULL) {
1349 				error = EBUSY;
1350 				break;
1351 			}
1352  		}
1353 	}
1354 	return (error);
1355 }
1356 
1357 /*
1358  * Build hash lists of net addresses and hang them off the mount point.
1359  * Called by ufs_mount() to set up the lists of export addresses.
1360  */
1361 int
1362 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1363     struct export_args *argp)
1364 {
1365 	struct netcred *np;
1366 	struct radix_node_head *rnh;
1367 	int nplen, i;
1368 	struct radix_node *rn;
1369 	struct sockaddr *saddr, *smask = NULL;
1370 	int error;
1371 
1372 	if (argp->ex_addrlen == 0) {
1373 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1374 			return (EPERM);
1375 		np = &nep->ne_defexported;
1376 		/* fill in the kernel's ucred from userspace's xucred */
1377 		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1378 			return (error);
1379 		mp->mnt_flag |= MNT_DEFEXPORTED;
1380 		goto finish;
1381 	}
1382 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1383 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1384 		return (EINVAL);
1385 	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1386 	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1387 	saddr = (struct sockaddr *)(np + 1);
1388 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1389 	if (error)
1390 		goto out;
1391 	if (saddr->sa_len > argp->ex_addrlen)
1392 		saddr->sa_len = argp->ex_addrlen;
1393 	if (argp->ex_masklen) {
1394 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1395 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1396 		if (error)
1397 			goto out;
1398 		if (smask->sa_len > argp->ex_masklen)
1399 			smask->sa_len = argp->ex_masklen;
1400 	}
1401 	/* fill in the kernel's ucred from userspace's xucred */
1402 	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1403 		goto out;
1404 	i = saddr->sa_family;
1405 	switch (i) {
1406 	case AF_INET:
1407 		if ((rnh = nep->ne_rtable_inet) == NULL) {
1408 			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1409 			    offsetof(struct sockaddr_in, sin_addr))) {
1410 				error = ENOBUFS;
1411 				goto out;
1412 			}
1413 			rnh = nep->ne_rtable_inet;
1414 		}
1415 		break;
1416 	default:
1417 		error = EINVAL;
1418 		goto out;
1419 	}
1420 	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1421 	if (rn == NULL || np != (struct netcred *)rn) { /* already exists */
1422 		error = EPERM;
1423 		goto out;
1424 	}
1425 finish:
1426 	np->netc_exflags = argp->ex_flags;
1427 	return (0);
1428 out:
1429 	free(np, M_NETADDR, nplen);
1430 	return (error);
1431 }
1432 
1433 int
1434 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1435 {
1436 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1437 
1438 	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1439 	free(rn, M_NETADDR, 0);
1440 	return (0);
1441 }
1442 
1443 /*
1444  * Free the net address hash lists that are hanging off the mount points.
1445  */
1446 void
1447 vfs_free_addrlist(struct netexport *nep)
1448 {
1449 	struct radix_node_head *rnh;
1450 
1451 	if ((rnh = nep->ne_rtable_inet) != NULL) {
1452 		rn_walktree(rnh, vfs_free_netcred, rnh);
1453 		free(rnh, M_RTABLE, 0);
1454 		nep->ne_rtable_inet = NULL;
1455 	}
1456 }
1457 
1458 int
1459 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1460 {
1461 	int error;
1462 
1463 	if (argp->ex_flags & MNT_DELEXPORT) {
1464 		vfs_free_addrlist(nep);
1465 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1466 	}
1467 	if (argp->ex_flags & MNT_EXPORTED) {
1468 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1469 			return (error);
1470 		mp->mnt_flag |= MNT_EXPORTED;
1471 	}
1472 	return (0);
1473 }
1474 
1475 struct netcred *
1476 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1477 {
1478 	struct netcred *np;
1479 	struct radix_node_head *rnh;
1480 	struct sockaddr *saddr;
1481 
1482 	np = NULL;
1483 	if (mp->mnt_flag & MNT_EXPORTED) {
1484 		/*
1485 		 * Lookup in the export list first.
1486 		 */
1487 		if (nam != NULL) {
1488 			saddr = mtod(nam, struct sockaddr *);
1489 			switch (saddr->sa_family) {
1490 			case AF_INET:
1491 				rnh = nep->ne_rtable_inet;
1492 				break;
1493 			default:
1494 				rnh = NULL;
1495 				break;
1496 			}
1497 			if (rnh != NULL)
1498 				np = (struct netcred *)rn_match(saddr, rnh);
1499 		}
1500 		/*
1501 		 * If no address match, use the default if it exists.
1502 		 */
1503 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1504 			np = &nep->ne_defexported;
1505 	}
1506 	return (np);
1507 }
1508 
1509 /*
1510  * Do the usual access checking.
1511  * file_mode, uid and gid are from the vnode in question,
1512  * while acc_mode and cred are from the VOP_ACCESS parameter list
1513  */
1514 int
1515 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1516     mode_t acc_mode, struct ucred *cred)
1517 {
1518 	mode_t mask;
1519 
1520 	/* User id 0 always gets read/write access. */
1521 	if (cred->cr_uid == 0) {
1522 		/* For VEXEC, at least one of the execute bits must be set. */
1523 		if ((acc_mode & VEXEC) && type != VDIR &&
1524 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1525 			return EACCES;
1526 		return 0;
1527 	}
1528 
1529 	mask = 0;
1530 
1531 	/* Otherwise, check the owner. */
1532 	if (cred->cr_uid == uid) {
1533 		if (acc_mode & VEXEC)
1534 			mask |= S_IXUSR;
1535 		if (acc_mode & VREAD)
1536 			mask |= S_IRUSR;
1537 		if (acc_mode & VWRITE)
1538 			mask |= S_IWUSR;
1539 		return (file_mode & mask) == mask ? 0 : EACCES;
1540 	}
1541 
1542 	/* Otherwise, check the groups. */
1543 	if (groupmember(gid, cred)) {
1544 		if (acc_mode & VEXEC)
1545 			mask |= S_IXGRP;
1546 		if (acc_mode & VREAD)
1547 			mask |= S_IRGRP;
1548 		if (acc_mode & VWRITE)
1549 			mask |= S_IWGRP;
1550 		return (file_mode & mask) == mask ? 0 : EACCES;
1551 	}
1552 
1553 	/* Otherwise, check everyone else. */
1554 	if (acc_mode & VEXEC)
1555 		mask |= S_IXOTH;
1556 	if (acc_mode & VREAD)
1557 		mask |= S_IROTH;
1558 	if (acc_mode & VWRITE)
1559 		mask |= S_IWOTH;
1560 	return (file_mode & mask) == mask ? 0 : EACCES;
1561 }
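
/*
 * Worked example: for a VREG file with mode 0644, uid 100 and gid 10,
 * a credential with cr_uid == 100 requesting VREAD|VWRITE builds
 * mask = S_IRUSR|S_IWUSR, which 0644 covers, so the call returns 0:
 *
 *	error = vaccess(VREG, 0644, 100, 10, VREAD|VWRITE, cred);
 *
 * An unrelated, non-group credential requesting VWRITE gets EACCES
 * because S_IWOTH is clear in 0644.
 */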
1562 
1563 /*
1564  * Unmount all file systems.
1565  * We traverse the list in reverse order under the assumption that doing so
1566  * will avoid needing to worry about dependencies.
1567  */
1568 void
1569 vfs_unmountall(void)
1570 {
1571 	struct mount *mp, *nmp;
1572 	int allerror, error, again = 1;
1573 
1574  retry:
1575 	allerror = 0;
1576 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1577 		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1578 			continue;
1579 		/* XXX Here is a race, the next pointer is not locked. */
1580 		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1581 			printf("unmount of %s failed with error %d\n",
1582 			    mp->mnt_stat.f_mntonname, error);
1583 			allerror = 1;
1584 		}
1585 	}
1586 
1587 	if (allerror) {
1588 		printf("WARNING: some file systems would not unmount\n");
1589 		if (again) {
1590 			printf("retrying\n");
1591 			again = 0;
1592 			goto retry;
1593 		}
1594 	}
1595 }
1596 
1597 /*
1598  * Sync and unmount file systems before shutting down.
1599  */
1600 void
1601 vfs_shutdown(void)
1602 {
1603 #ifdef ACCOUNTING
1604 	acct_shutdown();
1605 #endif
1606 
1607 	/* XXX Should suspend scheduling. */
1608 	(void) spl0();
1609 
1610 	printf("syncing disks... ");
1611 
1612 	if (panicstr == NULL) {
1613 		/* Sync before unmount, in case we hang on something. */
1614 		sys_sync(&proc0, NULL, NULL);
1615 
1616 		/* Unmount file systems. */
1617 		vfs_unmountall();
1618 	}
1619 
1620 	if (vfs_syncwait(1))
1621 		printf("giving up\n");
1622 	else
1623 		printf("done\n");
1624 
1625 #if NSOFTRAID > 0
1626 	sr_shutdown();
1627 #endif
1628 }
1629 
1630 /*
1631  * Perform a sync() operation and wait for buffers to flush.
1632  * Assumptions: called with the scheduler disabled and physical I/O
1633  * enabled; for now called at spl0().  XXX
1634  */
1635 int
1636 vfs_syncwait(int verbose)
1637 {
1638 	struct buf *bp;
1639 	int iter, nbusy, dcount, s;
1640 	struct proc *p;
1641 #ifdef MULTIPROCESSOR
1642 	int hold_count;
1643 #endif
1644 
1645 	p = curproc ? curproc : &proc0;
1646 	sys_sync(p, NULL, NULL);
1647 
1648 	/* Wait for sync to finish. */
1649 	dcount = 10000;
1650 	for (iter = 0; iter < 20; iter++) {
1651 		nbusy = 0;
1652 		LIST_FOREACH(bp, &bufhead, b_list) {
1653 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1654 				nbusy++;
1655 			/*
1656 			 * With soft updates, some buffers that are
1657 			 * written will be remarked as dirty until other
1658 			 * buffers are written.
1659 			 */
1660 			if (bp->b_flags & B_DELWRI) {
1661 				s = splbio();
1662 				bremfree(bp);
1663 				buf_acquire(bp);
1664 				splx(s);
1665 				nbusy++;
1666 				bawrite(bp);
1667 				if (dcount-- <= 0) {
1668 					if (verbose)
1669 						printf("softdep ");
1670 					return 1;
1671 				}
1672 			}
1673 		}
1674 		if (nbusy == 0)
1675 			break;
1676 		if (verbose)
1677 			printf("%d ", nbusy);
1678 #ifdef MULTIPROCESSOR
1679 		if (__mp_lock_held(&kernel_lock))
1680 			hold_count = __mp_release_all(&kernel_lock);
1681 		else
1682 			hold_count = 0;
1683 #endif
1684 		DELAY(40000 * iter);
1685 #ifdef MULTIPROCESSOR
1686 		if (hold_count)
1687 			__mp_acquire_count(&kernel_lock, hold_count);
1688 #endif
1689 	}
1690 
1691 	return nbusy;
1692 }
1693 
1694 /*
1695  * posix file system related system variables.
1696  */
1697 int
1698 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1699     void *newp, size_t newlen, struct proc *p)
1700 {
1701 	/* all sysctl names at this level are terminal */
1702 	if (namelen != 1)
1703 		return (ENOTDIR);
1704 
1705 	switch (name[0]) {
1706 	case FS_POSIX_SETUID:
1707 		if (newp && securelevel > 0)
1708 			return (EPERM);
1709 		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1710 	default:
1711 		return (EOPNOTSUPP);
1712 	}
1713 	/* NOTREACHED */
1714 }
1715 
1716 /*
1717  * file system related system variables.
1718  */
1719 int
1720 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1721     size_t newlen, struct proc *p)
1722 {
1723 	sysctlfn *fn;
1724 
1725 	switch (name[0]) {
1726 	case FS_POSIX:
1727 		fn = fs_posix_sysctl;
1728 		break;
1729 	default:
1730 		return (EOPNOTSUPP);
1731 	}
1732 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1733 }
1734 
1735 
1736 /*
1737  * Routines dealing with vnodes and buffers
1738  */
1739 
1740 /*
1741  * Wait for all outstanding I/Os to complete
1742  *
1743  * Manipulates v_numoutput. Must be called at splbio()
1744  */
1745 int
1746 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1747 {
1748 	int error = 0;
1749 
1750 	splassert(IPL_BIO);
1751 
1752 	while (vp->v_numoutput) {
1753 		vp->v_bioflag |= VBIOWAIT;
1754 		error = tsleep(&vp->v_numoutput,
1755 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1756 		if (error)
1757 			break;
1758 	}
1759 
1760 	return (error);
1761 }
1762 
1763 /*
1764  * Update outstanding I/O count and do wakeup if requested.
1765  *
1766  * Manipulates v_numoutput. Must be called at splbio()
1767  */
1768 void
1769 vwakeup(struct vnode *vp)
1770 {
1771 	splassert(IPL_BIO);
1772 
1773 	if (vp != NULL) {
1774 		if (vp->v_numoutput-- == 0)
1775 			panic("vwakeup: neg numoutput");
1776 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1777 			vp->v_bioflag &= ~VBIOWAIT;
1778 			wakeup(&vp->v_numoutput);
1779 		}
1780 	}
1781 }
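
/*
 * Sketch of the producer side: code starting asynchronous output
 * bumps v_numoutput at splbio() before issuing the write, and the
 * I/O completion path (biodone()) calls vwakeup() to balance it:
 *
 *	s = splbio();
 *	bp->b_vp->v_numoutput++;
 *	splx(s);
 *	VOP_STRATEGY(bp);
 */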
1782 
1783 /*
1784  * Flush out and invalidate all buffers associated with a vnode.
1785  * Called with the underlying object locked.
1786  */
1787 int
1788 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1789     int slpflag, int slptimeo)
1790 {
1791 	struct buf *bp;
1792 	struct buf *nbp, *blist;
1793 	int s, error;
1794 
1795 #ifdef VFSLCKDEBUG
1796 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1797 		panic("vinvalbuf(): vp isn't locked");
1798 #endif
1799 
1800 	if (flags & V_SAVE) {
1801 		s = splbio();
1802 		vwaitforio(vp, 0, "vinvalbuf", 0);
1803 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1804 			splx(s);
1805 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1806 				return (error);
1807 			s = splbio();
1808 			if (vp->v_numoutput > 0 ||
1809 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1810 				panic("vinvalbuf: dirty bufs");
1811 		}
1812 		splx(s);
1813 	}
1814 loop:
1815 	s = splbio();
1816 	for (;;) {
1817 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1818 		    (flags & V_SAVEMETA))
1819 			while (blist && blist->b_lblkno < 0)
1820 				blist = LIST_NEXT(blist, b_vnbufs);
1821 		if (blist == NULL &&
1822 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1823 		    (flags & V_SAVEMETA))
1824 			while (blist && blist->b_lblkno < 0)
1825 				blist = LIST_NEXT(blist, b_vnbufs);
1826 		if (!blist)
1827 			break;
1828 
1829 		for (bp = blist; bp; bp = nbp) {
1830 			nbp = LIST_NEXT(bp, b_vnbufs);
1831 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1832 				continue;
1833 			if (bp->b_flags & B_BUSY) {
1834 				bp->b_flags |= B_WANTED;
1835 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1836 				    "vinvalbuf", slptimeo);
1837 				if (error) {
1838 					splx(s);
1839 					return (error);
1840 				}
1841 				break;
1842 			}
1843 			bremfree(bp);
1844 			/*
1845 			 * XXX Since there are no node locks for NFS, I believe
1846 			 * there is a slight chance that a delayed write will
1847 			 * occur while sleeping just above, so check for it.
1848 			 */
1849 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1850 				buf_acquire(bp);
1851 				splx(s);
1852 				(void) VOP_BWRITE(bp);
1853 				goto loop;
1854 			}
1855 			buf_acquire_nomap(bp);
1856 			bp->b_flags |= B_INVAL;
1857 			brelse(bp);
1858 		}
1859 	}
1860 	if (!(flags & V_SAVEMETA) &&
1861 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1862 		panic("vinvalbuf: flush failed");
1863 	splx(s);
1864 	return (0);
1865 }
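
/*
 * Example (sketch): callers that must not lose dirty data pass
 * V_SAVE so buffers are synced before being invalidated, as
 * vclean() does for the DOCLOSE case:
 *
 *	error = vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
 *
 * Revocation paths pass 0 instead and simply discard everything.
 */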
1866 
1867 void
1868 vflushbuf(struct vnode *vp, int sync)
1869 {
1870 	struct buf *bp, *nbp;
1871 	int s;
1872 
1873 loop:
1874 	s = splbio();
1875 	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1876 		if ((bp->b_flags & B_BUSY))
1877 			continue;
1878 		if ((bp->b_flags & B_DELWRI) == 0)
1879 			panic("vflushbuf: not dirty");
1880 		bremfree(bp);
1881 		buf_acquire(bp);
1882 		splx(s);
1883 		/*
1884 		 * Wait for I/O associated with indirect blocks to complete,
1885 		 * since there is no way to quickly wait for them below.
1886 		 */
1887 		if (bp->b_vp == vp || sync == 0)
1888 			(void) bawrite(bp);
1889 		else
1890 			(void) bwrite(bp);
1891 		goto loop;
1892 	}
1893 	if (sync == 0) {
1894 		splx(s);
1895 		return;
1896 	}
1897 	vwaitforio(vp, 0, "vflushbuf", 0);
1898 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1899 		splx(s);
1900 #ifdef DIAGNOSTIC
1901 		vprint("vflushbuf: dirty", vp);
1902 #endif
1903 		goto loop;
1904 	}
1905 	splx(s);
1906 }
1907 
1908 /*
1909  * Associate a buffer with a vnode.
1910  *
1911  * Manipulates buffer vnode queues. Must be called at splbio().
1912  */
1913 void
1914 bgetvp(struct vnode *vp, struct buf *bp)
1915 {
1916 	splassert(IPL_BIO);
1917 
1919 	if (bp->b_vp)
1920 		panic("bgetvp: not free");
1921 	vhold(vp);
1922 	bp->b_vp = vp;
1923 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1924 		bp->b_dev = vp->v_rdev;
1925 	else
1926 		bp->b_dev = NODEV;
1927 	/*
1928 	 * Insert onto list for new vnode.
1929 	 */
1930 	bufinsvn(bp, &vp->v_cleanblkhd);
1931 }
1932 
1933 /*
1934  * Disassociate a buffer from a vnode.
1935  *
1936  * Manipulates vnode buffer queues. Must be called at splbio().
1937  */
1938 void
1939 brelvp(struct buf *bp)
1940 {
1941 	struct vnode *vp;
1942 
1943 	splassert(IPL_BIO);
1944 
1945 	if ((vp = bp->b_vp) == NULL)
1946 		panic("brelvp: NULL");
1947 	/*
1948 	 * Delete from old vnode list, if on one.
1949 	 */
1950 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1951 		bufremvn(bp);
1952 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1953 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1954 		vp->v_bioflag &= ~VBIOONSYNCLIST;
1955 		LIST_REMOVE(vp, v_synclist);
1956 	}
1957 	bp->b_vp = NULL;
1958 
1959 	vdrop(vp);
1960 }
1961 
1962 /*
1963  * Replaces the current vnode associated with the buffer, if any,
1964  * with a new vnode.
1965  *
1966  * If an output I/O is pending on the buffer, the old vnode
1967  * I/O count is adjusted.
1968  *
1969  * Ignores vnode buffer queues. Must be called at splbio().
1970  */
1971 void
1972 buf_replacevnode(struct buf *bp, struct vnode *newvp)
1973 {
1974 	struct vnode *oldvp = bp->b_vp;
1975 
1976 	splassert(IPL_BIO);
1977 
1978 	if (oldvp)
1979 		brelvp(bp);
1980 
1981 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
1982 		newvp->v_numoutput++;	/* put it on swapdev */
1983 		vwakeup(oldvp);
1984 	}
1985 
1986 	bgetvp(newvp, bp);
1987 	bufremvn(bp);
1988 }
1989 
1990 /*
1991  * Used to assign buffers to the appropriate clean or dirty list on
1992  * the vnode and to add newly dirty vnodes to the appropriate
1993  * filesystem syncer list.
1994  *
1995  * Manipulates vnode buffer queues. Must be called at splbio().
1996  */
1997 void
1998 reassignbuf(struct buf *bp)
1999 {
2000 	struct buflists *listheadp;
2001 	int delay;
2002 	struct vnode *vp = bp->b_vp;
2003 
2004 	splassert(IPL_BIO);
2005 
2006 	/*
2007 	 * Delete from old vnode list, if on one.
2008 	 */
2009 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2010 		bufremvn(bp);
2011 
2012 	/*
2013 	 * If dirty, put on list of dirty buffers;
2014 	 * otherwise insert onto list of clean buffers.
2015 	 */
2016 	if ((bp->b_flags & B_DELWRI) == 0) {
2017 		listheadp = &vp->v_cleanblkhd;
2018 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2019 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2020 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2021 			LIST_REMOVE(vp, v_synclist);
2022 		}
2023 	} else {
2024 		listheadp = &vp->v_dirtyblkhd;
2025 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2026 			switch (vp->v_type) {
2027 			case VDIR:
2028 				delay = syncdelay / 2;
2029 				break;
2030 			case VBLK:
2031 				if (vp->v_specmountpoint != NULL) {
2032 					delay = syncdelay / 3;
2033 					break;
2034 				}
2035 				/* FALLTHROUGH */
2036 			default:
2037 				delay = syncdelay;
2038 			}
2039 			vn_syncer_add_to_worklist(vp, delay);
2040 		}
2041 	}
2042 	bufinsvn(bp, listheadp);
2043 }
2044 
2045 int
2046 vfs_register(struct vfsconf *vfs)
2047 {
2048 	struct vfsconf *vfsp;
2049 	struct vfsconf **vfspp;
2050 
2051 #ifdef DIAGNOSTIC
2052 	/* Paranoia? */
2053 	if (vfs->vfc_refcount != 0)
2054 		printf("vfs_register called with vfc_refcount > 0\n");
2055 #endif
2056 
2057 	/* Check if filesystem already known */
2058 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2059 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2060 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2061 			return (EEXIST);
2062 
2063 	if (vfs->vfc_typenum > maxvfsconf)
2064 		maxvfsconf = vfs->vfc_typenum;
2065 
2066 	vfs->vfc_next = NULL;
2067 
2068 	/* Add to the end of the list */
2069 	*vfspp = vfs;
2070 
2071 	/* Call vfs_init() */
2072 	if (vfs->vfc_vfsops->vfs_init)
2073 		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2074 
2075 	return 0;
2076 }
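
/*
 * Example (sketch): a filesystem registers itself with a statically
 * allocated vfsconf; all names here are hypothetical:
 *
 *	static struct vfsconf myfs_vfsconf;
 *
 *	myfs_vfsconf.vfc_vfsops = &myfs_vfsops;
 *	strlcpy(myfs_vfsconf.vfc_name, "myfs",
 *	    sizeof(myfs_vfsconf.vfc_name));
 *	error = vfs_register(&myfs_vfsconf);
 *
 * vfs_register() returns EEXIST if the name is already known and
 * otherwise links the vfsconf in and runs its vfs_init() hook.
 */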
2077 
2078 int
2079 vfs_unregister(struct vfsconf *vfs)
2080 {
2081 	struct vfsconf *vfsp;
2082 	struct vfsconf **vfspp;
2083 	int maxtypenum;
2084 
2085 	/* Find our vfsconf struct */
2086 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2087 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2088 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2089 			break;
2090 	}
2091 
2092 	if (!vfsp)			/* Not found */
2093 		return (ENOENT);
2094 
2095 	if (vfsp->vfc_refcount)		/* In use */
2096 		return (EBUSY);
2097 
2098 	/* Remove from list and free */
2099 	*vfspp = vfsp->vfc_next;
2100 
2101 	maxtypenum = 0;
2102 
2103 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2104 		if (vfsp->vfc_typenum > maxtypenum)
2105 			maxtypenum = vfsp->vfc_typenum;
2106 
2107 	maxvfsconf = maxtypenum;
2108 	return 0;
2109 }
2110 
2111 /*
2112  * Check if vnode represents a disk device
2113  */
2114 int
2115 vn_isdisk(struct vnode *vp, int *errp)
2116 {
2117 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2118 		return (0);
2119 
2120 	return (1);
2121 }
2122 
2123 #ifdef DDB
2124 #include <machine/db_machdep.h>
2125 #include <ddb/db_interface.h>
2126 
2127 void
2128 vfs_buf_print(void *b, int full,
2129     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2130 {
2131 	struct buf *bp = b;
2132 
2133 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2134 	      "  proc %p error %d flags %lb\n",
2135 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2136 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2137 
2138 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2139 	      "  data %p saveaddr %p dep %p iodone %p\n",
2140 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2141 	    bp->b_data, bp->b_saveaddr,
2142 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2143 
2144 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2145 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2146 
2147 #ifdef FFS_SOFTUPDATES
2148 	if (full)
2149 		softdep_print(bp, full, pr);
2150 #endif
2151 }
2152 
2153 const char *vtypes[] = { VTYPE_NAMES };
2154 const char *vtags[] = { VTAG_NAMES };
2155 
2156 void
2157 vfs_vnode_print(void *v, int full,
2158     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2159 {
2160 	struct vnode *vp = v;
2161 
2162 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2163 	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2164 	      vp->v_tag,
2165 	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2166 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2167 
2168 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2169 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2170 	      vp->v_holdcnt, vp->v_numoutput);
2171 
2172 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2173 
2174 	if (full) {
2175 		struct buf *bp;
2176 
2177 		(*pr)("clean bufs:\n");
2178 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2179 			(*pr)(" bp %p\n", bp);
2180 			vfs_buf_print(bp, full, pr);
2181 		}
2182 
2183 		(*pr)("dirty bufs:\n");
2184 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2185 			(*pr)(" bp %p\n", bp);
2186 			vfs_buf_print(bp, full, pr);
2187 		}
2188 	}
2189 }
2190 
2191 void
2192 vfs_mount_print(struct mount *mp, int full,
2193     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2194 {
2195 	struct vfsconf *vfc = mp->mnt_vfc;
2196 	struct vnode *vp;
2197 	int cnt = 0;
2198 
2199 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2200 	    mp->mnt_flag, MNT_BITS,
2201 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2202 
2203 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2204 	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2205 	    vfc->vfc_refcount, vfc->vfc_flags);
2206 
2207 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2208 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2209 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2210 
2211 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2212 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2213 
2214 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2215 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2216 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2217 
2218 	(*pr)("  syncwrites %llu asyncwrites %llu\n",
2219 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2220 
2221 	(*pr)("  syncreads %llu asyncreads %llu\n",
2222 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2223 
2224 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2225 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2226 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2227 
2228 	(*pr)("locked vnodes:");
2229 	/* XXX would take mountlist lock, except ddb has no context */
2230 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2231 		if (VOP_ISLOCKED(vp)) {
2232 			if (!LIST_NEXT(vp, v_mntvnodes))
2233 				(*pr)(" %p", vp);
2234 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2235 				(*pr)("\n\t%p", vp);
2236 			else
2237 				(*pr)(", %p", vp);
2238 		}
2239 	(*pr)("\n");
2240 
2241 	if (full) {
2242 		(*pr)("all vnodes:\n\t");
2243 		/* XXX would take mountlist lock, except ddb has no context */
2244 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2245 			if (!LIST_NEXT(vp, v_mntvnodes))
2246 				(*pr)(" %p", vp);
2247 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2248 				(*pr)(" %p,\n\t", vp);
2249 			else
2250 				(*pr)(" %p,", vp);
2251 		(*pr)("\n");
2252 	}
2253 }
2254 #endif /* DDB */
2255 
2256 void
2257 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2258 {
2259 	const struct statfs *mbp;
2260 
2261 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2262 
2263 	if (sbp == (mbp = &mp->mnt_stat))
2264 		return;
2265 
2266 	sbp->f_fsid = mbp->f_fsid;
2267 	sbp->f_owner = mbp->f_owner;
2268 	sbp->f_flags = mbp->f_flags;
2269 	sbp->f_syncwrites = mbp->f_syncwrites;
2270 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2271 	sbp->f_syncreads = mbp->f_syncreads;
2272 	sbp->f_asyncreads = mbp->f_asyncreads;
2273 	sbp->f_namemax = mbp->f_namemax;
2274 	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2275 	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2276 	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2277 	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2278 	    sizeof(union mount_info));
2279 }
2280