/*	$OpenBSD: vfs_subr.c,v 1.260 2017/07/31 16:47:03 florian Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <netinet/in.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_vnode.h>

#include "softraid.h"

void sr_shutdown(void);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;
struct pool uvm_vnode_pool;

static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static inline int
rb_buf_compare(const struct buf *b1, const struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return (-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return (1);
	return (0);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * initialvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
	    PR_WAITOK, "vnodes", NULL);
	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
	    PR_WAITOK, "uvmvnodes", NULL);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();

#ifdef NFSSERVER
	rn_init(sizeof(struct sockaddr_in));
#endif /* NFSSERVER */
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}
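
/*
 * Usage sketch (illustrative only, not a caller in this file): code
 * that walks the mount list typically takes a read lock around its
 * work and skips mount points that are being unmounted.
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
 *		... inspect mp ...
 *		vfs_unbusy(mp);
 *	}
 */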

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!TAILQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	/*
	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
	 * the compiler do its job.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_filerev = VNOVAL;
	vap->va_vaflags = 0;
}
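
/*
 * Usage sketch (illustrative only): callers clear every attribute to
 * VNOVAL and then set just the fields they want changed, so the
 * filesystem can tell "not specified" apart from a legitimate zero.
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;			truncate the file to zero
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */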

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm->u_vnode = vp;
		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
		cache_tree_init(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		TAILQ_FOREACH(vp, listhd, v_freelist) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = NULL;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = NULL;
	}
	cache_purge(vp);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = NULL;
	return (0);
}
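
/*
 * Usage sketch (illustrative only; the "myfs" names are hypothetical):
 * a filesystem's vget-style routine allocates the vnode here and then
 * attaches its private per-file data before handing it back.
 *
 *	error = getnewvnode(VT_MYFS, mp, &myfs_vops, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 *	vp->v_type = VREG;
 */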

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != NULL) {
		vput(vp);
		vp = nvp;
	}
	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
		vp->v_flag |= VISTTY;
	*vpp = vp;
	return (0);
}
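
/*
 * Usage sketch (illustrative only): early boot code obtains a vnode
 * for the root block device this way, before any filesystem exists.
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't set up bdevvp for root device");
 */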

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
			M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		nvp->v_specbitmap = NULL;
		if (nvp->v_type == VCHR &&
		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
			if (vp != NULLVP)
				nvp->v_specbitmap = vp->v_specbitmap;
			else
				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
				    M_VNODE, M_WAITOK | M_ZERO);
		}
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This is the uncommon case. It is reached when we found an
	 * alias with tag VT_NON and type VBLK, which means we found a
	 * block device vnode that was created using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
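
/*
 * Usage sketch (illustrative only): hash-lookup code in a filesystem
 * retries from the top when vget() fails, since failure means the
 * vnode was reclaimed while we slept and the chain must be rescanned.
 *
 *	loop:
 *		... find vp in the filesystem's inode hash ...
 *		if (vget(vp, LK_EXCLUSIVE, p))
 *			goto loop;
 */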

/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}
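
/*
 * Reference rules in brief: vget() acquires a reference on a possibly
 * free vnode, vref() adds one to an already referenced vnode, and
 * vrele()/vput() drop one, with vput() also unlocking. Illustrative
 * pairing:
 *
 *	vref(vp);			vp must already be referenced
 *	... hand vp to deferred work ...
 *	vrele(vp);
 */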

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
		if (vp->v_mount != mp)
			goto loop;

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}
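
/*
 * Usage sketch (illustrative only): a per-vnode callback in the style
 * of vflush_vnode() below; returning non-zero stops the walk.
 *
 *	int
 *	count_vnode(struct vnode *vp, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	...
 *	vfs_mount_foreach_vnode(mp, count_vnode, &count);
 */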

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, NULL);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
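
/*
 * Usage sketch (illustrative only): unmount paths flush every vnode
 * except the root, forcibly when MNT_FORCE was requested.
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, rootvp, flags);
 */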

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, NULL);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specinfo != NULL) {
		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
		}
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
		vp->v_specinfo = NULL;
	}
	/*
	 * The vnode is no longer usable.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}
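
/*
 * Usage sketch (illustrative only; the major/minor names are
 * hypothetical): a driver's detach routine revokes all vnodes for
 * its device nodes before the softc disappears.
 *
 *	vdevgone(my_cdev_major, 0, nminors - 1, VCHR);
 *	vdevgone(my_bdev_major, 0, nminors - 1, VBLK);
 */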

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

#ifdef NFSSERVER
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int nplen, i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		/* fill in the kernel's ucred from userspace's xucred */
		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
			return (error);
		mp->mnt_flag |= MNT_DEFEXPORTED;
		goto finish;
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	/* fill in the kernel's ucred from userspace's xucred */
	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
		goto out;
	i = saddr->sa_family;
	switch (i) {
	case AF_INET:
		if ((rnh = nep->ne_rtable_inet) == NULL) {
			if (!rn_inithead((void **)&nep->ne_rtable_inet,
			    offsetof(struct sockaddr_in, sin_addr))) {
				error = ENOBUFS;
				goto out;
			}
			rnh = nep->ne_rtable_inet;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}
	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
	if (rn == NULL || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
finish:
	np->netc_exflags = argp->ex_flags;
	return (0);
out:
	free(np, M_NETADDR, nplen);
	return (error);
}

int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR, 0);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		rn_walktree(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, 0);
		nep->ne_rtable_inet = NULL;
	}
}
#endif /* NFSSERVER */

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
#ifdef NFSSERVER
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
#else
	return (ENOTSUP);
#endif /* NFSSERVER */
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
#ifdef NFSSERVER
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch (saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL)
				np = (struct netcred *)rn_match(saddr, rnh);
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
#else
	return (NULL);
#endif /* NFSSERVER */
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
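
/*
 * Usage sketch (illustrative only; "ip" stands for a filesystem's own
 * inode structure): a VOP_ACCESS implementation typically reduces to
 * a vaccess() call on the cached mode bits.
 *
 *	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
 *	    ip->i_uid, ip->i_gid, acc_mode, cred));
 */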

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
			continue;
		/* XXX Here is a race, the next pointer is not locked. */
		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == NULL) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, NULL, NULL);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");

#if NSOFTRAID > 0
	sr_shutdown();
#endif
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical io
 * enabled; for now called at spl0(). XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	p = curproc ? curproc : &proc0;
	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
#ifdef MULTIPROCESSOR
		if (__mp_lock_held(&kernel_lock))
			hold_count = __mp_release_all(&kernel_lock);
		else
			hold_count = 0;
#endif
		DELAY(40000 * iter);
#ifdef MULTIPROCESSOR
		if (hold_count)
			__mp_acquire_count(&kernel_lock, hold_count);
#endif
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}

/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}
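
/*
 * Usage sketch (illustrative only): callers raise to splbio() around
 * the wait, as vinvalbuf() below does before checking the dirty list.
 *
 *	s = splbio();
 *	error = vwaitforio(vp, 0, "mywait", 0);
 *	splx(s);
 */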

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}
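
/*
 * Usage sketch (illustrative only): revoke and truncate paths throw
 * away all buffers; V_SAVE first writes the dirty ones back.
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);	keep file data
 *	error = vinvalbuf(vp, 0, cred, p, 0, 0);	discard everything
 */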

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == NULL)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}
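
/*
 * Usage sketch (illustrative only; the "myfs" names are hypothetical):
 * a filesystem registers itself with a vfsconf whose fields match the
 * ones this file reads (vfc_vfsops, vfc_name, vfc_typenum, ...).
 *
 *	static struct vfsconf myfs_vfsconf;
 *
 *	myfs_vfsconf.vfc_vfsops = &myfs_vfsops;
 *	strlcpy(myfs_vfsconf.vfc_name, "myfs",
 *	    sizeof(myfs_vfsconf.vfc_name));
 *	myfs_vfsconf.vfc_typenum = 42;
 *	error = vfs_register(&myfs_vfsconf);
 */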

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>

void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
	      vp->v_tag,
	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	      vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
	    sizeof(union mount_info));
}