xref: /openbsd-src/sys/kern/vfs_subr.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: vfs_subr.c,v 1.218 2014/07/13 15:00:40 tedu Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/time.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/stat.h>
54 #include <sys/acct.h>
55 #include <sys/namei.h>
56 #include <sys/ucred.h>
57 #include <sys/buf.h>
58 #include <sys/errno.h>
59 #include <sys/malloc.h>
60 #include <sys/mbuf.h>
61 #include <sys/syscallargs.h>
62 #include <sys/pool.h>
63 #include <sys/tree.h>
64 #include <sys/specdev.h>
65 
66 #include <netinet/in.h>
67 
68 #include "softraid.h"
69 
70 void sr_shutdown(void);
71 
72 enum vtype iftovt_tab[16] = {
73 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
74 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
75 };
76 
77 int	vttoif_tab[9] = {
78 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
79 	S_IFSOCK, S_IFIFO, S_IFMT,
80 };
81 
82 int doforce = 1;		/* 1 => permit forcible unmounting */
83 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
84 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
85 
86 /*
87  * Insq/Remq for the vnode usage lists.
88  */
89 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
90 #define	bufremvn(bp) {							\
91 	LIST_REMOVE(bp, b_vnbufs);					\
92 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
93 }
94 
95 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
96 struct freelst vnode_free_list;	/* vnode free list */
97 
98 struct mntlist mountlist;	/* mounted filesystem list */
99 
100 void	vclean(struct vnode *, int, struct proc *);
101 
102 void insmntque(struct vnode *, struct mount *);
103 int getdevvp(dev_t, struct vnode **, enum vtype);
104 
105 int vfs_hang_addrlist(struct mount *, struct netexport *,
106 				  struct export_args *);
107 int vfs_free_netcred(struct radix_node *, void *, u_int);
108 void vfs_free_addrlist(struct netexport *);
109 void vputonfreelist(struct vnode *);
110 
111 int vflush_vnode(struct vnode *, void *);
112 int maxvnodes;
113 
114 #ifdef DEBUG
115 void printlockedvnodes(void);
116 #endif
117 
118 struct pool vnode_pool;
119 
120 static int rb_buf_compare(struct buf *b1, struct buf *b2);
121 RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
122 
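/*
 * Comparison routine for the per-vnode buffer tree: order buffers by
 * logical block number.
 */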
123 static int
124 rb_buf_compare(struct buf *b1, struct buf *b2)
125 {
126 	if (b1->b_lblkno < b2->b_lblkno)
127 		return(-1);
128 	if (b1->b_lblkno > b2->b_lblkno)
129 		return(1);
130 	return(0);
131 }
132 
133 /*
134  * Initialize the vnode management data structures.
135  */
136 void
137 vntblinit(void)
138 {
139 	/* buffer cache may need a vnode for each buffer */
140 	maxvnodes = 2 * desiredvnodes;
141 	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
142 	    &pool_allocator_nointr);
143 	TAILQ_INIT(&vnode_hold_list);
144 	TAILQ_INIT(&vnode_free_list);
145 	TAILQ_INIT(&mountlist);
146 	/*
147 	 * Initialize the filesystem syncer.
148 	 */
149 	vn_initialize_syncerd();
150 }
151 
152 /*
153  * Mark a mount point as busy. Used to synchronize access and to delay
154  * unmounting.
155  *
156  * The default behaviour is to attempt to take a READ lock; if an unmount
157  * is in progress, wait for it to finish and then return failure.
158  */
159 int
160 vfs_busy(struct mount *mp, int flags)
161 {
162 	int rwflags = 0;
163 
164 	/* new mountpoints need their lock initialised */
165 	if (mp->mnt_lock.rwl_name == NULL)
166 		rw_init(&mp->mnt_lock, "vfslock");
167 
168 	if (flags & VB_WRITE)
169 		rwflags |= RW_WRITE;
170 	else
171 		rwflags |= RW_READ;
172 
173 	if (flags & VB_WAIT)
174 		rwflags |= RW_SLEEPFAIL;
175 	else
176 		rwflags |= RW_NOSLEEP;
177 
178 	if (rw_enter(&mp->mnt_lock, rwflags))
179 		return (EBUSY);
180 
181 	return (0);
182 }
183 
184 /*
185  * Free a busy file system
186  */
187 void
188 vfs_unbusy(struct mount *mp)
189 {
190 	rw_exit(&mp->mnt_lock);
191 }
192 
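/*
 * Check whether a mount point is busy, i.e. whether its lock is
 * currently held.
 */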
193 int
194 vfs_isbusy(struct mount *mp)
195 {
196 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
197 		return (1);
198 	else
199 		return (0);
200 }
201 
202 /*
203  * Lookup a filesystem type, and if found allocate and initialize
204  * a mount structure for it.
205  *
206  * Devname is usually updated by mount(8) after booting.
207  */
208 int
209 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
210 {
211 	struct vfsconf *vfsp;
212 	struct mount *mp;
213 
214 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
215 		if (!strcmp(vfsp->vfc_name, fstypename))
216 			break;
217 	if (vfsp == NULL)
218 		return (ENODEV);
219 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
220 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
221 	LIST_INIT(&mp->mnt_vnodelist);
222 	mp->mnt_vfc = vfsp;
223 	mp->mnt_op = vfsp->vfc_vfsops;
224 	mp->mnt_flag = MNT_RDONLY;
225 	mp->mnt_vnodecovered = NULLVP;
226 	vfsp->vfc_refcount++;
227 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
228 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
229 	mp->mnt_stat.f_mntonname[0] = '/';
230 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
231 	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
232 	*mpp = mp;
233  	return (0);
234  }
235 
236 /*
237  * Lookup a mount point by filesystem identifier.
238  */
239 struct mount *
240 vfs_getvfs(fsid_t *fsid)
241 {
242 	struct mount *mp;
243 
244 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
245 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
246 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
247 			return (mp);
248 		}
249 	}
250 
251 	return (NULL);
252 }
253 
254 
255 /*
256  * Get a new unique fsid
257  */
258 void
259 vfs_getnewfsid(struct mount *mp)
260 {
261 	static u_short xxxfs_mntid;
262 
263 	fsid_t tfsid;
264 	int mtype;
265 
266 	mtype = mp->mnt_vfc->vfc_typenum;
267 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
268 	mp->mnt_stat.f_fsid.val[1] = mtype;
269 	if (xxxfs_mntid == 0)
270 		++xxxfs_mntid;
271 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
272 	tfsid.val[1] = mtype;
273 	if (!TAILQ_EMPTY(&mountlist)) {
274 		while (vfs_getvfs(&tfsid)) {
275 			tfsid.val[0]++;
276 			xxxfs_mntid++;
277 		}
278 	}
279 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
280 }
281 
282 /*
283  * Set vnode attributes to VNOVAL
284  */
285 void
286 vattr_null(struct vattr *vap)
287 {
288 
289 	vap->va_type = VNON;
290 	/* XXX These next two were one line, split to work around a GCC bug. */
291 	vap->va_size = VNOVAL;
292 	vap->va_bytes = VNOVAL;
293 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
294 		vap->va_fsid = vap->va_fileid =
295 		vap->va_blocksize = vap->va_rdev =
296 		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
297 		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
298 		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
299 		vap->va_flags = vap->va_gen = VNOVAL;
300 	vap->va_vaflags = 0;
301 }
302 
303 /*
304  * Routines having to do with the management of the vnode table.
305  */
306 long numvnodes;
307 
308 /*
309  * Return the next vnode from the free list.
310  */
311 int
312 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
313     struct vnode **vpp)
314 {
315 	struct proc *p = curproc;
316 	struct freelst *listhd;
317 	static int toggle;
318 	struct vnode *vp;
319 	int s;
320 
321 	/*
322 	 * allow maxvnodes to increase if the buffer cache itself
323 	 * is big enough to justify it. (we don't shrink it ever)
324 	 */
325 	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
326 	    : maxvnodes;
327 
328 	/*
329 	 * We must choose whether to allocate a new vnode or recycle an
330 	 * existing one. The criterion for allocating a new one is that
331 	 * the total number of vnodes is less than the number desired or
332 	 * there are no vnodes on either free list. Generally we only
333 	 * want to recycle vnodes that have no buffers associated with
334 	 * them, so we look first on the vnode_free_list. If it is empty,
335 	 * we next consider vnodes with referencing buffers on the
336 	 * vnode_hold_list. The toggle ensures that half the time we
337 	 * will recycle a vnode from the vnode_hold_list, and half the time
338 	 * we will allocate a new one unless the list has grown to twice
339 	 * the desired size. We are reluctant to recycle vnodes from the
340 	 * vnode_hold_list because doing so loses the identity of all
341 	 * their referencing buffers.
342 	 */
343 	toggle ^= 1;
344 	if (numvnodes / 2 > maxvnodes)
345 		toggle = 0;
346 
347 	s = splbio();
348 	if ((numvnodes < maxvnodes) ||
349 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
350 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
351 		splx(s);
352 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
353 		RB_INIT(&vp->v_bufs_tree);
354 		RB_INIT(&vp->v_nc_tree);
355 		TAILQ_INIT(&vp->v_cache_dst);
356 		numvnodes++;
357 	} else {
358 		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
359 		    vp = TAILQ_NEXT(vp, v_freelist)) {
360 			if (VOP_ISLOCKED(vp) == 0)
361 				break;
362 		}
363 		/*
364 		 * Unless this is a bad time of the month, at most
365 		 * the first NCPUS items on the free list are
366 		 * locked, so this is close enough to being empty.
367 		 */
368 		if (vp == NULL) {
369 			splx(s);
370 			tablefull("vnode");
371 			*vpp = 0;
372 			return (ENFILE);
373 		}
374 
375 #ifdef DIAGNOSTIC
376 		if (vp->v_usecount) {
377 			vprint("free vnode", vp);
378 			panic("free vnode isn't");
379 		}
380 #endif
381 
382 		TAILQ_REMOVE(listhd, vp, v_freelist);
383 		vp->v_bioflag &= ~VBIOONFREELIST;
384 		splx(s);
385 
386 		if (vp->v_type != VBAD)
387 			vgonel(vp, p);
388 #ifdef DIAGNOSTIC
389 		if (vp->v_data) {
390 			vprint("cleaned vnode", vp);
391 			panic("cleaned vnode isn't");
392 		}
393 		s = splbio();
394 		if (vp->v_numoutput)
395 			panic("Clean vnode has pending I/O's");
396 		splx(s);
397 #endif
398 		vp->v_flag = 0;
399 		vp->v_socket = 0;
400 	}
401 	cache_purge(vp);
402 	vp->v_type = VNON;
403 	vp->v_tag = tag;
404 	vp->v_op = vops;
405 	insmntque(vp, mp);
406 	*vpp = vp;
407 	vp->v_usecount = 1;
408 	vp->v_data = 0;
409 	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
410 	return (0);
411 }
412 
413 /*
414  * Move a vnode from one mount queue to another.
415  */
416 void
417 insmntque(struct vnode *vp, struct mount *mp)
418 {
419 	/*
420 	 * Delete from old mount point vnode list, if on one.
421 	 */
422 	if (vp->v_mount != NULL)
423 		LIST_REMOVE(vp, v_mntvnodes);
424 	/*
425 	 * Insert into list of vnodes for the new mount point, if available.
426 	 */
427 	if ((vp->v_mount = mp) != NULL)
428 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
429 }
430 
431 /*
432  * Create a vnode for a block device.
433  * Used for root filesystem, argdev, and swap areas.
434  * Also used for memory file system special devices.
435  */
436 int
437 bdevvp(dev_t dev, struct vnode **vpp)
438 {
439 	return (getdevvp(dev, vpp, VBLK));
440 }
441 
442 /*
443  * Create a vnode for a character device.
444  * Used for console handling.
445  */
446 int
447 cdevvp(dev_t dev, struct vnode **vpp)
448 {
449 	return (getdevvp(dev, vpp, VCHR));
450 }
451 
452 /*
453  * Create a vnode for a device.
454  * Used by bdevvp (block device) for root file system etc.,
455  * and by cdevvp (character device) for console.
456  */
457 int
458 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
459 {
460 	struct vnode *vp;
461 	struct vnode *nvp;
462 	int error;
463 
464 	if (dev == NODEV) {
465 		*vpp = NULLVP;
466 		return (0);
467 	}
468 	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
469 	if (error) {
470 		*vpp = NULLVP;
471 		return (error);
472 	}
473 	vp = nvp;
474 	vp->v_type = type;
475 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
476 		vput(vp);
477 		vp = nvp;
478 	}
479 	*vpp = vp;
480 	return (0);
481 }
482 
483 /*
484  * Check to see if the new vnode represents a special device
485  * for which we already have a vnode (either because of
486  * bdevvp() or because of a different vnode representing
487  * the same block device). If such an alias exists, deallocate
488  * the existing contents and return the aliased vnode. The
489  * caller is responsible for filling it with its new contents.
490  */
491 struct vnode *
492 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
493 {
494 	struct proc *p = curproc;
495 	struct vnode *vp;
496 	struct vnode **vpp;
497 
498 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
499 		return (NULLVP);
500 
501 	vpp = &speclisth[SPECHASH(nvp_rdev)];
502 loop:
503 	for (vp = *vpp; vp; vp = vp->v_specnext) {
504 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
505 			continue;
506 		}
507 		/*
508 		 * Alias, but not in use, so flush it out.
509 		 */
510 		if (vp->v_usecount == 0) {
511 			vgonel(vp, p);
512 			goto loop;
513 		}
514 		if (vget(vp, LK_EXCLUSIVE, p)) {
515 			goto loop;
516 		}
517 		break;
518 	}
519 
520 	/*
521 	 * The common case (no reusable bdevvp alias exists) is handled below.
522 	 */
523 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
524 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
525 			M_WAITOK);
526 		nvp->v_rdev = nvp_rdev;
527 		nvp->v_hashchain = vpp;
528 		nvp->v_specnext = *vpp;
529 		nvp->v_specmountpoint = NULL;
530 		nvp->v_speclockf = NULL;
531 		memset(nvp->v_specbitmap, 0, sizeof(nvp->v_specbitmap));
532 		*vpp = nvp;
533 		if (vp != NULLVP) {
534 			nvp->v_flag |= VALIASED;
535 			vp->v_flag |= VALIASED;
536 			vput(vp);
537 		}
538 		return (NULLVP);
539 	}
540 
541 	/*
542 	 * This code is the uncommon case. It is called in case
543 	 * we found an alias that was VT_NON && vtype of VBLK
544 	 * This means we found a block device that was created
545 	 * using bdevvp.
546 	 * An example of such a vnode is the root partition device vnode
547 	 * created in ffs_mountroot.
548 	 *
549 	 * The vnodes created by bdevvp should not be aliased (why?).
550 	 */
551 
552 	VOP_UNLOCK(vp, 0, p);
553 	vclean(vp, 0, p);
554 	vp->v_op = nvp->v_op;
555 	vp->v_tag = nvp->v_tag;
556 	nvp->v_type = VNON;
557 	insmntque(vp, mp);
558 	return (vp);
559 }
560 
561 /*
562  * Grab a particular vnode from the free list, increment its
563  * reference count and lock it. If the vnode lock bit is set,
564  * the vnode is being eliminated in vgone. In that case, we
565  * cannot grab it, so the process is awakened when the
566  * transition is completed, and an error code is returned to
567  * indicate that the vnode is no longer usable, possibly
568  * having been changed to a new file system type.
569  */
570 int
571 vget(struct vnode *vp, int flags, struct proc *p)
572 {
573 	int error, s, onfreelist;
574 
575 	/*
576 	 * If the vnode is in the process of being cleaned out for
577 	 * another use, we wait for the cleaning to finish and then
578 	 * return failure. Cleaning is determined by checking that
579 	 * the VXLOCK flag is set.
580 	 */
581 
582 	if (vp->v_flag & VXLOCK) {
583 		if (flags & LK_NOWAIT) {
584 			return (EBUSY);
585 		}
586 
587 		vp->v_flag |= VXWANT;
588 		tsleep(vp, PINOD, "vget", 0);
589 		return (ENOENT);
590 	}
591 
592 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
593 	if (vp->v_usecount == 0 && onfreelist) {
594 		s = splbio();
595 		if (vp->v_holdcnt > 0)
596 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
597 		else
598 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
599 		vp->v_bioflag &= ~VBIOONFREELIST;
600 		splx(s);
601 	}
602 
603  	vp->v_usecount++;
604 	if (flags & LK_TYPE_MASK) {
605 		if ((error = vn_lock(vp, flags, p)) != 0) {
606 			vp->v_usecount--;
607 			if (vp->v_usecount == 0 && onfreelist)
608 				vputonfreelist(vp);
609 		}
610 		return (error);
611 	}
612 
613 	return (0);
614 }
615 
616 
617 /* Vnode reference. */
618 void
619 vref(struct vnode *vp)
620 {
621 #ifdef DIAGNOSTIC
622 	if (vp->v_usecount == 0)
623 		panic("vref used where vget required");
624 	if (vp->v_type == VNON)
625 		panic("vref on a VNON vnode");
626 #endif
627 	vp->v_usecount++;
628 }
629 
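/*
 * Place an unused vnode on the appropriate free list: the hold list if
 * it still references buffers (v_holdcnt > 0), otherwise the free list.
 * VBAD vnodes are inserted at the head so that they are reused first.
 */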
630 void
631 vputonfreelist(struct vnode *vp)
632 {
633 	int s;
634 	struct freelst *lst;
635 
636 	s = splbio();
637 #ifdef DIAGNOSTIC
638 	if (vp->v_usecount != 0)
639 		panic("Use count is not zero!");
640 
641 	if (vp->v_bioflag & VBIOONFREELIST) {
642 		vprint("vnode already on free list: ", vp);
643 		panic("vnode already on free list");
644 	}
645 #endif
646 
647 	vp->v_bioflag |= VBIOONFREELIST;
648 
649 	if (vp->v_holdcnt > 0)
650 		lst = &vnode_hold_list;
651 	else
652 		lst = &vnode_free_list;
653 
654 	if (vp->v_type == VBAD)
655 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
656 	else
657 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
658 
659 	splx(s);
660 }
661 
662 /*
663  * vput(), just unlock and vrele()
664  */
665 void
666 vput(struct vnode *vp)
667 {
668 	struct proc *p = curproc;
669 
670 #ifdef DIAGNOSTIC
671 	if (vp == NULL)
672 		panic("vput: null vp");
673 #endif
674 
675 #ifdef DIAGNOSTIC
676 	if (vp->v_usecount == 0) {
677 		vprint("vput: bad ref count", vp);
678 		panic("vput: ref cnt");
679 	}
680 #endif
681 	vp->v_usecount--;
682 	if (vp->v_usecount > 0) {
683 		VOP_UNLOCK(vp, 0, p);
684 		return;
685 	}
686 
687 #ifdef DIAGNOSTIC
688 	if (vp->v_writecount != 0) {
689 		vprint("vput: bad writecount", vp);
690 		panic("vput: v_writecount != 0");
691 	}
692 #endif
693 
694 	VOP_INACTIVE(vp, p);
695 
696 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
697 		vputonfreelist(vp);
698 }
699 
700 /*
701  * Vnode release - use for active VNODES.
702  * If count drops to zero, call inactive routine and return to freelist.
703  * Returns 0 if it did not sleep.
704  */
705 int
706 vrele(struct vnode *vp)
707 {
708 	struct proc *p = curproc;
709 
710 #ifdef DIAGNOSTIC
711 	if (vp == NULL)
712 		panic("vrele: null vp");
713 #endif
714 #ifdef DIAGNOSTIC
715 	if (vp->v_usecount == 0) {
716 		vprint("vrele: bad ref count", vp);
717 		panic("vrele: ref cnt");
718 	}
719 #endif
720 	vp->v_usecount--;
721 	if (vp->v_usecount > 0) {
722 		return (0);
723 	}
724 
725 #ifdef DIAGNOSTIC
726 	if (vp->v_writecount != 0) {
727 		vprint("vrele: bad writecount", vp);
728 		panic("vrele: v_writecount != 0");
729 	}
730 #endif
731 
732 	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
733 #ifdef DIAGNOSTIC
734 		vprint("vrele: cannot lock", vp);
735 #endif
736 		return (1);
737 	}
738 
739 	VOP_INACTIVE(vp, p);
740 
741 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
742 		vputonfreelist(vp);
743 	return (1);
744 }
745 
746 /* Page or buffer structure gets a reference. */
747 void
748 vhold(struct vnode *vp)
749 {
750 	/*
751 	 * If it is on the freelist and the hold count is currently
752 	 * zero, move it to the hold list.
753 	 */
754 	if ((vp->v_bioflag & VBIOONFREELIST) &&
755 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
756 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
757 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
758 	}
759 	vp->v_holdcnt++;
760 }
761 
762 /* Lose interest in a vnode. */
763 void
764 vdrop(struct vnode *vp)
765 {
766 #ifdef DIAGNOSTIC
767 	if (vp->v_holdcnt == 0)
768 		panic("vdrop: zero holdcnt");
769 #endif
770 
771 	vp->v_holdcnt--;
772 
773 	/*
774 	 * If it is on the holdlist and the hold count drops to
775 	 * zero, move it to the free list.
776 	 */
777 	if ((vp->v_bioflag & VBIOONFREELIST) &&
778 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
779 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
780 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
781 	}
782 }
783 
784 /*
785  * Remove any vnodes in the vnode table belonging to mount point mp.
786  *
787  * If MNT_NOFORCE is specified, there should not be any active ones,
788  * return error if any are found (nb: this is a user error, not a
789  * system error). If MNT_FORCE is specified, detach any active vnodes
790  * that are found.
791  */
792 #ifdef DEBUG
793 int busyprt = 0;	/* print out busy vnodes */
794 struct ctldebug debug1 = { "busyprt", &busyprt };
795 #endif
796 
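/*
 * Apply func(vp, arg) to every vnode on the mount point's vnode list.
 * The scan restarts from the beginning if a vnode turns out to have been
 * moved to another mount point, and stops early if func returns non-zero.
 */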
797 int
798 vfs_mount_foreach_vnode(struct mount *mp,
799     int (*func)(struct vnode *, void *), void *arg) {
800 	struct vnode *vp, *nvp;
801 	int error = 0;
802 
803 loop:
804 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
805 		if (vp->v_mount != mp)
806 			goto loop;
807 		nvp = LIST_NEXT(vp, v_mntvnodes);
808 
809 		error = func(vp, arg);
810 
811 		if (error != 0)
812 			break;
813 	}
814 
815 	return (error);
816 }
817 
818 struct vflush_args {
819 	struct vnode *skipvp;
820 	int busy;
821 	int flags;
822 };
823 
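/*
 * Per-vnode worker for vflush(): depending on the flags in vflush_args,
 * skip the vnode, clean it out, or count it as busy.
 */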
824 int
825 vflush_vnode(struct vnode *vp, void *arg) {
826 	struct vflush_args *va = arg;
827 	struct proc *p = curproc;
828 
829 	if (vp == va->skipvp) {
830 		return (0);
831 	}
832 
833 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
834 		return (0);
835 	}
836 
837 	/*
838 	 * If WRITECLOSE is set, only flush out regular file
839 	 * vnodes open for writing.
840 	 */
841 	if ((va->flags & WRITECLOSE) &&
842 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
843 		return (0);
844 	}
845 
846 	/*
847 	 * With v_usecount == 0, all we need to do is clear
848 	 * out the vnode data structures and we are done.
849 	 */
850 	if (vp->v_usecount == 0) {
851 		vgonel(vp, p);
852 		return (0);
853 	}
854 
855 	/*
856 	 * If FORCECLOSE is set, forcibly close the vnode.
857 	 * For block or character devices, revert to an
858 	 * anonymous device. For all other files, just kill them.
859 	 */
860 	if (va->flags & FORCECLOSE) {
861 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
862 			vgonel(vp, p);
863 		} else {
864 			vclean(vp, 0, p);
865 			vp->v_op = &spec_vops;
866 			insmntque(vp, (struct mount *)0);
867 		}
868 		return (0);
869 	}
870 
871 #ifdef DEBUG
872 	if (busyprt)
873 		vprint("vflush: busy vnode", vp);
874 #endif
875 	va->busy++;
876 	return (0);
877 }
878 
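/*
 * Flush out vnodes belonging to the mount point mp, as described in the
 * comment above vfs_mount_foreach_vnode().  The vnode skipvp, if not NULL,
 * is left untouched.  Returns EBUSY if any vnodes remain busy.
 */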
879 int
880 vflush(struct mount *mp, struct vnode *skipvp, int flags)
881 {
882 	struct vflush_args va;
883 	va.skipvp = skipvp;
884 	va.busy = 0;
885 	va.flags = flags;
886 
887 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
888 
889 	if (va.busy)
890 		return (EBUSY);
891 	return (0);
892 }
893 
894 /*
895  * Disassociate the underlying file system from a vnode.
896  */
897 void
898 vclean(struct vnode *vp, int flags, struct proc *p)
899 {
900 	int active;
901 
902 	/*
903 	 * Check to see if the vnode is in use.
904 	 * If so we have to reference it before we clean it out
905 	 * so that its count cannot fall to zero and generate a
906 	 * race against ourselves to recycle it.
907 	 */
908 	if ((active = vp->v_usecount) != 0)
909 		vp->v_usecount++;
910 
911 	/*
912 	 * Prevent the vnode from being recycled or
913 	 * brought into use while we clean it out.
914 	 */
915 	if (vp->v_flag & VXLOCK)
916 		panic("vclean: deadlock");
917 	vp->v_flag |= VXLOCK;
918 	/*
919 	 * Even if the count is zero, the VOP_INACTIVE routine may still
920 	 * have the object locked while it cleans it out. The VOP_LOCK
921 	 * ensures that the VOP_INACTIVE routine is done with its work.
922 	 * For active vnodes, it ensures that no other activity can
923 	 * occur while the underlying object is being cleaned out.
924 	 */
925 	VOP_LOCK(vp, LK_DRAIN, p);
926 
927 	/*
928 	 * Clean out any VM data associated with the vnode.
929 	 */
930 	uvm_vnp_terminate(vp);
931 	/*
932 	 * Clean out any buffers associated with the vnode.
933 	 */
934 	if (flags & DOCLOSE)
935 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
936 	/*
937 	 * If purging an active vnode, it must be closed and
938 	 * deactivated before being reclaimed. Note that the
939 	 * VOP_INACTIVE will unlock the vnode
940 	 */
941 	if (active) {
942 		if (flags & DOCLOSE)
943 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
944 		VOP_INACTIVE(vp, p);
945 	} else {
946 		/*
947 		 * Any other processes trying to obtain this lock must first
948 		 * wait for VXLOCK to clear, then call the new lock operation.
949 		 */
950 		VOP_UNLOCK(vp, 0, p);
951 	}
952 
953 	/*
954 	 * Reclaim the vnode.
955 	 */
956 	if (VOP_RECLAIM(vp, p))
957 		panic("vclean: cannot reclaim");
958 	if (active) {
959 		vp->v_usecount--;
960 		if (vp->v_usecount == 0) {
961 			if (vp->v_holdcnt > 0)
962 				panic("vclean: not clean");
963 			vputonfreelist(vp);
964 		}
965 	}
966 	cache_purge(vp);
967 
968 	/*
969 	 * Done with purge, notify sleepers of the grim news.
970 	 */
971 	vp->v_op = &dead_vops;
972 	VN_KNOTE(vp, NOTE_REVOKE);
973 	vp->v_tag = VT_NON;
974 	vp->v_flag &= ~VXLOCK;
975 #ifdef VFSLCKDEBUG
976 	vp->v_flag &= ~VLOCKSWORK;
977 #endif
978 	if (vp->v_flag & VXWANT) {
979 		vp->v_flag &= ~VXWANT;
980 		wakeup(vp);
981 	}
982 }
983 
984 /*
985  * Recycle an unused vnode to the front of the free list.
986  */
987 int
988 vrecycle(struct vnode *vp, struct proc *p)
989 {
990 	if (vp->v_usecount == 0) {
991 		vgonel(vp, p);
992 		return (1);
993 	}
994 	return (0);
995 }
996 
997 /*
998  * Eliminate all activity associated with a vnode
999  * in preparation for reuse.
1000  */
1001 void
1002 vgone(struct vnode *vp)
1003 {
1004 	struct proc *p = curproc;
1005 	vgonel(vp, p);
1006 }
1007 
1008 /*
1009  * vgone, with struct proc.
1010  */
1011 void
1012 vgonel(struct vnode *vp, struct proc *p)
1013 {
1014 	struct vnode *vq;
1015 	struct vnode *vx;
1016 
1017 	/*
1018 	 * If a vgone (or vclean) is already in progress,
1019 	 * wait until it is done and return.
1020 	 */
1021 	if (vp->v_flag & VXLOCK) {
1022 		vp->v_flag |= VXWANT;
1023 		tsleep(vp, PINOD, "vgone", 0);
1024 		return;
1025 	}
1026 
1027 	/*
1028 	 * Clean out the filesystem specific data.
1029 	 */
1030 	vclean(vp, DOCLOSE, p);
1031 	/*
1032 	 * Delete from old mount point vnode list, if on one.
1033 	 */
1034 	if (vp->v_mount != NULL)
1035 		insmntque(vp, (struct mount *)0);
1036 	/*
1037 	 * If special device, remove it from special device alias list
1038 	 * if it is on one.
1039 	 */
1040 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1041 		if (*vp->v_hashchain == vp) {
1042 			*vp->v_hashchain = vp->v_specnext;
1043 		} else {
1044 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1045 				if (vq->v_specnext != vp)
1046 					continue;
1047 				vq->v_specnext = vp->v_specnext;
1048 				break;
1049 			}
1050 			if (vq == NULL)
1051 				panic("missing bdev");
1052 		}
1053 		if (vp->v_flag & VALIASED) {
1054 			vx = NULL;
1055 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1056 				if (vq->v_rdev != vp->v_rdev ||
1057 				    vq->v_type != vp->v_type)
1058 					continue;
1059 				if (vx)
1060 					break;
1061 				vx = vq;
1062 			}
1063 			if (vx == NULL)
1064 				panic("missing alias");
1065 			if (vq == NULL)
1066 				vx->v_flag &= ~VALIASED;
1067 			vp->v_flag &= ~VALIASED;
1068 		}
1069 		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1070 		vp->v_specinfo = NULL;
1071 	}
1072 	/*
1073 	 * If it is on the freelist and not already at the head,
1074 	 * move it to the head of the list.
1075 	 */
1076 	vp->v_type = VBAD;
1077 
1078 	/*
1079 	 * Move onto the free list, unless we were called from
1080 	 * getnewvnode and we're not on any free list
1081 	 */
1082 	if (vp->v_usecount == 0 &&
1083 	    (vp->v_bioflag & VBIOONFREELIST)) {
1084 		int s;
1085 
1086 		s = splbio();
1087 
1088 		if (vp->v_holdcnt > 0)
1089 			panic("vgonel: not clean");
1090 
1091 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1092 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1093 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1094 		}
1095 		splx(s);
1096 	}
1097 }
1098 
1099 /*
1100  * Lookup a vnode by device number.
1101  */
1102 int
1103 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1104 {
1105 	struct vnode *vp;
1106 	int rc = 0;
1107 
1108 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1109 		if (dev != vp->v_rdev || type != vp->v_type)
1110 			continue;
1111 		*vpp = vp;
1112 		rc = 1;
1113 		break;
1114 	}
1115 	return (rc);
1116 }
1117 
1118 /*
1119  * Revoke all the vnodes corresponding to the specified minor number
1120  * range (endpoints inclusive) of the specified major.
1121  */
1122 void
1123 vdevgone(int maj, int minl, int minh, enum vtype type)
1124 {
1125 	struct vnode *vp;
1126 	int mn;
1127 
1128 	for (mn = minl; mn <= minh; mn++)
1129 		if (vfinddev(makedev(maj, mn), type, &vp))
1130 			VOP_REVOKE(vp, REVOKEALL);
1131 }
1132 
1133 /*
1134  * Calculate the total number of references to a special device.
1135  */
1136 int
1137 vcount(struct vnode *vp)
1138 {
1139 	struct vnode *vq, *vnext;
1140 	int count;
1141 
1142 loop:
1143 	if ((vp->v_flag & VALIASED) == 0)
1144 		return (vp->v_usecount);
1145 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1146 		vnext = vq->v_specnext;
1147 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1148 			continue;
1149 		/*
1150 		 * Alias, but not in use, so flush it out.
1151 		 */
1152 		if (vq->v_usecount == 0 && vq != vp) {
1153 			vgone(vq);
1154 			goto loop;
1155 		}
1156 		count += vq->v_usecount;
1157 	}
1158 	return (count);
1159 }
1160 
1161 #if defined(DEBUG) || defined(DIAGNOSTIC)
1162 /*
1163  * Print out a description of a vnode.
1164  */
1165 static char *typename[] =
1166    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1167 
1168 void
1169 vprint(char *label, struct vnode *vp)
1170 {
1171 	char buf[64];
1172 
1173 	if (label != NULL)
1174 		printf("%s: ", label);
1175 	printf("%p, type %s, use %u, write %u, hold %u,",
1176 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1177 		vp->v_holdcnt);
1178 	buf[0] = '\0';
1179 	if (vp->v_flag & VROOT)
1180 		strlcat(buf, "|VROOT", sizeof buf);
1181 	if (vp->v_flag & VTEXT)
1182 		strlcat(buf, "|VTEXT", sizeof buf);
1183 	if (vp->v_flag & VSYSTEM)
1184 		strlcat(buf, "|VSYSTEM", sizeof buf);
1185 	if (vp->v_flag & VXLOCK)
1186 		strlcat(buf, "|VXLOCK", sizeof buf);
1187 	if (vp->v_flag & VXWANT)
1188 		strlcat(buf, "|VXWANT", sizeof buf);
1189 	if (vp->v_bioflag & VBIOWAIT)
1190 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1191 	if (vp->v_bioflag & VBIOONFREELIST)
1192 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1193 	if (vp->v_bioflag & VBIOONSYNCLIST)
1194 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1195 	if (vp->v_flag & VALIASED)
1196 		strlcat(buf, "|VALIASED", sizeof buf);
1197 	if (buf[0] != '\0')
1198 		printf(" flags (%s)", &buf[1]);
1199 	if (vp->v_data == NULL) {
1200 		printf("\n");
1201 	} else {
1202 		printf("\n\t");
1203 		VOP_PRINT(vp);
1204 	}
1205 }
1206 #endif /* DEBUG || DIAGNOSTIC */
1207 
1208 #ifdef DEBUG
1209 /*
1210  * List all of the locked vnodes in the system.
1211  * Called when debugging the kernel.
1212  */
1213 void
1214 printlockedvnodes(void)
1215 {
1216 	struct mount *mp, *nmp;
1217 	struct vnode *vp;
1218 
1219 	printf("Locked vnodes\n");
1220 
1221 	TAILQ_FOREACH_SAFE(mp, &mountlist, mnt_list, nmp) {
1222 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1223 			continue;
1224 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1225 			if (VOP_ISLOCKED(vp))
1226 				vprint((char *)0, vp);
1227 		}
1228 		vfs_unbusy(mp);
1229  	}
1230 
1231 }
1232 #endif
1233 
1234 /*
1235  * Top level filesystem related information gathering.
1236  */
1237 int
1238 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1239     size_t newlen, struct proc *p)
1240 {
1241 	struct vfsconf *vfsp, *tmpvfsp;
1242 	int ret;
1243 
1244 	/* all sysctl names at this level are at least name and field */
1245 	if (namelen < 2)
1246 		return (ENOTDIR);		/* overloaded */
1247 
1248 	if (name[0] != VFS_GENERIC) {
1249 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1250 			if (vfsp->vfc_typenum == name[0])
1251 				break;
1252 
1253 		if (vfsp == NULL)
1254 			return (EOPNOTSUPP);
1255 
1256 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1257 		    oldp, oldlenp, newp, newlen, p));
1258 	}
1259 
1260 	switch (name[1]) {
1261 	case VFS_MAXTYPENUM:
1262 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1263 
1264 	case VFS_CONF:
1265 		if (namelen < 3)
1266 			return (ENOTDIR);	/* overloaded */
1267 
1268 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1269 			if (vfsp->vfc_typenum == name[2])
1270 				break;
1271 
1272 		if (vfsp == NULL)
1273 			return (EOPNOTSUPP);
1274 
1275 		/* Make a copy, clear out kernel pointers */
1276 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
1277 		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
1278 		tmpvfsp->vfc_vfsops = NULL;
1279 		tmpvfsp->vfc_next = NULL;
1280 
1281 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1282 		    sizeof(struct vfsconf));
1283 
1284 		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1285 		return (ret);
1286 	case VFS_BCACHESTAT:	/* buffer cache statistics */
1287 		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1288 		    sizeof(struct bcachestats));
1289 		return(ret);
1290 	}
1291 	return (EOPNOTSUPP);
1292 }
1293 
1294 int kinfo_vdebug = 1;
1295 #define KINFO_VNODESLOP	10
1296 /*
1297  * Dump vnode list (via sysctl).
1298  * Copyout address of vnode followed by vnode.
1299  */
1300 /* ARGSUSED */
1301 int
1302 sysctl_vnode(char *where, size_t *sizep, struct proc *p)
1303 {
1304 	struct mount *mp, *nmp;
1305 	struct vnode *vp, *nvp;
1306 	char *bp = where, *savebp;
1307 	char *ewhere;
1308 	int error;
1309 
1310 	if (where == NULL) {
1311 		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
1312 		return (0);
1313 	}
1314 	ewhere = where + *sizep;
1315 
1316 	TAILQ_FOREACH_SAFE(mp, &mountlist, mnt_list, nmp) {
1317 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1318 			continue;
1319 		savebp = bp;
1320 again:
1321 		LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
1322 			/*
1323 			 * Check that the vp is still associated with
1324 			 * this filesystem.  RACE: could have been
1325 			 * recycled onto the same filesystem.
1326 			 */
1327 			if (vp->v_mount != mp) {
1328 				if (kinfo_vdebug)
1329 					printf("kinfo: vp changed\n");
1330 				bp = savebp;
1331 				goto again;
1332 			}
1333 			if (bp + sizeof(struct e_vnode) > ewhere) {
1334 				*sizep = bp - where;
1335 				vfs_unbusy(mp);
1336 				return (ENOMEM);
1337 			}
1338 			if ((error = copyout(&vp,
1339 			    &((struct e_vnode *)bp)->vptr,
1340 			    sizeof(struct vnode *))) ||
1341 			   (error = copyout(vp,
1342 			    &((struct e_vnode *)bp)->vnode,
1343 			    sizeof(struct vnode)))) {
1344 				vfs_unbusy(mp);
1345 				return (error);
1346 			}
1347 			bp += sizeof(struct e_vnode);
1348 		}
1349 
1350 		vfs_unbusy(mp);
1351 	}
1352 
1353 	*sizep = bp - where;
1354 
1355 	return (0);
1356 }
1357 
1358 /*
1359  * Check to see if a filesystem is mounted on a block device.
1360  */
1361 int
1362 vfs_mountedon(struct vnode *vp)
1363 {
1364 	struct vnode *vq;
1365 	int error = 0;
1366 
1367  	if (vp->v_specmountpoint != NULL)
1368 		return (EBUSY);
1369 	if (vp->v_flag & VALIASED) {
1370 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1371 			if (vq->v_rdev != vp->v_rdev ||
1372 			    vq->v_type != vp->v_type)
1373 				continue;
1374 			if (vq->v_specmountpoint != NULL) {
1375 				error = EBUSY;
1376 				break;
1377 			}
1378  		}
1379 	}
1380 	return (error);
1381 }
1382 
1383 /*
1384  * Build hash lists of net addresses and hang them off the mount point.
1385  * Called by ufs_mount() to set up the lists of export addresses.
1386  */
1387 int
1388 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1389     struct export_args *argp)
1390 {
1391 	struct netcred *np;
1392 	struct radix_node_head *rnh;
1393 	int i;
1394 	struct radix_node *rn;
1395 	struct sockaddr *saddr, *smask = 0;
1396 	int error;
1397 
1398 	if (argp->ex_addrlen == 0) {
1399 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1400 			return (EPERM);
1401 		np = &nep->ne_defexported;
1402 		mp->mnt_flag |= MNT_DEFEXPORTED;
1403 		goto finish;
1404 	}
1405 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1406 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1407 		return (EINVAL);
1408 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1409 	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
1410 	saddr = (struct sockaddr *)(np + 1);
1411 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1412 	if (error)
1413 		goto out;
1414 	if (saddr->sa_len > argp->ex_addrlen)
1415 		saddr->sa_len = argp->ex_addrlen;
1416 	if (argp->ex_masklen) {
1417 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1418 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1419 		if (error)
1420 			goto out;
1421 		if (smask->sa_len > argp->ex_masklen)
1422 			smask->sa_len = argp->ex_masklen;
1423 	}
1424 	i = saddr->sa_family;
1425 	switch (i) {
1426 	case AF_INET:
1427 		if ((rnh = nep->ne_rtable_inet) == NULL) {
1428 			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1429 			    offsetof(struct sockaddr_in, sin_addr) * 8)) {
1430 				error = ENOBUFS;
1431 				goto out;
1432 			}
1433 			rnh = nep->ne_rtable_inet;
1434 		}
1435 		break;
1436 	default:
1437 		error = EINVAL;
1438 		goto out;
1439 	}
1440 	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
1441 		np->netc_rnodes, 0);
1442 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1443 		error = EPERM;
1444 		goto out;
1445 	}
1446 finish:
1447 	np->netc_exflags = argp->ex_flags;
1448 	/* fill in the kernel's ucred from userspace's xucred */
1449 	crfromxucred(&np->netc_anon, &argp->ex_anon);
1450 	return (0);
1451 out:
1452 	free(np, M_NETADDR, 0);
1453 	return (error);
1454 }
1455 
1456 /* ARGSUSED */
1457 int
1458 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1459 {
1460 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1461 
1462 	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
1463 	free(rn, M_NETADDR, 0);
1464 	return (0);
1465 }
1466 
1467 /*
1468  * Free the net address hash lists that are hanging off the mount points.
1469  */
1470 void
1471 vfs_free_addrlist(struct netexport *nep)
1472 {
1473 	struct radix_node_head *rnh;
1474 
1475 	if ((rnh = nep->ne_rtable_inet) != NULL) {
1476 		(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
1477 		free(rnh, M_RTABLE, 0);
1478 		nep->ne_rtable_inet = NULL;
1479 	}
1480 }
1481 
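/*
 * Update the export information of a mount point: delete the existing
 * address lists and/or hang new ones according to the flags in argp.
 */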
1482 int
1483 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1484 {
1485 	int error;
1486 
1487 	if (argp->ex_flags & MNT_DELEXPORT) {
1488 		vfs_free_addrlist(nep);
1489 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1490 	}
1491 	if (argp->ex_flags & MNT_EXPORTED) {
1492 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1493 			return (error);
1494 		mp->mnt_flag |= MNT_EXPORTED;
1495 	}
1496 	return (0);
1497 }
1498 
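/*
 * Find the export credentials that apply to the client address in nam,
 * falling back to the default export entry when no specific match exists.
 * Returns NULL if the filesystem is not exported to that client.
 */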
1499 struct netcred *
1500 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1501 {
1502 	struct netcred *np;
1503 	struct radix_node_head *rnh;
1504 	struct sockaddr *saddr;
1505 
1506 	np = NULL;
1507 	if (mp->mnt_flag & MNT_EXPORTED) {
1508 		/*
1509 		 * Lookup in the export list first.
1510 		 */
1511 		if (nam != NULL) {
1512 			saddr = mtod(nam, struct sockaddr *);
1513 			switch(saddr->sa_family) {
1514 			case AF_INET:
1515 				rnh = nep->ne_rtable_inet;
1516 				break;
1517 			default:
1518 				rnh = NULL;
1519 				break;
1520 			}
1521 			if (rnh != NULL) {
1522 				np = (struct netcred *)
1523 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
1524 					    rnh);
1525 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
1526 					np = NULL;
1527 			}
1528 		}
1529 		/*
1530 		 * If no address match, use the default if it exists.
1531 		 */
1532 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1533 			np = &nep->ne_defexported;
1534 	}
1535 	return (np);
1536 }
1537 
1538 /*
1539  * Do the usual access checking.
1540  * file_mode, uid and gid are from the vnode in question,
1541  * while acc_mode and cred are from the VOP_ACCESS parameter list
1542  */
1543 int
1544 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1545     mode_t acc_mode, struct ucred *cred)
1546 {
1547 	mode_t mask;
1548 
1549 	/* User id 0 always gets read/write access. */
1550 	if (cred->cr_uid == 0) {
1551 		/* For VEXEC, at least one of the execute bits must be set. */
1552 		if ((acc_mode & VEXEC) && type != VDIR &&
1553 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1554 			return EACCES;
1555 		return 0;
1556 	}
1557 
1558 	mask = 0;
1559 
1560 	/* Otherwise, check the owner. */
1561 	if (cred->cr_uid == uid) {
1562 		if (acc_mode & VEXEC)
1563 			mask |= S_IXUSR;
1564 		if (acc_mode & VREAD)
1565 			mask |= S_IRUSR;
1566 		if (acc_mode & VWRITE)
1567 			mask |= S_IWUSR;
1568 		return (file_mode & mask) == mask ? 0 : EACCES;
1569 	}
1570 
1571 	/* Otherwise, check the groups. */
1572 	if (groupmember(gid, cred)) {
1573 		if (acc_mode & VEXEC)
1574 			mask |= S_IXGRP;
1575 		if (acc_mode & VREAD)
1576 			mask |= S_IRGRP;
1577 		if (acc_mode & VWRITE)
1578 			mask |= S_IWGRP;
1579 		return (file_mode & mask) == mask ? 0 : EACCES;
1580 	}
1581 
1582 	/* Otherwise, check everyone else. */
1583 	if (acc_mode & VEXEC)
1584 		mask |= S_IXOTH;
1585 	if (acc_mode & VREAD)
1586 		mask |= S_IROTH;
1587 	if (acc_mode & VWRITE)
1588 		mask |= S_IWOTH;
1589 	return (file_mode & mask) == mask ? 0 : EACCES;
1590 }
1591 
1592 /*
1593  * Unmount all file systems.
1594  * We traverse the list in reverse order under the assumption that doing so
1595  * will avoid needing to worry about dependencies.
1596  */
1597 void
1598 vfs_unmountall(void)
1599 {
1600 	struct mount *mp, *nmp;
1601 	int allerror, error, again = 1;
1602 
1603  retry:
1604 	allerror = 0;
1605 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1606 		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
1607 			continue;
1608 		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
1609 			printf("unmount of %s failed with error %d\n",
1610 			    mp->mnt_stat.f_mntonname, error);
1611 			allerror = 1;
1612 		}
1613 	}
1614 
1615 	if (allerror) {
1616 		printf("WARNING: some file systems would not unmount\n");
1617 		if (again) {
1618 			printf("retrying\n");
1619 			again = 0;
1620 			goto retry;
1621 		}
1622 	}
1623 }
1624 
1625 /*
1626  * Sync and unmount file systems before shutting down.
1627  */
1628 void
1629 vfs_shutdown(void)
1630 {
1631 #ifdef ACCOUNTING
1632 	acct_shutdown();
1633 #endif
1634 
1635 	/* XXX Should suspend scheduling. */
1636 	(void) spl0();
1637 
1638 	printf("syncing disks... ");
1639 
1640 	if (panicstr == 0) {
1641 		/* Sync before unmount, in case we hang on something. */
1642 		sys_sync(&proc0, (void *)0, (register_t *)0);
1643 
1644 		/* Unmount file systems. */
1645 		vfs_unmountall();
1646 	}
1647 
1648 	if (vfs_syncwait(1))
1649 		printf("giving up\n");
1650 	else
1651 		printf("done\n");
1652 
1653 #if NSOFTRAID > 0
1654 	sr_shutdown();
1655 #endif
1656 }
1657 
1658 /*
1659  * Perform the sync() operation and wait for buffers to flush.
1660  * Assumptions: called with the scheduler disabled and physical I/O enabled;
1661  * for now called at spl0().  XXX
1662  */
1663 int
1664 vfs_syncwait(int verbose)
1665 {
1666 	struct buf *bp;
1667 	int iter, nbusy, dcount, s;
1668 	struct proc *p;
1669 
1670 	p = curproc? curproc : &proc0;
1671 	sys_sync(p, (void *)0, (register_t *)0);
1672 
1673 	/* Wait for sync to finish. */
1674 	dcount = 10000;
1675 	for (iter = 0; iter < 20; iter++) {
1676 		nbusy = 0;
1677 		LIST_FOREACH(bp, &bufhead, b_list) {
1678 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1679 				nbusy++;
1680 			/*
1681 			 * With soft updates, some buffers that are
1682 			 * written will be remarked as dirty until other
1683 			 * buffers are written.
1684 			 */
1685 			if (bp->b_flags & B_DELWRI) {
1686 				s = splbio();
1687 				bremfree(bp);
1688 				buf_acquire(bp);
1689 				splx(s);
1690 				nbusy++;
1691 				bawrite(bp);
1692 				if (dcount-- <= 0) {
1693 					if (verbose)
1694 						printf("softdep ");
1695 					return 1;
1696 				}
1697 			}
1698 		}
1699 		if (nbusy == 0)
1700 			break;
1701 		if (verbose)
1702 			printf("%d ", nbusy);
1703 		DELAY(40000 * iter);
1704 	}
1705 
1706 	return nbusy;
1707 }
1708 
1709 /*
1710  * posix file system related system variables.
1711  */
1712 int
1713 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1714     void *newp, size_t newlen, struct proc *p)
1715 {
1716 	/* all sysctl names at this level are terminal */
1717 	if (namelen != 1)
1718 		return (ENOTDIR);
1719 
1720 	switch (name[0]) {
1721 	case FS_POSIX_SETUID:
1722 		if (newp && securelevel > 0)
1723 			return (EPERM);
1724 		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1725 	default:
1726 		return (EOPNOTSUPP);
1727 	}
1728 	/* NOTREACHED */
1729 }
1730 
1731 /*
1732  * file system related system variables.
1733  */
1734 int
1735 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1736     size_t newlen, struct proc *p)
1737 {
1738 	sysctlfn *fn;
1739 
1740 	switch (name[0]) {
1741 	case FS_POSIX:
1742 		fn = fs_posix_sysctl;
1743 		break;
1744 	default:
1745 		return (EOPNOTSUPP);
1746 	}
1747 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1748 }
1749 
1750 
1751 /*
1752  * Routines dealing with vnodes and buffers
1753  */
1754 
1755 /*
1756  * Wait for all outstanding I/Os to complete
1757  *
1758  * Manipulates v_numoutput. Must be called at splbio()
1759  */
1760 int
1761 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1762 {
1763 	int error = 0;
1764 
1765 	splassert(IPL_BIO);
1766 
1767 	while (vp->v_numoutput) {
1768 		vp->v_bioflag |= VBIOWAIT;
1769 		error = tsleep(&vp->v_numoutput,
1770 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1771 		if (error)
1772 			break;
1773 	}
1774 
1775 	return (error);
1776 }
1777 
1778 /*
1779  * Update outstanding I/O count and do wakeup if requested.
1780  *
1781  * Manipulates v_numoutput. Must be called at splbio()
1782  */
1783 void
1784 vwakeup(struct vnode *vp)
1785 {
1786 	splassert(IPL_BIO);
1787 
1788 	if (vp != NULL) {
1789 		if (vp->v_numoutput-- == 0)
1790 			panic("vwakeup: neg numoutput");
1791 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1792 			vp->v_bioflag &= ~VBIOWAIT;
1793 			wakeup(&vp->v_numoutput);
1794 		}
1795 	}
1796 }
1797 
1798 /*
1799  * Flush out and invalidate all buffers associated with a vnode.
1800  * Called with the underlying object locked.
1801  */
1802 int
1803 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1804     int slpflag, int slptimeo)
1805 {
1806 	struct buf *bp;
1807 	struct buf *nbp, *blist;
1808 	int s, error;
1809 
1810 #ifdef VFSLCKDEBUG
1811 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1812 		panic("vinvalbuf(): vp isn't locked");
1813 #endif
1814 
1815 	if (flags & V_SAVE) {
1816 		s = splbio();
1817 		vwaitforio(vp, 0, "vinvalbuf", 0);
1818 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1819 			splx(s);
1820 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1821 				return (error);
1822 			s = splbio();
1823 			if (vp->v_numoutput > 0 ||
1824 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1825 				panic("vinvalbuf: dirty bufs");
1826 		}
1827 		splx(s);
1828 	}
1829 loop:
1830 	s = splbio();
1831 	for (;;) {
1832 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1833 		    (flags & V_SAVEMETA))
1834 			while (blist && blist->b_lblkno < 0)
1835 				blist = LIST_NEXT(blist, b_vnbufs);
1836 		if (blist == NULL &&
1837 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1838 		    (flags & V_SAVEMETA))
1839 			while (blist && blist->b_lblkno < 0)
1840 				blist = LIST_NEXT(blist, b_vnbufs);
1841 		if (!blist)
1842 			break;
1843 
1844 		for (bp = blist; bp; bp = nbp) {
1845 			nbp = LIST_NEXT(bp, b_vnbufs);
1846 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1847 				continue;
1848 			if (bp->b_flags & B_BUSY) {
1849 				bp->b_flags |= B_WANTED;
1850 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1851 				    "vinvalbuf", slptimeo);
1852 				if (error) {
1853 					splx(s);
1854 					return (error);
1855 				}
1856 				break;
1857 			}
1858 			bremfree(bp);
1859 			/*
1860 			 * XXX Since there are no node locks for NFS, I believe
1861 			 * there is a slight chance that a delayed write will
1862 			 * occur while sleeping just above, so check for it.
1863 			 */
1864 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1865 				buf_acquire(bp);
1866 				splx(s);
1867 				(void) VOP_BWRITE(bp);
1868 				goto loop;
1869 			}
1870 			buf_acquire_nomap(bp);
1871 			bp->b_flags |= B_INVAL;
1872 			brelse(bp);
1873 		}
1874 	}
1875 	if (!(flags & V_SAVEMETA) &&
1876 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1877 		panic("vinvalbuf: flush failed");
1878 	splx(s);
1879 	return (0);
1880 }
1881 
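/*
 * Write out the dirty buffers of a vnode.  If sync is non-zero, wait for
 * the writes to complete and insist that the vnode ends up clean.
 */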
1882 void
1883 vflushbuf(struct vnode *vp, int sync)
1884 {
1885 	struct buf *bp, *nbp;
1886 	int s;
1887 
1888 loop:
1889 	s = splbio();
1890 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
1891 	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
1892 		nbp = LIST_NEXT(bp, b_vnbufs);
1893 		if ((bp->b_flags & B_BUSY))
1894 			continue;
1895 		if ((bp->b_flags & B_DELWRI) == 0)
1896 			panic("vflushbuf: not dirty");
1897 		bremfree(bp);
1898 		buf_acquire(bp);
1899 		splx(s);
1900 		/*
1901 		 * Wait for I/O associated with indirect blocks to complete,
1902 		 * since there is no way to quickly wait for them below.
1903 		 */
1904 		if (bp->b_vp == vp || sync == 0)
1905 			(void) bawrite(bp);
1906 		else
1907 			(void) bwrite(bp);
1908 		goto loop;
1909 	}
1910 	if (sync == 0) {
1911 		splx(s);
1912 		return;
1913 	}
1914 	vwaitforio(vp, 0, "vflushbuf", 0);
1915 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1916 		splx(s);
1917 #ifdef DIAGNOSTIC
1918 		vprint("vflushbuf: dirty", vp);
1919 #endif
1920 		goto loop;
1921 	}
1922 	splx(s);
1923 }
1924 
1925 /*
1926  * Associate a buffer with a vnode.
1927  *
1928  * Manipulates buffer vnode queues. Must be called at splbio().
1929  */
1930 void
1931 bgetvp(struct vnode *vp, struct buf *bp)
1932 {
1933 	splassert(IPL_BIO);
1934 
1935 
1936 	if (bp->b_vp)
1937 		panic("bgetvp: not free");
1938 	vhold(vp);
1939 	bp->b_vp = vp;
1940 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1941 		bp->b_dev = vp->v_rdev;
1942 	else
1943 		bp->b_dev = NODEV;
1944 	/*
1945 	 * Insert onto list for new vnode.
1946 	 */
1947 	bufinsvn(bp, &vp->v_cleanblkhd);
1948 }
1949 
1950 /*
1951  * Disassociate a buffer from a vnode.
1952  *
1953  * Manipulates vnode buffer queues. Must be called at splbio().
1954  */
1955 void
1956 brelvp(struct buf *bp)
1957 {
1958 	struct vnode *vp;
1959 
1960 	splassert(IPL_BIO);
1961 
1962 	if ((vp = bp->b_vp) == (struct vnode *) 0)
1963 		panic("brelvp: NULL");
1964 	/*
1965 	 * Delete from old vnode list, if on one.
1966 	 */
1967 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1968 		bufremvn(bp);
1969 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1970 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1971 		vp->v_bioflag &= ~VBIOONSYNCLIST;
1972 		LIST_REMOVE(vp, v_synclist);
1973 	}
1974 	bp->b_vp = NULL;
1975 
1976 	vdrop(vp);
1977 }
1978 
1979 /*
1980  * Replaces the current vnode associated with the buffer, if any,
1981  * with a new vnode.
1982  *
1983  * If an output I/O is pending on the buffer, the old vnode
1984  * I/O count is adjusted.
1985  *
1986  * Ignores vnode buffer queues. Must be called at splbio().
1987  */
1988 void
1989 buf_replacevnode(struct buf *bp, struct vnode *newvp)
1990 {
1991 	struct vnode *oldvp = bp->b_vp;
1992 
1993 	splassert(IPL_BIO);
1994 
1995 	if (oldvp)
1996 		brelvp(bp);
1997 
1998 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
1999 		newvp->v_numoutput++;	/* put it on swapdev */
2000 		vwakeup(oldvp);
2001 	}
2002 
2003 	bgetvp(newvp, bp);
2004 	bufremvn(bp);
2005 }
2006 
2007 /*
2008  * Used to assign buffers to the appropriate clean or dirty list on
2009  * the vnode and to add newly dirty vnodes to the appropriate
2010  * filesystem syncer list.
2011  *
2012  * Manipulates vnode buffer queues. Must be called at splbio().
2013  */
2014 void
2015 reassignbuf(struct buf *bp)
2016 {
2017 	struct buflists *listheadp;
2018 	int delay;
2019 	struct vnode *vp = bp->b_vp;
2020 
2021 	splassert(IPL_BIO);
2022 
2023 	/*
2024 	 * Delete from old vnode list, if on one.
2025 	 */
2026 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2027 		bufremvn(bp);
2028 
2029 	/*
2030 	 * If dirty, put on list of dirty buffers;
2031 	 * otherwise insert onto list of clean buffers.
2032 	 */
2033 	if ((bp->b_flags & B_DELWRI) == 0) {
2034 		listheadp = &vp->v_cleanblkhd;
2035 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2036 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2037 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2038 			LIST_REMOVE(vp, v_synclist);
2039 		}
2040 	} else {
2041 		listheadp = &vp->v_dirtyblkhd;
2042 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2043 			switch (vp->v_type) {
2044 			case VDIR:
2045 				delay = syncdelay / 2;
2046 				break;
2047 			case VBLK:
2048 				if (vp->v_specmountpoint != NULL) {
2049 					delay = syncdelay / 3;
2050 					break;
2051 				}
2052 				/* FALLTHROUGH */
2053 			default:
2054 				delay = syncdelay;
2055 			}
2056 			vn_syncer_add_to_worklist(vp, delay);
2057 		}
2058 	}
2059 	bufinsvn(bp, listheadp);
2060 }
2061 
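/*
 * Register a filesystem type: append it to the vfsconf list, update
 * maxvfsconf and call its vfs_init() routine.  Returns EEXIST if a
 * filesystem of the same name is already registered.
 */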
2062 int
2063 vfs_register(struct vfsconf *vfs)
2064 {
2065 	struct vfsconf *vfsp;
2066 	struct vfsconf **vfspp;
2067 
2068 #ifdef DIAGNOSTIC
2069 	/* Paranoia? */
2070 	if (vfs->vfc_refcount != 0)
2071 		printf("vfs_register called with vfc_refcount > 0\n");
2072 #endif
2073 
2074 	/* Check if filesystem already known */
2075 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2076 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2077 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2078 			return (EEXIST);
2079 
2080 	if (vfs->vfc_typenum > maxvfsconf)
2081 		maxvfsconf = vfs->vfc_typenum;
2082 
2083 	vfs->vfc_next = NULL;
2084 
2085 	/* Add to the end of the list */
2086 	*vfspp = vfs;
2087 
2088 	/* Call vfs_init() */
2089 	if (vfs->vfc_vfsops->vfs_init)
2090 		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2091 
2092 	return 0;
2093 }
2094 
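/*
 * Remove a filesystem type from the vfsconf list.  Fails with ENOENT if
 * the type is unknown and with EBUSY if it is still referenced by a mount.
 */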
2095 int
2096 vfs_unregister(struct vfsconf *vfs)
2097 {
2098 	struct vfsconf *vfsp;
2099 	struct vfsconf **vfspp;
2100 	int maxtypenum;
2101 
2102 	/* Find our vfsconf struct */
2103 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2104 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2105 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2106 			break;
2107 	}
2108 
2109 	if (!vfsp)			/* Not found */
2110 		return (ENOENT);
2111 
2112 	if (vfsp->vfc_refcount)		/* In use */
2113 		return (EBUSY);
2114 
2115 	/* Remove from list and free */
2116 	*vfspp = vfsp->vfc_next;
2117 
2118 	maxtypenum = 0;
2119 
2120 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2121 		if (vfsp->vfc_typenum > maxtypenum)
2122 			maxtypenum = vfsp->vfc_typenum;
2123 
2124 	maxvfsconf = maxtypenum;
2125 	return 0;
2126 }
2127 
2128 /*
2129  * Check if vnode represents a disk device
2130  */
2131 int
2132 vn_isdisk(struct vnode *vp, int *errp)
2133 {
2134 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2135 		return (0);
2136 
2137 	return (1);
2138 }
2139 
2140 #ifdef DDB
2141 #include <machine/db_machdep.h>
2142 #include <ddb/db_interface.h>
2143 #include <ddb/db_output.h>
2144 
2145 void
2146 vfs_buf_print(void *b, int full,
2147     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2148 {
2149 	struct buf *bp = b;
2150 
2151 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2152 	      "  proc %p error %d flags %lb\n",
2153 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2154 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2155 
2156 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2157 	      "  data %p saveaddr %p dep %p iodone %p\n",
2158 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2159 	    bp->b_data, bp->b_saveaddr,
2160 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2161 
2162 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2163 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2164 
2165 #ifdef FFS_SOFTUPDATES
2166 	if (full)
2167 		softdep_print(bp, full, pr);
2168 #endif
2169 }
2170 
2171 const char *vtypes[] = { VTYPE_NAMES };
2172 const char *vtags[] = { VTAG_NAMES };
2173 
2174 void
2175 vfs_vnode_print(void *v, int full,
2176     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2177 {
2178 	struct vnode *vp = v;
2179 
2180 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2181 	      vp->v_tag > nitems(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
2182 	      vp->v_type > nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2183 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2184 
2185 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2186 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2187 	      vp->v_holdcnt, vp->v_numoutput);
2188 
2189 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2190 
2191 	if (full) {
2192 		struct buf *bp;
2193 
2194 		(*pr)("clean bufs:\n");
2195 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2196 			(*pr)(" bp %p\n", bp);
2197 			vfs_buf_print(bp, full, pr);
2198 		}
2199 
2200 		(*pr)("dirty bufs:\n");
2201 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2202 			(*pr)(" bp %p\n", bp);
2203 			vfs_buf_print(bp, full, pr);
2204 		}
2205 	}
2206 }
2207 
2208 void
2209 vfs_mount_print(struct mount *mp, int full,
2210     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2211 {
2212 	struct vfsconf *vfc = mp->mnt_vfc;
2213 	struct vnode *vp;
2214 	int cnt = 0;
2215 
2216 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2217 	    mp->mnt_flag, MNT_BITS,
2218 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2219 
2220 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2221             vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2222 	    vfc->vfc_refcount, vfc->vfc_flags);
2223 
2224 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2225 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2226 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2227 
2228 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2229 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2230 
2231 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2232 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2233 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2234 
2235  	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2236 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2237 
2238  	(*pr)("  syncreads %llu asyncreads = %llu\n",
2239 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2240 
2241 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2242 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2243 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2244 
2245 	(*pr)("locked vnodes:");
2246 	/* XXX would take mountlist lock, except ddb has no context */
2247 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2248 		if (VOP_ISLOCKED(vp)) {
2249 			if (!LIST_NEXT(vp, v_mntvnodes))
2250 				(*pr)(" %p", vp);
2251 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2252 				(*pr)("\n\t%p", vp);
2253 			else
2254 				(*pr)(", %p", vp);
2255 		}
2256 	(*pr)("\n");
2257 
2258 	if (full) {
2259 		(*pr)("all vnodes:\n\t");
2260 		/* XXX would take mountlist lock, except ddb has no context */
2261 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2262 			if (!LIST_NEXT(vp, v_mntvnodes))
2263 				(*pr)(" %p", vp);
2264 			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2265 				(*pr)(" %p,\n\t", vp);
2266 			else
2267 				(*pr)(" %p,", vp);
2268 		(*pr)("\n");
2269 	}
2270 }
2271 #endif /* DDB */
2272 
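/*
 * Copy the cached filesystem statistics of a mount point into sbp for
 * return to userland; a no-op (beyond the fs type name) when sbp is the
 * mount point's own mnt_stat.
 */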
2273 void
2274 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2275 {
2276 	const struct statfs *mbp;
2277 
2278 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2279 
2280 	if (sbp == (mbp = &mp->mnt_stat))
2281 		return;
2282 
2283 	sbp->f_fsid = mbp->f_fsid;
2284 	sbp->f_owner = mbp->f_owner;
2285 	sbp->f_flags = mbp->f_flags;
2286 	sbp->f_syncwrites = mbp->f_syncwrites;
2287 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2288 	sbp->f_syncreads = mbp->f_syncreads;
2289 	sbp->f_asyncreads = mbp->f_asyncreads;
2290 	sbp->f_namemax = mbp->f_namemax;
2291 	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
2292 	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
2293 	bcopy(mp->mnt_stat.f_mntfromspec, sbp->f_mntfromspec, MNAMELEN);
2294 	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
2295 	    sizeof(struct ufs_args));
2296 }
2297