/*	$OpenBSD: vfs_subr.c,v 1.277 2018/07/13 09:25:23 beck Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <netinet/in.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_vnode.h>

#include "softraid.h"

void sr_quiesce(void);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
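
/*
 * Illustrative sketch (assumption, not part of this file): these two
 * tables are normally consulted through the IFTOVT() and VTTOIF()
 * macros from <sys/vnode.h>, which index iftovt_tab[] by the S_IFMT
 * bits of a mode and vttoif_tab[] by a vnode type.  A filesystem
 * turning an on-disk mode into a vnode type might do:
 *
 *	enum vtype vt = IFTOVT(mode);
 *	mode_t ifmt = VTTOIF(vp->v_type);
 */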

int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

void vfs_unmountall(void);

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;
struct pool uvm_vnode_pool;

static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static inline int
rb_buf_compare(const struct buf *b1, const struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return(-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return(1);
	return(0);
}
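
/*
 * Usage sketch (assumption, mirroring how the buffer cache probes
 * this tree): with the comparator above keyed on b_lblkno, a buffer
 * belonging to a vnode can be looked up by logical block number with
 * the generated RBT operations, using a key buffer on the stack:
 *
 *	struct buf key, *bp;
 *
 *	key.b_lblkno = lblkno;
 *	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key);
 */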

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * initialvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
	    PR_WAITOK, "vnodes", NULL);
	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
	    PR_WAITOK, "uvmvnodes", NULL);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();

#ifdef NFSSERVER
	rn_init(sizeof(struct sockaddr_in));
#endif /* NFSSERVER */
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

#ifdef WITNESS
	if (flags & VB_DUPOK)
		rwflags |= RW_DUPOK;
#endif

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}
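
/*
 * Usage sketch (assumption): a reader that wants to walk a mount
 * point's vnode list without racing an unmount brackets the work with
 * vfs_busy()/vfs_unbusy(), e.g.:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
 *		... inspect mp->mnt_vnodelist ...
 *		vfs_unbusy(mp);
 *	}
 *
 * printlockedvnodes() below follows exactly this pattern.
 */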

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!TAILQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/*
	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
	 * the compiler do its job.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_filerev = VNOVAL;
	vap->va_vaflags = 0;
}
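
/*
 * Usage sketch (assumption): callers clear a struct vattr with
 * vattr_null() (usually via the VATTR_NULL() macro) and then set only
 * the fields they mean to change, so VOP_SETATTR() treats the
 * remaining VNOVAL fields as "leave alone".  Truncating a file to
 * zero length, for example:
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */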

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm->u_vnode = vp;
		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
		cache_tree_init(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		TAILQ_FOREACH(vp, listhd, v_freelist) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	cache_purge(vp);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
		vp->v_flag |= VISTTY;
	*vpp = vp;
	return (0);
}
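
/*
 * Usage sketch (assumption, modeled on mountroot code): a filesystem
 * bringing up the root device obtains its block device vnode with
 * bdevvp() and mounts on top of it:
 *
 *	struct vnode *rootvp;
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("mountroot: can't setup bdevvp");
 */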

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
			M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		nvp->v_specbitmap = NULL;
		if (nvp->v_type == VCHR &&
		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
			if (vp != NULLVP)
				nvp->v_specbitmap = vp->v_specbitmap;
			else
				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
				    M_VNODE, M_WAITOK | M_ZERO);
		}
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
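
/*
 * Usage sketch (assumption): the common pattern for temporarily using
 * a vnode found on some list is to take a locked reference with
 * vget(), do the work, and drop both the lock and the reference with
 * vput():
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... use vp ...
 *		vput(vp);
 *	}
 *
 * A non-zero return means the vnode was being cleaned out (or could
 * not be locked) and must not be touched.
 */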


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
		if (vp->v_mount != mp)
			goto loop;

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, NULL);
		}
		return (0);
	}

	/*
	 * If set, this is allowed to ignore vnodes which don't
	 * have changes pending to disk.
	 * XXX Might be nice to check per-fs "inode" flags, but
	 * generally the filesystem is sync'd already, right?
	 */
	if ((va->flags & IGNORECLEAN) &&
	    LIST_EMPTY(&vp->v_dirtyblkhd))
		return (0);

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
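
/*
 * Usage sketch (assumption, following the usual unmount shape): a
 * filesystem's VFS_UNMOUNT() handler flushes its vnodes with
 * vflush(), skipping a root vnode it still holds and forcing only
 * when asked to:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, rootvp, flags)) != 0)
 *		return (error);
 *
 * An EBUSY return means some vnode is still active and the unmount
 * cannot proceed without MNT_FORCE.
 */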

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	KASSERT(vp->v_uvcount == 0);

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, NULL);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
		}
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}
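
/*
 * Usage sketch (assumption): a device driver's detach routine calls
 * vdevgone() to revoke any vnodes that still name its minors, once
 * for each of its majors (bmaj, cmaj and nminors are the driver's
 * own values, hypothetical here):
 *
 *	vdevgone(bmaj, 0, nminors - 1, VBLK);
 *	vdevgone(cmaj, 0, nminors - 1, VCHR);
 */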

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		vfs_unbusy(mp);
	}

}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return(ret);
	}
	return (EOPNOTSUPP);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

#ifdef NFSSERVER
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int nplen, i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		/* fill in the kernel's ucred from userspace's xucred */
		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
			return (error);
		mp->mnt_flag |= MNT_DEFEXPORTED;
		goto finish;
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	/* fill in the kernel's ucred from userspace's xucred */
	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
		goto out;
	i = saddr->sa_family;
	switch (i) {
	case AF_INET:
		if ((rnh = nep->ne_rtable_inet) == NULL) {
			if (!rn_inithead((void **)&nep->ne_rtable_inet,
			    offsetof(struct sockaddr_in, sin_addr))) {
				error = ENOBUFS;
				goto out;
			}
			rnh = nep->ne_rtable_inet;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}
	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
finish:
	np->netc_exflags = argp->ex_flags;
	return (0);
out:
	free(np, M_NETADDR, nplen);
	return (error);
}

int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR, 0);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		rn_walktree(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, 0);
		nep->ne_rtable_inet = NULL;
	}
}
#endif /* NFSSERVER */

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
#ifdef NFSSERVER
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
#else
	return (ENOTSUP);
#endif /* NFSSERVER */
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
#ifdef NFSSERVER
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch(saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL)
				np = (struct netcred *)rn_match(saddr, rnh);
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
#else
	return (NULL);
#endif /* NFSSERVER */
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
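
/*
 * Usage sketch (assumption): a filesystem's VOP_ACCESS() handler
 * typically reduces to a vaccess() call on metadata it already holds;
 * for a hypothetical in-core inode "ip":
 *
 *	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
 *	    ip->i_uid, ip->i_gid, acc_mode, cred));
 */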

struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");

int
vfs_stall(struct proc *p, int stall)
{
	struct mount *mp;
	int allerror = 0, error;

	if (stall)
		rw_enter_write(&vfs_stall_lock);

	/*
	 * The loop variable mp is protected by vfs_busy() so that it cannot
	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
	 * lock order consistent with dounmount().
	 */
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (stall) {
			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
			if (error) {
				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
				allerror = error;
				continue;
			}
			uvm_vnp_sync(mp);
			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
			if (error) {
				printf("%s: failed to sync\n",
				    mp->mnt_stat.f_mntonname);
				vfs_unbusy(mp);
				allerror = error;
				continue;
			}
			mp->mnt_flag |= MNT_STALLED;
		} else {
			if (mp->mnt_flag & MNT_STALLED) {
				vfs_unbusy(mp);
				mp->mnt_flag &= ~MNT_STALLED;
			}
		}
	}

	if (!stall)
		rw_exit_write(&vfs_stall_lock);

	return (allerror);
}

void
vfs_stall_barrier(void)
{
	rw_enter_read(&vfs_stall_lock);
	rw_exit_read(&vfs_stall_lock);
}
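
/*
 * Usage sketch (assumption, modeled on the suspend path): a caller
 * that needs a quiescent VFS stalls all mounts, does its work and
 * then unstalls; syscall paths that call vfs_stall_barrier() above
 * will block for the duration:
 *
 *	vfs_stall(curproc, 1);
 *	... suspend or hibernate work ...
 *	vfs_stall(curproc, 0);
 */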

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
			continue;
		/* XXX Here is a race, the next pointer is not locked. */
		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(struct proc *p)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(p, NULL, NULL);
		vfs_unmountall();
	}

#if NSOFTRAID > 0
	sr_quiesce();
#endif

	if (vfs_syncwait(p, 1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * perform sync() operation and wait for buffers to flush.
 */
int
vfs_syncwait(struct proc *p, int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held())
			hold_count = __mp_release_all(&kernel_lock);
		else
			hold_count = 0;
#endif
		DELAY(40000 * iter);
#ifdef MULTIPROCESSOR
		if (hold_count)
			__mp_acquire_count(&kernel_lock, hold_count);
#endif
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("%s: vp isn't locked, vp %p", __func__, vp);
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("%s: dirty bufs, vp %p", __func__, vp);
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("%s: flush failed, vp %p", __func__, vp);
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}
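
/*
 * Usage sketch (assumption): buffer cache code pairs bgetvp() with a
 * later brelvp() (or buf_replacevnode() below), always under
 * splbio(); the association takes a vhold() reference that brelvp()
 * drops again:
 *
 *	s = splbio();
 *	bgetvp(vp, bp);
 *	...
 *	brelvp(bp);
 *	splx(s);
 */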
2010 
2011 /*
2012  * Disassociate a buffer from a vnode.
2013  *
2014  * Manipulates vnode buffer queues. Must be called at splbio().
2015  */
2016 void
2017 brelvp(struct buf *bp)
2018 {
2019 	struct vnode *vp;
2020 
2021 	splassert(IPL_BIO);
2022 
2023 	if ((vp = bp->b_vp) == (struct vnode *) 0)
2024 		panic("brelvp: NULL");
2025 	/*
2026 	 * Delete from old vnode list, if on one.
2027 	 */
2028 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2029 		bufremvn(bp);
2030 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2031 	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2032 		vp->v_bioflag &= ~VBIOONSYNCLIST;
2033 		LIST_REMOVE(vp, v_synclist);
2034 	}
2035 	bp->b_vp = NULL;
2036 
2037 	vdrop(vp);
2038 }
2039 
2040 /*
2041  * Replaces the current vnode associated with the buffer, if any,
2042  * with a new vnode.
2043  *
2044  * If an output I/O is pending on the buffer, the old vnode
2045  * I/O count is adjusted.
2046  *
2047  * Ignores vnode buffer queues. Must be called at splbio().
2048  */
2049 void
2050 buf_replacevnode(struct buf *bp, struct vnode *newvp)
2051 {
2052 	struct vnode *oldvp = bp->b_vp;
2053 
2054 	splassert(IPL_BIO);
2055 
2056 	if (oldvp)
2057 		brelvp(bp);
2058 
2059 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2060 		newvp->v_numoutput++;	/* put it on swapdev */
2061 		vwakeup(oldvp);
2062 	}
2063 
2064 	bgetvp(newvp, bp);
2065 	bufremvn(bp);
2066 }
2067 
2068 /*
2069  * Used to assign buffers to the appropriate clean or dirty list on
2070  * the vnode and to add newly dirty vnodes to the appropriate
2071  * filesystem syncer list.
2072  *
2073  * Manipulates vnode buffer queues. Must be called at splbio().
2074  */
2075 void
2076 reassignbuf(struct buf *bp)
2077 {
2078 	struct buflists *listheadp;
2079 	int delay;
2080 	struct vnode *vp = bp->b_vp;
2081 
2082 	splassert(IPL_BIO);
2083 
2084 	/*
2085 	 * Delete from old vnode list, if on one.
2086 	 */
2087 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2088 		bufremvn(bp);
2089 
2090 	/*
2091 	 * If dirty, put on list of dirty buffers;
2092 	 * otherwise insert onto list of clean buffers.
2093 	 */
2094 	if ((bp->b_flags & B_DELWRI) == 0) {
2095 		listheadp = &vp->v_cleanblkhd;
2096 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2097 		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2098 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2099 			LIST_REMOVE(vp, v_synclist);
2100 		}
2101 	} else {
2102 		listheadp = &vp->v_dirtyblkhd;
2103 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
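			/*
			 * Metadata-heavy vnodes are synced sooner:
			 * directories at half the normal delay,
			 * mounted block devices at a third of it,
			 * everything else at the full syncdelay.
			 */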
2104 			switch (vp->v_type) {
2105 			case VDIR:
2106 				delay = syncdelay / 2;
2107 				break;
2108 			case VBLK:
2109 				if (vp->v_specmountpoint != NULL) {
2110 					delay = syncdelay / 3;
2111 					break;
2112 				}
2113 				/* FALLTHROUGH */
2114 			default:
2115 				delay = syncdelay;
2116 			}
2117 			vn_syncer_add_to_worklist(vp, delay);
2118 		}
2119 	}
2120 	bufinsvn(bp, listheadp);
2121 }
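
/*
 * Illustrative sketch: the delayed-write path is the typical caller,
 * marking the buffer dirty and letting reassignbuf() move it to the
 * dirty queue and schedule the vnode with the syncer:
 *
 *	int s = splbio();
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp);
 *	splx(s);
 */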
2122 
2123 int
2124 vfs_register(struct vfsconf *vfs)
2125 {
2126 	struct vfsconf *vfsp;
2127 	struct vfsconf **vfspp;
2128 
2129 #ifdef DIAGNOSTIC
2130 	/* Sanity check: a newly registered filesystem cannot be in use. */
2131 	if (vfs->vfc_refcount != 0)
2132 		printf("vfs_register called with vfc_refcount > 0\n");
2133 #endif
2134 
2135 	/* Check if filesystem already known */
2136 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2137 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2138 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2139 			return (EEXIST);
2140 
2141 	if (vfs->vfc_typenum > maxvfsconf)
2142 		maxvfsconf = vfs->vfc_typenum;
2143 
2144 	vfs->vfc_next = NULL;
2145 
2146 	/* Add to the end of the list */
2147 	*vfspp = vfs;
2148 
2149 	/* Call vfs_init() */
2150 	if (vfs->vfc_vfsops->vfs_init)
2151 		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2152 
2153 	return (0);
2154 }
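
/*
 * Illustrative sketch (hypothetical "examplefs"): a filesystem is
 * registered by filling in a vfsconf and passing it here; only the
 * fields this function reads are shown:
 *
 *	static struct vfsconf examplefs_vfsconf = {
 *		.vfc_vfsops = &examplefs_vfsops,
 *		.vfc_name = "examplefs",
 *		.vfc_typenum = 99,
 *	};
 *
 *	error = vfs_register(&examplefs_vfsconf);
 */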
2155 
2156 int
2157 vfs_unregister(struct vfsconf *vfs)
2158 {
2159 	struct vfsconf *vfsp;
2160 	struct vfsconf **vfspp;
2161 	int maxtypenum;
2162 
2163 	/* Find our vfsconf struct */
2164 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2165 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2166 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2167 			break;
2168 	}
2169 
2170 	if (!vfsp)			/* Not found */
2171 		return (ENOENT);
2172 
2173 	if (vfsp->vfc_refcount)		/* In use */
2174 		return (EBUSY);
2175 
2176 	/* Remove from list and free */
2177 	*vfspp = vfsp->vfc_next;
2178 
2179 	maxtypenum = 0;
2180 
2181 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2182 		if (vfsp->vfc_typenum > maxtypenum)
2183 			maxtypenum = vfsp->vfc_typenum;
2184 
2185 	maxvfsconf = maxtypenum;
2186 	return (0);
2187 }
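
/*
 * The converse of the sketch above: unregistering the hypothetical
 * examplefs must tolerate EBUSY while instances remain mounted:
 *
 *	if (vfs_unregister(&examplefs_vfsconf) == EBUSY)
 *		return (EBUSY);
 */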
2188 
2189 /*
2190  * Check if a vnode represents a disk device; errp is unused.
2191  */
2192 int
2193 vn_isdisk(struct vnode *vp, int *errp)
2194 {
2195 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2196 		return (0);
2197 
2198 	return (1);
2199 }
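
/*
 * Illustrative sketch: callers gate disk-only operations on this,
 * e.g.:
 *
 *	if (!vn_isdisk(vp, NULL))
 *		return (ENOTBLK);
 */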
2200 
2201 #ifdef DDB
2202 #include <machine/db_machdep.h>
2203 #include <ddb/db_interface.h>
2204 
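/*
 * The printers below back the ddb(4) "show buf", "show vnode" and
 * "show mount" commands.  Each takes its output function as an
 * argument so the caller chooses where the text goes (typically
 * db_printf).
 */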
2205 void
2206 vfs_buf_print(void *b, int full,
2207     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2208 {
2209 	struct buf *bp = b;
2210 
2211 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2212 	      "  proc %p error %d flags %lb\n",
2213 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2214 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2215 
2216 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2217 	      "  data %p saveaddr %p dep %p iodone %p\n",
2218 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2219 	    bp->b_data, bp->b_saveaddr,
2220 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2221 
2222 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2223 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2224 
2225 #ifdef FFS_SOFTUPDATES
2226 	if (full)
2227 		softdep_print(bp, full, pr);
2228 #endif
2229 }
2230 
2231 const char *vtypes[] = { VTYPE_NAMES };
2232 const char *vtags[] = { VTAG_NAMES };
2233 
2234 void
2235 vfs_vnode_print(void *v, int full,
2236     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2237 {
2238 	struct vnode *vp = v;
2239 
2240 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2241 	      (u_int)vp->v_tag >= nitems(vtags) ? "<unk>" : vtags[vp->v_tag],
2242 	      vp->v_tag,
2243 	      (u_int)vp->v_type >= nitems(vtypes) ? "<unk>" : vtypes[vp->v_type],
2244 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2245 
2246 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2247 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2248 	      vp->v_holdcnt, vp->v_numoutput);
2249 
2250 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2251 
2252 	if (full) {
2253 		struct buf *bp;
2254 
2255 		(*pr)("clean bufs:\n");
2256 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2257 			(*pr)(" bp %p\n", bp);
2258 			vfs_buf_print(bp, full, pr);
2259 		}
2260 
2261 		(*pr)("dirty bufs:\n");
2262 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2263 			(*pr)(" bp %p\n", bp);
2264 			vfs_buf_print(bp, full, pr);
2265 		}
2266 	}
2267 }
2268 
2269 void
2270 vfs_mount_print(struct mount *mp, int full,
2271     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2272 {
2273 	struct vfsconf *vfc = mp->mnt_vfc;
2274 	struct vnode *vp;
2275 	int cnt;
2276 
2277 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2278 	    mp->mnt_flag, MNT_BITS,
2279 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2280 
2281 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2282 	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2283 	    vfc->vfc_refcount, vfc->vfc_flags);
2284 
2285 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2286 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2287 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2288 
2289 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2290 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2291 
2292 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2293 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2294 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2295 
2296 	(*pr)("  syncwrites %llu asyncwrites %llu\n",
2297 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2298 
2299 	(*pr)("  syncreads %llu asyncreads %llu\n",
2300 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2301 
2302 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2303 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2304 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2305 
2306 	(*pr)("locked vnodes:");
2307 	/* XXX would take mountlist lock, except ddb has no context */
2308 	cnt = 0;
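	/*
	 * Wrap the pointer list at roughly 72 columns: each entry
	 * costs two hex digits per pointer byte plus room for a
	 * "0x" prefix and ", " separator.
	 */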
2309 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2310 		if (VOP_ISLOCKED(vp)) {
2311 			if (cnt == 0)
2312 				(*pr)("\n  %p", vp);
2313 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2314 				(*pr)(",\n  %p", vp);
2315 			else
2316 				(*pr)(", %p", vp);
2317 			cnt++;
2318 		}
2319 	}
2320 	(*pr)("\n");
2321 
2322 	if (full) {
2323 		(*pr)("all vnodes:");
2324 		/* XXX would take mountlist lock, except ddb has no context */
2325 		cnt = 0;
2326 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2327 			if (cnt == 0)
2328 				(*pr)("\n  %p", vp);
2329 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2330 				(*pr)(",\n  %p", vp);
2331 			else
2332 				(*pr)(", %p", vp);
2333 			cnt++;
2334 		}
2335 		(*pr)("\n");
2336 	}
2337 }
2338 #endif /* DDB */
2339 
2340 void
2341 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2342 {
2343 	const struct statfs *mbp;
2344 
2345 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2346 
2347 	if (sbp == (mbp = &mp->mnt_stat))
2348 		return;
2349 
2350 	sbp->f_fsid = mbp->f_fsid;
2351 	sbp->f_owner = mbp->f_owner;
2352 	sbp->f_flags = mbp->f_flags;
2353 	sbp->f_syncwrites = mbp->f_syncwrites;
2354 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2355 	sbp->f_syncreads = mbp->f_syncreads;
2356 	sbp->f_asyncreads = mbp->f_asyncreads;
2357 	sbp->f_namemax = mbp->f_namemax;
2358 	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2359 	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2360 	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2361 	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2362 	    sizeof(union mount_info));
2363 }
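
/*
 * Illustrative sketch: a filesystem's statfs implementation fills
 * the generic fields first, then its own counters, e.g.:
 *
 *	copy_statfs_info(sbp, mp);
 *	sbp->f_bfree = fs_free_blocks(fs);	(hypothetical helper)
 */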
2364