xref: /openbsd-src/sys/kern/vfs_subr.c (revision f993456ddff9448d4e6b0f00a524c98af2840f43)
1 /*	$OpenBSD: vfs_subr.c,v 1.275 2018/06/06 19:02:38 bluhm Exp $	*/
2 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38  */
39 
40 /*
41  * External virtual filesystem routines
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/time.h>
50 #include <sys/fcntl.h>
51 #include <sys/kernel.h>
52 #include <sys/conf.h>
53 #include <sys/vnode.h>
54 #include <sys/lock.h>
55 #include <sys/stat.h>
56 #include <sys/acct.h>
57 #include <sys/namei.h>
58 #include <sys/ucred.h>
59 #include <sys/buf.h>
60 #include <sys/errno.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/syscallargs.h>
64 #include <sys/pool.h>
65 #include <sys/tree.h>
66 #include <sys/specdev.h>
67 
68 #include <netinet/in.h>
69 
70 #include <uvm/uvm_extern.h>
71 #include <uvm/uvm_vnode.h>
72 
73 #include "softraid.h"
74 
75 void sr_quiesce(void);
76 
77 enum vtype iftovt_tab[16] = {
78 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80 };
81 
82 int	vttoif_tab[9] = {
83 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84 	S_IFSOCK, S_IFIFO, S_IFMT,
85 };
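
/*
 * These tables back the IFTOVT()/VTTOIF() macros in <sys/vnode.h>: for
 * instance IFTOVT(S_IFDIR) indexes iftovt_tab with (S_IFDIR >> 12) and
 * yields VDIR, while VTTOIF(VDIR) maps back to S_IFDIR.
 */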
86 
87 int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88 int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89 
90 /*
91  * Insq/Remq for the vnode usage lists.
92  */
93 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94 #define	bufremvn(bp) {							\
95 	LIST_REMOVE(bp, b_vnbufs);					\
96 	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97 }
98 
99 struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100 struct freelst vnode_free_list;	/* vnode free list */
101 
102 struct mntlist mountlist;	/* mounted filesystem list */
103 
104 void	vclean(struct vnode *, int, struct proc *);
105 
106 void insmntque(struct vnode *, struct mount *);
107 int getdevvp(dev_t, struct vnode **, enum vtype);
108 
109 int vfs_hang_addrlist(struct mount *, struct netexport *,
110 				  struct export_args *);
111 int vfs_free_netcred(struct radix_node *, void *, u_int);
112 void vfs_free_addrlist(struct netexport *);
113 void vputonfreelist(struct vnode *);
114 
115 int vflush_vnode(struct vnode *, void *);
116 int maxvnodes;
117 
118 void vfs_unmountall(void);
119 
120 #ifdef DEBUG
121 void printlockedvnodes(void);
122 #endif
123 
124 struct pool vnode_pool;
125 struct pool uvm_vnode_pool;
126 
127 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
128 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
129 
130 static inline int
131 rb_buf_compare(const struct buf *b1, const struct buf *b2)
132 {
133 	if (b1->b_lblkno < b2->b_lblkno)
134 		return(-1);
135 	if (b1->b_lblkno > b2->b_lblkno)
136 		return(1);
137 	return(0);
138 }
139 
140 /*
141  * Initialize the vnode management data structures.
142  */
143 void
144 vntblinit(void)
145 {
146 	/* buffer cache may need a vnode for each buffer */
147 	maxvnodes = 2 * initialvnodes;
148 	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
149 	    PR_WAITOK, "vnodes", NULL);
150 	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
151 	    PR_WAITOK, "uvmvnodes", NULL);
152 	TAILQ_INIT(&vnode_hold_list);
153 	TAILQ_INIT(&vnode_free_list);
154 	TAILQ_INIT(&mountlist);
155 	/*
156 	 * Initialize the filesystem syncer.
157 	 */
158 	vn_initialize_syncerd();
159 
160 #ifdef NFSSERVER
161 	rn_init(sizeof(struct sockaddr_in));
162 #endif /* NFSSERVER */
163 }
164 
165 /*
166  * Mark a mount point as busy. Used to synchronize access and to delay
167  * unmounting.
168  *
169  * The default behaviour is to attempt getting a READ lock; if an unmount
170  * is in progress, wait for it to finish and then return failure.
171  */
172 int
173 vfs_busy(struct mount *mp, int flags)
174 {
175 	int rwflags = 0;
176 
177 	/* new mountpoints need their lock initialised */
178 	if (mp->mnt_lock.rwl_name == NULL)
179 		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
180 
181 	if (flags & VB_WRITE)
182 		rwflags |= RW_WRITE;
183 	else
184 		rwflags |= RW_READ;
185 
186 	if (flags & VB_WAIT)
187 		rwflags |= RW_SLEEPFAIL;
188 	else
189 		rwflags |= RW_NOSLEEP;
190 
191 #ifdef WITNESS
192 	if (flags & VB_DUPOK)
193 		rwflags |= RW_DUPOK;
194 #endif
195 
196 	if (rw_enter(&mp->mnt_lock, rwflags))
197 		return (EBUSY);
198 
199 	return (0);
200 }
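
/*
 * Illustrative sketch (not compiled): the usual vfs_busy()/vfs_unbusy()
 * pairing around work that must not race with an unmount.  The error
 * handling shown is an assumption, not a prescription.
 *
 *	if (vfs_busy(mp, VB_READ|VB_WAIT))
 *		return (ENOENT);
 *	... work on the mount point ...
 *	vfs_unbusy(mp);
 */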
201 
202 /*
203  * Free a busy file system
204  */
205 void
206 vfs_unbusy(struct mount *mp)
207 {
208 	rw_exit(&mp->mnt_lock);
209 }
210 
211 int
212 vfs_isbusy(struct mount *mp)
213 {
214 	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
215 		return (1);
216 	else
217 		return (0);
218 }
219 
220 /*
221  * Lookup a filesystem type, and if found allocate and initialize
222  * a mount structure for it.
223  *
224  * Devname is usually updated by mount(8) after booting.
225  */
226 int
227 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
228 {
229 	struct vfsconf *vfsp;
230 	struct mount *mp;
231 
232 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
233 		if (!strcmp(vfsp->vfc_name, fstypename))
234 			break;
235 	if (vfsp == NULL)
236 		return (ENODEV);
237 	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
238 	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
239 	LIST_INIT(&mp->mnt_vnodelist);
240 	mp->mnt_vfc = vfsp;
241 	mp->mnt_op = vfsp->vfc_vfsops;
242 	mp->mnt_flag = MNT_RDONLY;
243 	mp->mnt_vnodecovered = NULLVP;
244 	vfsp->vfc_refcount++;
245 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
246 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
247 	mp->mnt_stat.f_mntonname[0] = '/';
248 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
249 	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
250 	*mpp = mp;
251  	return (0);
252  }
253 
254 /*
255  * Lookup a mount point by filesystem identifier.
256  */
257 struct mount *
258 vfs_getvfs(fsid_t *fsid)
259 {
260 	struct mount *mp;
261 
262 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
263 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
264 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
265 			return (mp);
266 		}
267 	}
268 
269 	return (NULL);
270 }
271 
272 
273 /*
274  * Get a new unique fsid
275  */
276 void
277 vfs_getnewfsid(struct mount *mp)
278 {
279 	static u_short xxxfs_mntid;
280 
281 	fsid_t tfsid;
282 	int mtype;
283 
284 	mtype = mp->mnt_vfc->vfc_typenum;
285 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
286 	mp->mnt_stat.f_fsid.val[1] = mtype;
287 	if (xxxfs_mntid == 0)
288 		++xxxfs_mntid;
289 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
290 	tfsid.val[1] = mtype;
291 	if (!TAILQ_EMPTY(&mountlist)) {
292 		while (vfs_getvfs(&tfsid)) {
293 			tfsid.val[0]++;
294 			xxxfs_mntid++;
295 		}
296 	}
297 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
298 }
299 
300 /*
301  * Set vnode attributes to VNOVAL
302  */
303 void
304 vattr_null(struct vattr *vap)
305 {
306 
307 	vap->va_type = VNON;
308 	/*
309 	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
310 	 * with 2^31-1 instead of 2^64-1.  Just write them out and let
311 	 * the compiler do its job.
312 	 */
313 	vap->va_mode = VNOVAL;
314 	vap->va_nlink = VNOVAL;
315 	vap->va_uid = VNOVAL;
316 	vap->va_gid = VNOVAL;
317 	vap->va_fsid = VNOVAL;
318 	vap->va_fileid = VNOVAL;
319 	vap->va_size = VNOVAL;
320 	vap->va_blocksize = VNOVAL;
321 	vap->va_atime.tv_sec = VNOVAL;
322 	vap->va_atime.tv_nsec = VNOVAL;
323 	vap->va_mtime.tv_sec = VNOVAL;
324 	vap->va_mtime.tv_nsec = VNOVAL;
325 	vap->va_ctime.tv_sec = VNOVAL;
326 	vap->va_ctime.tv_nsec = VNOVAL;
327 	vap->va_gen = VNOVAL;
328 	vap->va_flags = VNOVAL;
329 	vap->va_rdev = VNOVAL;
330 	vap->va_bytes = VNOVAL;
331 	vap->va_filerev = VNOVAL;
332 	vap->va_vaflags = 0;
333 }
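
/*
 * Illustrative sketch (not compiled): callers of VOP_SETATTR() typically
 * start from a null vattr so that only the fields they fill in are
 * changed; the truncate-style use below is an assumption for
 * illustration.
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */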
334 
335 /*
336  * Routines having to do with the management of the vnode table.
337  */
338 long numvnodes;
339 
340 /*
341  * Return the next vnode from the free list.
342  */
343 int
344 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
345     struct vnode **vpp)
346 {
347 	struct proc *p = curproc;
348 	struct freelst *listhd;
349 	static int toggle;
350 	struct vnode *vp;
351 	int s;
352 
353 	/*
354 	 * Allow maxvnodes to increase if the buffer cache itself
355 	 * is big enough to justify it. (We never shrink it.)
356 	 */
357 	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
358 	    : maxvnodes;
359 
360 	/*
361 	 * We must choose whether to allocate a new vnode or recycle an
362 	 * existing one. The criterion for allocating a new one is that
363 	 * the total number of vnodes is less than the number desired or
364 	 * there are no vnodes on either free list. Generally we only
365 	 * want to recycle vnodes that have no buffers associated with
366 	 * them, so we look first on the vnode_free_list. If it is empty,
367 	 * we next consider vnodes with referencing buffers on the
368 	 * vnode_hold_list. The toggle ensures that half the time we
369 	 * will use a buffer from the vnode_hold_list, and half the time
370 	 * we will allocate a new one unless the list has grown to twice
371 	 * the desired size. We are reluctant to recycle vnodes from the
372 	 * vnode_hold_list because we will lose the identity of all their
373 	 * referencing buffers.
374 	 */
375 	toggle ^= 1;
376 	if (numvnodes / 2 > maxvnodes)
377 		toggle = 0;
378 
379 	s = splbio();
380 	if ((numvnodes < maxvnodes) ||
381 	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
382 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
383 		splx(s);
384 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
385 		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
386 		vp->v_uvm->u_vnode = vp;
387 		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
388 		cache_tree_init(&vp->v_nc_tree);
389 		TAILQ_INIT(&vp->v_cache_dst);
390 		numvnodes++;
391 	} else {
392 		TAILQ_FOREACH(vp, listhd, v_freelist) {
393 			if (VOP_ISLOCKED(vp) == 0)
394 				break;
395 		}
396 		/*
397 		 * Unless this is a bad time of the month, at most
398 		 * the first NCPUS items on the free list are
399 		 * locked, so this is close enough to being empty.
400 		 */
401 		if (vp == NULL) {
402 			splx(s);
403 			tablefull("vnode");
404 			*vpp = 0;
405 			return (ENFILE);
406 		}
407 
408 #ifdef DIAGNOSTIC
409 		if (vp->v_usecount) {
410 			vprint("free vnode", vp);
411 			panic("free vnode isn't");
412 		}
413 #endif
414 
415 		TAILQ_REMOVE(listhd, vp, v_freelist);
416 		vp->v_bioflag &= ~VBIOONFREELIST;
417 		splx(s);
418 
419 		if (vp->v_type != VBAD)
420 			vgonel(vp, p);
421 #ifdef DIAGNOSTIC
422 		if (vp->v_data) {
423 			vprint("cleaned vnode", vp);
424 			panic("cleaned vnode isn't");
425 		}
426 		s = splbio();
427 		if (vp->v_numoutput)
428 			panic("Clean vnode has pending I/O's");
429 		splx(s);
430 #endif
431 		vp->v_flag = 0;
432 		vp->v_socket = 0;
433 	}
434 	cache_purge(vp);
435 	vp->v_type = VNON;
436 	vp->v_tag = tag;
437 	vp->v_op = vops;
438 	insmntque(vp, mp);
439 	*vpp = vp;
440 	vp->v_usecount = 1;
441 	vp->v_data = 0;
442 	return (0);
443 }
444 
445 /*
446  * Move a vnode from one mount queue to another.
447  */
448 void
449 insmntque(struct vnode *vp, struct mount *mp)
450 {
451 	/*
452 	 * Delete from old mount point vnode list, if on one.
453 	 */
454 	if (vp->v_mount != NULL)
455 		LIST_REMOVE(vp, v_mntvnodes);
456 	/*
457 	 * Insert into list of vnodes for the new mount point, if available.
458 	 */
459 	if ((vp->v_mount = mp) != NULL)
460 		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
461 }
462 
463 /*
464  * Create a vnode for a block device.
465  * Used for root filesystem, argdev, and swap areas.
466  * Also used for memory file system special devices.
467  */
468 int
469 bdevvp(dev_t dev, struct vnode **vpp)
470 {
471 	return (getdevvp(dev, vpp, VBLK));
472 }
473 
474 /*
475  * Create a vnode for a character device.
476  * Used for console handling.
477  */
478 int
479 cdevvp(dev_t dev, struct vnode **vpp)
480 {
481 	return (getdevvp(dev, vpp, VCHR));
482 }
483 
484 /*
485  * Create a vnode for a device.
486  * Used by bdevvp (block device) for root file system etc.,
487  * and by cdevvp (character device) for console.
488  */
489 int
490 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
491 {
492 	struct vnode *vp;
493 	struct vnode *nvp;
494 	int error;
495 
496 	if (dev == NODEV) {
497 		*vpp = NULLVP;
498 		return (0);
499 	}
500 	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
501 	if (error) {
502 		*vpp = NULLVP;
503 		return (error);
504 	}
505 	vp = nvp;
506 	vp->v_type = type;
507 	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
508 		vput(vp);
509 		vp = nvp;
510 	}
511 	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
512 		vp->v_flag |= VISTTY;
513 	*vpp = vp;
514 	return (0);
515 }
516 
517 /*
518  * Check to see if the new vnode represents a special device
519  * for which we already have a vnode (either because of
520  * bdevvp() or because of a different vnode representing
521  * the same block device). If such an alias exists, deallocate
522  * the existing contents and return the aliased vnode. The
523  * caller is responsible for filling it with its new contents.
524  */
525 struct vnode *
526 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
527 {
528 	struct proc *p = curproc;
529 	struct vnode *vp;
530 	struct vnode **vpp;
531 
532 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
533 		return (NULLVP);
534 
535 	vpp = &speclisth[SPECHASH(nvp_rdev)];
536 loop:
537 	for (vp = *vpp; vp; vp = vp->v_specnext) {
538 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
539 			continue;
540 		}
541 		/*
542 		 * Alias, but not in use, so flush it out.
543 		 */
544 		if (vp->v_usecount == 0) {
545 			vgonel(vp, p);
546 			goto loop;
547 		}
548 		if (vget(vp, LK_EXCLUSIVE)) {
549 			goto loop;
550 		}
551 		break;
552 	}
553 
554 	/*
555 	 * The common case is handled in the if statement below.
556 	 */
557 	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
558 		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
559 			M_WAITOK);
560 		nvp->v_rdev = nvp_rdev;
561 		nvp->v_hashchain = vpp;
562 		nvp->v_specnext = *vpp;
563 		nvp->v_specmountpoint = NULL;
564 		nvp->v_speclockf = NULL;
565 		nvp->v_specbitmap = NULL;
566 		if (nvp->v_type == VCHR &&
567 		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
568 		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
569 			if (vp != NULLVP)
570 				nvp->v_specbitmap = vp->v_specbitmap;
571 			else
572 				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
573 				    M_VNODE, M_WAITOK | M_ZERO);
574 		}
575 		*vpp = nvp;
576 		if (vp != NULLVP) {
577 			nvp->v_flag |= VALIASED;
578 			vp->v_flag |= VALIASED;
579 			vput(vp);
580 		}
581 		return (NULLVP);
582 	}
583 
584 	/*
585 	 * This code handles the uncommon case. It is reached when we
586 	 * found an alias that was VT_NON with a vtype of VBLK. This
587 	 * means we found a block device that was created using
588 	 * bdevvp.
589 	 * An example of such a vnode is the root partition device vnode
590 	 * created in ffs_mountroot.
591 	 *
592 	 * The vnodes created by bdevvp should not be aliased (why?).
593 	 */
594 
595 	VOP_UNLOCK(vp);
596 	vclean(vp, 0, p);
597 	vp->v_op = nvp->v_op;
598 	vp->v_tag = nvp->v_tag;
599 	nvp->v_type = VNON;
600 	insmntque(vp, mp);
601 	return (vp);
602 }
603 
604 /*
605  * Grab a particular vnode from the free list, increment its
606  * reference count and lock it. If the vnode lock bit is set,
607  * the vnode is being eliminated in vgone. In that case, we
608  * cannot grab it, so the process is awakened when the
609  * transition is completed, and an error code is returned to
610  * indicate that the vnode is no longer usable, possibly
611  * having been changed to a new file system type.
612  */
613 int
614 vget(struct vnode *vp, int flags)
615 {
616 	int error, s, onfreelist;
617 
618 	/*
619 	 * If the vnode is in the process of being cleaned out for
620 	 * another use, we wait for the cleaning to finish and then
621 	 * return failure. Cleaning is determined by checking that
622 	 * the VXLOCK flag is set.
623 	 */
624 
625 	if (vp->v_flag & VXLOCK) {
626 		if (flags & LK_NOWAIT) {
627 			return (EBUSY);
628 		}
629 
630 		vp->v_flag |= VXWANT;
631 		tsleep(vp, PINOD, "vget", 0);
632 		return (ENOENT);
633 	}
634 
635 	onfreelist = vp->v_bioflag & VBIOONFREELIST;
636 	if (vp->v_usecount == 0 && onfreelist) {
637 		s = splbio();
638 		if (vp->v_holdcnt > 0)
639 			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
640 		else
641 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
642 		vp->v_bioflag &= ~VBIOONFREELIST;
643 		splx(s);
644 	}
645 
646  	vp->v_usecount++;
647 	if (flags & LK_TYPE_MASK) {
648 		if ((error = vn_lock(vp, flags)) != 0) {
649 			vp->v_usecount--;
650 			if (vp->v_usecount == 0 && onfreelist)
651 				vputonfreelist(vp);
652 		}
653 		return (error);
654 	}
655 
656 	return (0);
657 }
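
/*
 * Illustrative sketch (not compiled): taking a temporary reference on a
 * vnode and dropping it again.  Locking with LK_EXCLUSIVE and releasing
 * with vput() is one common pairing; an unlocked vget() would instead be
 * released with vrele().
 *
 *	if (vget(vp, LK_EXCLUSIVE) != 0)
 *		return;		(vnode was being cleaned out)
 *	... use the locked, referenced vnode ...
 *	vput(vp);
 */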
658 
659 
660 /* Vnode reference. */
661 void
662 vref(struct vnode *vp)
663 {
664 #ifdef DIAGNOSTIC
665 	if (vp->v_usecount == 0)
666 		panic("vref used where vget required");
667 	if (vp->v_type == VNON)
668 		panic("vref on a VNON vnode");
669 #endif
670 	vp->v_usecount++;
671 }
672 
673 void
674 vputonfreelist(struct vnode *vp)
675 {
676 	int s;
677 	struct freelst *lst;
678 
679 	s = splbio();
680 #ifdef DIAGNOSTIC
681 	if (vp->v_usecount != 0)
682 		panic("Use count is not zero!");
683 
684 	if (vp->v_bioflag & VBIOONFREELIST) {
685 		vprint("vnode already on free list: ", vp);
686 		panic("vnode already on free list");
687 	}
688 #endif
689 
690 	vp->v_bioflag |= VBIOONFREELIST;
691 
692 	if (vp->v_holdcnt > 0)
693 		lst = &vnode_hold_list;
694 	else
695 		lst = &vnode_free_list;
696 
697 	if (vp->v_type == VBAD)
698 		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
699 	else
700 		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
701 
702 	splx(s);
703 }
704 
705 /*
706  * vput(), just unlock and vrele()
707  */
708 void
709 vput(struct vnode *vp)
710 {
711 	struct proc *p = curproc;
712 
713 #ifdef DIAGNOSTIC
714 	if (vp == NULL)
715 		panic("vput: null vp");
716 #endif
717 
718 #ifdef DIAGNOSTIC
719 	if (vp->v_usecount == 0) {
720 		vprint("vput: bad ref count", vp);
721 		panic("vput: ref cnt");
722 	}
723 #endif
724 	vp->v_usecount--;
725 	if (vp->v_usecount > 0) {
726 		VOP_UNLOCK(vp);
727 		return;
728 	}
729 
730 #ifdef DIAGNOSTIC
731 	if (vp->v_writecount != 0) {
732 		vprint("vput: bad writecount", vp);
733 		panic("vput: v_writecount != 0");
734 	}
735 #endif
736 
737 	VOP_INACTIVE(vp, p);
738 
739 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
740 		vputonfreelist(vp);
741 }
742 
743 /*
744  * Vnode release - use for active VNODES.
745  * If count drops to zero, call inactive routine and return to freelist.
746  * Returns 0 if it did not sleep.
747  */
748 int
749 vrele(struct vnode *vp)
750 {
751 	struct proc *p = curproc;
752 
753 #ifdef DIAGNOSTIC
754 	if (vp == NULL)
755 		panic("vrele: null vp");
756 #endif
757 #ifdef DIAGNOSTIC
758 	if (vp->v_usecount == 0) {
759 		vprint("vrele: bad ref count", vp);
760 		panic("vrele: ref cnt");
761 	}
762 #endif
763 	vp->v_usecount--;
764 	if (vp->v_usecount > 0) {
765 		return (0);
766 	}
767 
768 #ifdef DIAGNOSTIC
769 	if (vp->v_writecount != 0) {
770 		vprint("vrele: bad writecount", vp);
771 		panic("vrele: v_writecount != 0");
772 	}
773 #endif
774 
775 	if (vn_lock(vp, LK_EXCLUSIVE)) {
776 #ifdef DIAGNOSTIC
777 		vprint("vrele: cannot lock", vp);
778 #endif
779 		return (1);
780 	}
781 
782 	VOP_INACTIVE(vp, p);
783 
784 	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
785 		vputonfreelist(vp);
786 	return (1);
787 }
788 
789 /* Page or buffer structure gets a reference. */
790 void
791 vhold(struct vnode *vp)
792 {
793 	/*
794 	 * If it is on the freelist and the hold count is currently
795 	 * zero, move it to the hold list.
796 	 */
797 	if ((vp->v_bioflag & VBIOONFREELIST) &&
798 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
799 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
800 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
801 	}
802 	vp->v_holdcnt++;
803 }
804 
805 /* Lose interest in a vnode. */
806 void
807 vdrop(struct vnode *vp)
808 {
809 #ifdef DIAGNOSTIC
810 	if (vp->v_holdcnt == 0)
811 		panic("vdrop: zero holdcnt");
812 #endif
813 
814 	vp->v_holdcnt--;
815 
816 	/*
817 	 * If it is on the holdlist and the hold count drops to
818 	 * zero, move it to the free list.
819 	 */
820 	if ((vp->v_bioflag & VBIOONFREELIST) &&
821 	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
822 		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
823 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
824 	}
825 }
826 
827 /*
828  * Remove any vnodes in the vnode table belonging to mount point mp.
829  *
830  * If MNT_NOFORCE is specified, there should not be any active ones;
831  * return an error if any are found (nb: this is a user error, not a
832  * system error). If MNT_FORCE is specified, detach any active vnodes
833  * that are found.
834  */
835 #ifdef DEBUG
836 int busyprt = 0;	/* print out busy vnodes */
837 struct ctldebug debug1 = { "busyprt", &busyprt };
838 #endif
839 
840 int
841 vfs_mount_foreach_vnode(struct mount *mp,
842     int (*func)(struct vnode *, void *), void *arg) {
843 	struct vnode *vp, *nvp;
844 	int error = 0;
845 
846 loop:
847 	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
848 		if (vp->v_mount != mp)
849 			goto loop;
850 
851 		error = func(vp, arg);
852 
853 		if (error != 0)
854 			break;
855 	}
856 
857 	return (error);
858 }
859 
860 struct vflush_args {
861 	struct vnode *skipvp;
862 	int busy;
863 	int flags;
864 };
865 
866 int
867 vflush_vnode(struct vnode *vp, void *arg)
868 {
869 	struct vflush_args *va = arg;
870 	struct proc *p = curproc;
871 
872 	if (vp == va->skipvp) {
873 		return (0);
874 	}
875 
876 	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
877 		return (0);
878 	}
879 
880 	/*
881 	 * If WRITECLOSE is set, only flush out regular file
882 	 * vnodes open for writing.
883 	 */
884 	if ((va->flags & WRITECLOSE) &&
885 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
886 		return (0);
887 	}
888 
889 	/*
890 	 * With v_usecount == 0, all we need to do is clear
891 	 * out the vnode data structures and we are done.
892 	 */
893 	if (vp->v_usecount == 0) {
894 		vgonel(vp, p);
895 		return (0);
896 	}
897 
898 	/*
899 	 * If FORCECLOSE is set, forcibly close the vnode.
900 	 * For block or character devices, revert to an
901 	 * anonymous device. For all other files, just kill them.
902 	 */
903 	if (va->flags & FORCECLOSE) {
904 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
905 			vgonel(vp, p);
906 		} else {
907 			vclean(vp, 0, p);
908 			vp->v_op = &spec_vops;
909 			insmntque(vp, NULL);
910 		}
911 		return (0);
912 	}
913 
914 	/*
915 	 * If IGNORECLEAN is set, we may skip vnodes which have no
916 	 * changes pending to disk.
917 	 * XXX Might be nice to check per-fs "inode" flags, but
918 	 * generally the filesystem is sync'd already, right?
919 	 */
920 	if ((va->flags & IGNORECLEAN) &&
921 	    LIST_EMPTY(&vp->v_dirtyblkhd))
922 		return (0);
923 
924 #ifdef DEBUG
925 	if (busyprt)
926 		vprint("vflush: busy vnode", vp);
927 #endif
928 	va->busy++;
929 	return (0);
930 }
931 
932 int
933 vflush(struct mount *mp, struct vnode *skipvp, int flags)
934 {
935 	struct vflush_args va;
936 	va.skipvp = skipvp;
937 	va.busy = 0;
938 	va.flags = flags;
939 
940 	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
941 
942 	if (va.busy)
943 		return (EBUSY);
944 	return (0);
945 }
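
/*
 * Illustrative sketch (not compiled): a typical vflush() call from a
 * filesystem's unmount path, forcing vnodes away only when MNT_FORCE
 * was given.  The surrounding unmount logic is an assumption.
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, NULLVP, flags)) != 0)
 *		return (error);
 */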
946 
947 /*
948  * Disassociate the underlying file system from a vnode.
949  */
950 void
951 vclean(struct vnode *vp, int flags, struct proc *p)
952 {
953 	int active;
954 
955 	/*
956 	 * Check to see if the vnode is in use.
957 	 * If so we have to reference it before we clean it out
958 	 * so that its count cannot fall to zero and generate a
959 	 * race against ourselves to recycle it.
960 	 */
961 	if ((active = vp->v_usecount) != 0)
962 		vp->v_usecount++;
963 
964 	/*
965 	 * Prevent the vnode from being recycled or
966 	 * brought into use while we clean it out.
967 	 */
968 	if (vp->v_flag & VXLOCK)
969 		panic("vclean: deadlock");
970 	vp->v_flag |= VXLOCK;
971 	/*
972 	 * Even if the count is zero, the VOP_INACTIVE routine may still
973 	 * have the object locked while it cleans it out. The VOP_LOCK
974 	 * ensures that the VOP_INACTIVE routine is done with its work.
975 	 * For active vnodes, it ensures that no other activity can
976 	 * occur while the underlying object is being cleaned out.
977 	 */
978 	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);
979 
980 	/*
981 	 * Clean out any VM data associated with the vnode.
982 	 */
983 	uvm_vnp_terminate(vp);
984 	/*
985 	 * Clean out any buffers associated with the vnode.
986 	 */
987 	if (flags & DOCLOSE)
988 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
989 	/*
990 	 * If purging an active vnode, it must be closed and
991 	 * deactivated before being reclaimed. Note that the
992 	 * VOP_INACTIVE will unlock the vnode.
993 	 */
994 	if (active) {
995 		if (flags & DOCLOSE)
996 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
997 		VOP_INACTIVE(vp, p);
998 	} else {
999 		/*
1000 		 * Any other processes trying to obtain this lock must first
1001 		 * wait for VXLOCK to clear, then call the new lock operation.
1002 		 */
1003 		VOP_UNLOCK(vp);
1004 	}
1005 
1006 	/*
1007 	 * Reclaim the vnode.
1008 	 */
1009 	if (VOP_RECLAIM(vp, p))
1010 		panic("vclean: cannot reclaim");
1011 	if (active) {
1012 		vp->v_usecount--;
1013 		if (vp->v_usecount == 0) {
1014 			if (vp->v_holdcnt > 0)
1015 				panic("vclean: not clean");
1016 			vputonfreelist(vp);
1017 		}
1018 	}
1019 	cache_purge(vp);
1020 
1021 	/*
1022 	 * Done with purge, notify sleepers of the grim news.
1023 	 */
1024 	vp->v_op = &dead_vops;
1025 	VN_KNOTE(vp, NOTE_REVOKE);
1026 	vp->v_tag = VT_NON;
1027 	vp->v_flag &= ~VXLOCK;
1028 #ifdef VFSLCKDEBUG
1029 	vp->v_flag &= ~VLOCKSWORK;
1030 #endif
1031 	if (vp->v_flag & VXWANT) {
1032 		vp->v_flag &= ~VXWANT;
1033 		wakeup(vp);
1034 	}
1035 }
1036 
1037 /*
1038  * Recycle an unused vnode to the front of the free list.
1039  */
1040 int
1041 vrecycle(struct vnode *vp, struct proc *p)
1042 {
1043 	if (vp->v_usecount == 0) {
1044 		vgonel(vp, p);
1045 		return (1);
1046 	}
1047 	return (0);
1048 }
1049 
1050 /*
1051  * Eliminate all activity associated with a vnode
1052  * in preparation for reuse.
1053  */
1054 void
1055 vgone(struct vnode *vp)
1056 {
1057 	struct proc *p = curproc;
1058 	vgonel(vp, p);
1059 }
1060 
1061 /*
1062  * vgone, with struct proc.
1063  */
1064 void
1065 vgonel(struct vnode *vp, struct proc *p)
1066 {
1067 	struct vnode *vq;
1068 	struct vnode *vx;
1069 
1070 	/*
1071 	 * If a vgone (or vclean) is already in progress,
1072 	 * wait until it is done and return.
1073 	 */
1074 	if (vp->v_flag & VXLOCK) {
1075 		vp->v_flag |= VXWANT;
1076 		tsleep(vp, PINOD, "vgone", 0);
1077 		return;
1078 	}
1079 
1080 	/*
1081 	 * Clean out the filesystem specific data.
1082 	 */
1083 	vclean(vp, DOCLOSE, p);
1084 	/*
1085 	 * Delete from old mount point vnode list, if on one.
1086 	 */
1087 	if (vp->v_mount != NULL)
1088 		insmntque(vp, NULL);
1089 	/*
1090 	 * If special device, remove it from special device alias list
1091 	 * if it is on one.
1092 	 */
1093 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1094 		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1095 		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1096 		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1097 			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1098 		}
1099 		if (*vp->v_hashchain == vp) {
1100 			*vp->v_hashchain = vp->v_specnext;
1101 		} else {
1102 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1103 				if (vq->v_specnext != vp)
1104 					continue;
1105 				vq->v_specnext = vp->v_specnext;
1106 				break;
1107 			}
1108 			if (vq == NULL)
1109 				panic("missing bdev");
1110 		}
1111 		if (vp->v_flag & VALIASED) {
1112 			vx = NULL;
1113 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1114 				if (vq->v_rdev != vp->v_rdev ||
1115 				    vq->v_type != vp->v_type)
1116 					continue;
1117 				if (vx)
1118 					break;
1119 				vx = vq;
1120 			}
1121 			if (vx == NULL)
1122 				panic("missing alias");
1123 			if (vq == NULL)
1124 				vx->v_flag &= ~VALIASED;
1125 			vp->v_flag &= ~VALIASED;
1126 		}
1127 		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1128 		vp->v_specinfo = NULL;
1129 	}
1130 	/*
1131 	 * The vnode data has been reclaimed; mark the vnode itself
1132 	 * bad so that any remaining references see a dead vnode.
1133 	 */
1134 	vp->v_type = VBAD;
1135 
1136 	/*
1137 	 * Move onto the free list, unless we were called from
1138 	 * getnewvnode and we're not on any free list.
1139 	 */
1140 	if (vp->v_usecount == 0 &&
1141 	    (vp->v_bioflag & VBIOONFREELIST)) {
1142 		int s;
1143 
1144 		s = splbio();
1145 
1146 		if (vp->v_holdcnt > 0)
1147 			panic("vgonel: not clean");
1148 
1149 		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1150 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1151 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1152 		}
1153 		splx(s);
1154 	}
1155 }
1156 
1157 /*
1158  * Lookup a vnode by device number.
1159  */
1160 int
1161 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1162 {
1163 	struct vnode *vp;
1164 	int rc = 0;
1165 
1166 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1167 		if (dev != vp->v_rdev || type != vp->v_type)
1168 			continue;
1169 		*vpp = vp;
1170 		rc = 1;
1171 		break;
1172 	}
1173 	return (rc);
1174 }
1175 
1176 /*
1177  * Revoke all the vnodes corresponding to the specified minor number
1178  * range (endpoints inclusive) of the specified major.
1179  */
1180 void
1181 vdevgone(int maj, int minl, int minh, enum vtype type)
1182 {
1183 	struct vnode *vp;
1184 	int mn;
1185 
1186 	for (mn = minl; mn <= minh; mn++)
1187 		if (vfinddev(makedev(maj, mn), type, &vp))
1188 			VOP_REVOKE(vp, REVOKEALL);
1189 }
1190 
1191 /*
1192  * Calculate the total number of references to a special device.
1193  */
1194 int
1195 vcount(struct vnode *vp)
1196 {
1197 	struct vnode *vq, *vnext;
1198 	int count;
1199 
1200 loop:
1201 	if ((vp->v_flag & VALIASED) == 0)
1202 		return (vp->v_usecount);
1203 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1204 		vnext = vq->v_specnext;
1205 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1206 			continue;
1207 		/*
1208 		 * Alias, but not in use, so flush it out.
1209 		 */
1210 		if (vq->v_usecount == 0 && vq != vp) {
1211 			vgone(vq);
1212 			goto loop;
1213 		}
1214 		count += vq->v_usecount;
1215 	}
1216 	return (count);
1217 }
1218 
1219 #if defined(DEBUG) || defined(DIAGNOSTIC)
1220 /*
1221  * Print out a description of a vnode.
1222  */
1223 static char *typename[] =
1224    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1225 
1226 void
1227 vprint(char *label, struct vnode *vp)
1228 {
1229 	char buf[64];
1230 
1231 	if (label != NULL)
1232 		printf("%s: ", label);
1233 	printf("%p, type %s, use %u, write %u, hold %u,",
1234 		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1235 		vp->v_holdcnt);
1236 	buf[0] = '\0';
1237 	if (vp->v_flag & VROOT)
1238 		strlcat(buf, "|VROOT", sizeof buf);
1239 	if (vp->v_flag & VTEXT)
1240 		strlcat(buf, "|VTEXT", sizeof buf);
1241 	if (vp->v_flag & VSYSTEM)
1242 		strlcat(buf, "|VSYSTEM", sizeof buf);
1243 	if (vp->v_flag & VXLOCK)
1244 		strlcat(buf, "|VXLOCK", sizeof buf);
1245 	if (vp->v_flag & VXWANT)
1246 		strlcat(buf, "|VXWANT", sizeof buf);
1247 	if (vp->v_bioflag & VBIOWAIT)
1248 		strlcat(buf, "|VBIOWAIT", sizeof buf);
1249 	if (vp->v_bioflag & VBIOONFREELIST)
1250 		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1251 	if (vp->v_bioflag & VBIOONSYNCLIST)
1252 		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1253 	if (vp->v_flag & VALIASED)
1254 		strlcat(buf, "|VALIASED", sizeof buf);
1255 	if (buf[0] != '\0')
1256 		printf(" flags (%s)", &buf[1]);
1257 	if (vp->v_data == NULL) {
1258 		printf("\n");
1259 	} else {
1260 		printf("\n\t");
1261 		VOP_PRINT(vp);
1262 	}
1263 }
1264 #endif /* DEBUG || DIAGNOSTIC */
1265 
1266 #ifdef DEBUG
1267 /*
1268  * List all of the locked vnodes in the system.
1269  * Called when debugging the kernel.
1270  */
1271 void
1272 printlockedvnodes(void)
1273 {
1274 	struct mount *mp;
1275 	struct vnode *vp;
1276 
1277 	printf("Locked vnodes\n");
1278 
1279 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1280 		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1281 			continue;
1282 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1283 			if (VOP_ISLOCKED(vp))
1284 				vprint(NULL, vp);
1285 		}
1286 		vfs_unbusy(mp);
1287  	}
1288 
1289 }
1290 #endif
1291 
1292 /*
1293  * Top level filesystem related information gathering.
1294  */
1295 int
1296 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1297     size_t newlen, struct proc *p)
1298 {
1299 	struct vfsconf *vfsp, *tmpvfsp;
1300 	int ret;
1301 
1302 	/* all sysctl names at this level are at least name and field */
1303 	if (namelen < 2)
1304 		return (ENOTDIR);		/* overloaded */
1305 
1306 	if (name[0] != VFS_GENERIC) {
1307 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1308 			if (vfsp->vfc_typenum == name[0])
1309 				break;
1310 
1311 		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1312 			return (EOPNOTSUPP);
1313 
1314 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1315 		    oldp, oldlenp, newp, newlen, p));
1316 	}
1317 
1318 	switch (name[1]) {
1319 	case VFS_MAXTYPENUM:
1320 		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1321 
1322 	case VFS_CONF:
1323 		if (namelen < 3)
1324 			return (ENOTDIR);	/* overloaded */
1325 
1326 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1327 			if (vfsp->vfc_typenum == name[2])
1328 				break;
1329 
1330 		if (vfsp == NULL)
1331 			return (EOPNOTSUPP);
1332 
1333 		/* Make a copy, clear out kernel pointers */
1334 		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1335 		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1336 		tmpvfsp->vfc_vfsops = NULL;
1337 		tmpvfsp->vfc_next = NULL;
1338 
1339 		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1340 		    sizeof(struct vfsconf));
1341 
1342 		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1343 		return (ret);
1344 	case VFS_BCACHESTAT:	/* buffer cache statistics */
1345 		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1346 		    sizeof(struct bcachestats));
1347 		return(ret);
1348 	}
1349 	return (EOPNOTSUPP);
1350 }
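
/*
 * Illustrative sketch (not compiled, userland): reading the buffer cache
 * statistics exported above via sysctl(2).  Variable names are chosen
 * for illustration only.
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT };
 *	struct bcachestats bcs;
 *	size_t len = sizeof(bcs);
 *
 *	if (sysctl(mib, 3, &bcs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */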
1351 
1352 /*
1353  * Check to see if a filesystem is mounted on a block device.
1354  */
1355 int
1356 vfs_mountedon(struct vnode *vp)
1357 {
1358 	struct vnode *vq;
1359 	int error = 0;
1360 
1361  	if (vp->v_specmountpoint != NULL)
1362 		return (EBUSY);
1363 	if (vp->v_flag & VALIASED) {
1364 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1365 			if (vq->v_rdev != vp->v_rdev ||
1366 			    vq->v_type != vp->v_type)
1367 				continue;
1368 			if (vq->v_specmountpoint != NULL) {
1369 				error = EBUSY;
1370 				break;
1371 			}
1372  		}
1373 	}
1374 	return (error);
1375 }
1376 
1377 #ifdef NFSSERVER
1378 /*
1379  * Build hash lists of net addresses and hang them off the mount point.
1380  * Called by vfs_export() to set up the lists of export addresses.
1381  */
1382 int
1383 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1384     struct export_args *argp)
1385 {
1386 	struct netcred *np;
1387 	struct radix_node_head *rnh;
1388 	int nplen, i;
1389 	struct radix_node *rn;
1390 	struct sockaddr *saddr, *smask = 0;
1391 	int error;
1392 
1393 	if (argp->ex_addrlen == 0) {
1394 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1395 			return (EPERM);
1396 		np = &nep->ne_defexported;
1397 		/* fill in the kernel's ucred from userspace's xucred */
1398 		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1399 			return (error);
1400 		mp->mnt_flag |= MNT_DEFEXPORTED;
1401 		goto finish;
1402 	}
1403 	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1404 	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1405 		return (EINVAL);
1406 	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1407 	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1408 	saddr = (struct sockaddr *)(np + 1);
1409 	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1410 	if (error)
1411 		goto out;
1412 	if (saddr->sa_len > argp->ex_addrlen)
1413 		saddr->sa_len = argp->ex_addrlen;
1414 	if (argp->ex_masklen) {
1415 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1416 		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1417 		if (error)
1418 			goto out;
1419 		if (smask->sa_len > argp->ex_masklen)
1420 			smask->sa_len = argp->ex_masklen;
1421 	}
1422 	/* fill in the kernel's ucred from userspace's xucred */
1423 	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1424 		goto out;
1425 	i = saddr->sa_family;
1426 	switch (i) {
1427 	case AF_INET:
1428 		if ((rnh = nep->ne_rtable_inet) == NULL) {
1429 			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1430 			    offsetof(struct sockaddr_in, sin_addr))) {
1431 				error = ENOBUFS;
1432 				goto out;
1433 			}
1434 			rnh = nep->ne_rtable_inet;
1435 		}
1436 		break;
1437 	default:
1438 		error = EINVAL;
1439 		goto out;
1440 	}
1441 	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1442 	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1443 		error = EPERM;
1444 		goto out;
1445 	}
1446 finish:
1447 	np->netc_exflags = argp->ex_flags;
1448 	return (0);
1449 out:
1450 	free(np, M_NETADDR, nplen);
1451 	return (error);
1452 }
1453 
1454 int
1455 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1456 {
1457 	struct radix_node_head *rnh = (struct radix_node_head *)w;
1458 
1459 	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1460 	free(rn, M_NETADDR, 0);
1461 	return (0);
1462 }
1463 
1464 /*
1465  * Free the net address hash lists that are hanging off the mount points.
1466  */
1467 void
1468 vfs_free_addrlist(struct netexport *nep)
1469 {
1470 	struct radix_node_head *rnh;
1471 
1472 	if ((rnh = nep->ne_rtable_inet) != NULL) {
1473 		rn_walktree(rnh, vfs_free_netcred, rnh);
1474 		free(rnh, M_RTABLE, 0);
1475 		nep->ne_rtable_inet = NULL;
1476 	}
1477 }
1478 #endif /* NFSSERVER */
1479 
1480 int
1481 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1482 {
1483 #ifdef NFSSERVER
1484 	int error;
1485 
1486 	if (argp->ex_flags & MNT_DELEXPORT) {
1487 		vfs_free_addrlist(nep);
1488 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1489 	}
1490 	if (argp->ex_flags & MNT_EXPORTED) {
1491 		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1492 			return (error);
1493 		mp->mnt_flag |= MNT_EXPORTED;
1494 	}
1495 	return (0);
1496 #else
1497 	return (ENOTSUP);
1498 #endif /* NFSSERVER */
1499 }
1500 
1501 struct netcred *
1502 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1503 {
1504 #ifdef NFSSERVER
1505 	struct netcred *np;
1506 	struct radix_node_head *rnh;
1507 	struct sockaddr *saddr;
1508 
1509 	np = NULL;
1510 	if (mp->mnt_flag & MNT_EXPORTED) {
1511 		/*
1512 		 * Lookup in the export list first.
1513 		 */
1514 		if (nam != NULL) {
1515 			saddr = mtod(nam, struct sockaddr *);
1516 			switch(saddr->sa_family) {
1517 			case AF_INET:
1518 				rnh = nep->ne_rtable_inet;
1519 				break;
1520 			default:
1521 				rnh = NULL;
1522 				break;
1523 			}
1524 			if (rnh != NULL)
1525 				np = (struct netcred *)rn_match(saddr, rnh);
1526 		}
1527 		/*
1528 		 * If no address match, use the default if it exists.
1529 		 */
1530 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1531 			np = &nep->ne_defexported;
1532 	}
1533 	return (np);
1534 #else
1535 	return (NULL);
1536 #endif /* NFSSERVER */
1537 }
1538 
1539 /*
1540  * Do the usual access checking.
1541  * file_mode, uid and gid are from the vnode in question,
1542  * while acc_mode and cred are from the VOP_ACCESS parameter list.
1543  */
1544 int
1545 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1546     mode_t acc_mode, struct ucred *cred)
1547 {
1548 	mode_t mask;
1549 
1550 	/* User id 0 always gets read/write access. */
1551 	if (cred->cr_uid == 0) {
1552 		/* For VEXEC, at least one of the execute bits must be set. */
1553 		if ((acc_mode & VEXEC) && type != VDIR &&
1554 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1555 			return EACCES;
1556 		return 0;
1557 	}
1558 
1559 	mask = 0;
1560 
1561 	/* Otherwise, check the owner. */
1562 	if (cred->cr_uid == uid) {
1563 		if (acc_mode & VEXEC)
1564 			mask |= S_IXUSR;
1565 		if (acc_mode & VREAD)
1566 			mask |= S_IRUSR;
1567 		if (acc_mode & VWRITE)
1568 			mask |= S_IWUSR;
1569 		return (file_mode & mask) == mask ? 0 : EACCES;
1570 	}
1571 
1572 	/* Otherwise, check the groups. */
1573 	if (groupmember(gid, cred)) {
1574 		if (acc_mode & VEXEC)
1575 			mask |= S_IXGRP;
1576 		if (acc_mode & VREAD)
1577 			mask |= S_IRGRP;
1578 		if (acc_mode & VWRITE)
1579 			mask |= S_IWGRP;
1580 		return (file_mode & mask) == mask ? 0 : EACCES;
1581 	}
1582 
1583 	/* Otherwise, check everyone else. */
1584 	if (acc_mode & VEXEC)
1585 		mask |= S_IXOTH;
1586 	if (acc_mode & VREAD)
1587 		mask |= S_IROTH;
1588 	if (acc_mode & VWRITE)
1589 		mask |= S_IWOTH;
1590 	return (file_mode & mask) == mask ? 0 : EACCES;
1591 }
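
/*
 * Worked example (assumed values): for a regular file with mode 0644
 * owned by uid 100, gid 10, and a credential with cr_uid 200 that is
 * not a member of group 10, the check falls through to the "everyone
 * else" clause:
 *
 *	vaccess(VREG, 0644, 100, 10, VREAD,  cred) == 0        (S_IROTH set)
 *	vaccess(VREG, 0644, 100, 10, VWRITE, cred) == EACCES   (S_IWOTH clear)
 */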
1592 
1593 struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1594 
1595 int
1596 vfs_stall(struct proc *p, int stall)
1597 {
1598 	struct mount *mp;
1599 	int allerror = 0, error;
1600 
1601 	if (stall)
1602 		rw_enter_write(&vfs_stall_lock);
1603 
1604 	/*
1605 	 * The loop variable mp is protected by vfs_busy() so that it cannot
1606 	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
1607 	 * lock order consistent with dounmount().
1608 	 */
1609 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1610 		if (stall) {
1611 			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
1612 			if (error) {
1613 				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1614 				allerror = error;
1615 				continue;
1616 			}
1617 			uvm_vnp_sync(mp);
1618 			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1619 			if (error) {
1620 				printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1621 				vfs_unbusy(mp);
1622 				allerror = error;
1623 				continue;
1624 			}
1625 			mp->mnt_flag |= MNT_STALLED;
1626 		} else {
1627 			if (mp->mnt_flag & MNT_STALLED) {
1628 				vfs_unbusy(mp);
1629 				mp->mnt_flag &= ~MNT_STALLED;
1630 			}
1631 		}
1632 	}
1633 
1634 	if (!stall)
1635 		rw_exit_write(&vfs_stall_lock);
1636 
1637 	return (allerror);
1638 }
1639 
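/*
 * Taking and immediately releasing the read lock makes the caller block
 * while vfs_stall() holds vfs_stall_lock for writing; once the stall is
 * lifted the thread passes straight through.  This gives callers a cheap
 * "wait until the file systems are unstalled" barrier.
 */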
1640 void
1641 vfs_stall_barrier(void)
1642 {
1643 	rw_enter_read(&vfs_stall_lock);
1644 	rw_exit_read(&vfs_stall_lock);
1645 }
1646 
1647 /*
1648  * Unmount all file systems.
1649  * We traverse the list in reverse order under the assumption that doing so
1650  * will avoid needing to worry about dependencies.
1651  */
1652 void
1653 vfs_unmountall(void)
1654 {
1655 	struct mount *mp, *nmp;
1656 	int allerror, error, again = 1;
1657 
1658  retry:
1659 	allerror = 0;
1660 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1661 		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1662 			continue;
1663 		/* XXX Here is a race, the next pointer is not locked. */
1664 		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1665 			printf("unmount of %s failed with error %d\n",
1666 			    mp->mnt_stat.f_mntonname, error);
1667 			allerror = 1;
1668 		}
1669 	}
1670 
1671 	if (allerror) {
1672 		printf("WARNING: some file systems would not unmount\n");
1673 		if (again) {
1674 			printf("retrying\n");
1675 			again = 0;
1676 			goto retry;
1677 		}
1678 	}
1679 }
1680 
1681 /*
1682  * Sync and unmount file systems before shutting down.
1683  */
1684 void
1685 vfs_shutdown(struct proc *p)
1686 {
1687 #ifdef ACCOUNTING
1688 	acct_shutdown();
1689 #endif
1690 
1691 	printf("syncing disks... ");
1692 
1693 	if (panicstr == 0) {
1694 		/* Sync before unmount, in case we hang on something. */
1695 		sys_sync(p, NULL, NULL);
1696 		vfs_unmountall();
1697 	}
1698 
1699 #if NSOFTRAID > 0
1700 	sr_quiesce();
1701 #endif
1702 
1703 	if (vfs_syncwait(p, 1))
1704 		printf("giving up\n");
1705 	else
1706 		printf("done\n");
1707 }
1708 
1709 /*
1710  * Perform the sync() operation and wait for buffers to flush.
1711  */
1712 int
1713 vfs_syncwait(struct proc *p, int verbose)
1714 {
1715 	struct buf *bp;
1716 	int iter, nbusy, dcount, s;
1717 #ifdef MULTIPROCESSOR
1718 	int hold_count;
1719 #endif
1720 
1721 	sys_sync(p, NULL, NULL);
1722 
1723 	/* Wait for sync to finish. */
1724 	dcount = 10000;
1725 	for (iter = 0; iter < 20; iter++) {
1726 		nbusy = 0;
1727 		LIST_FOREACH(bp, &bufhead, b_list) {
1728 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1729 				nbusy++;
1730 			/*
1731 			 * With soft updates, some buffers that are
1732 			 * written will be remarked as dirty until other
1733 			 * buffers are written.
1734 			 */
1735 			if (bp->b_flags & B_DELWRI) {
1736 				s = splbio();
1737 				bremfree(bp);
1738 				buf_acquire(bp);
1739 				splx(s);
1740 				nbusy++;
1741 				bawrite(bp);
1742 				if (dcount-- <= 0) {
1743 					if (verbose)
1744 						printf("softdep ");
1745 					return 1;
1746 				}
1747 			}
1748 		}
1749 		if (nbusy == 0)
1750 			break;
1751 		if (verbose)
1752 			printf("%d ", nbusy);
1753 #ifdef MULTIPROCESSOR
1754 		if (_kernel_lock_held())
1755 			hold_count = __mp_release_all(&kernel_lock);
1756 		else
1757 			hold_count = 0;
1758 #endif
1759 		DELAY(40000 * iter);
1760 #ifdef MULTIPROCESSOR
1761 		if (hold_count)
1762 			__mp_acquire_count(&kernel_lock, hold_count);
1763 #endif
1764 	}
1765 
1766 	return nbusy;
1767 }
1768 
1769 /*
1770  * POSIX file system related system variables.
1771  */
1772 int
1773 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1774     void *newp, size_t newlen, struct proc *p)
1775 {
1776 	/* all sysctl names at this level are terminal */
1777 	if (namelen != 1)
1778 		return (ENOTDIR);
1779 
1780 	switch (name[0]) {
1781 	case FS_POSIX_SETUID:
1782 		if (newp && securelevel > 0)
1783 			return (EPERM);
1784 		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1785 	default:
1786 		return (EOPNOTSUPP);
1787 	}
1788 	/* NOTREACHED */
1789 }
1790 
1791 /*
1792  * file system related system variables.
1793  */
1794 int
1795 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1796     size_t newlen, struct proc *p)
1797 {
1798 	sysctlfn *fn;
1799 
1800 	switch (name[0]) {
1801 	case FS_POSIX:
1802 		fn = fs_posix_sysctl;
1803 		break;
1804 	default:
1805 		return (EOPNOTSUPP);
1806 	}
1807 	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1808 }
1809 
1810 
1811 /*
1812  * Routines dealing with vnodes and buffers
1813  */
1814 
1815 /*
1816  * Wait for all outstanding I/Os to complete
1817  *
1818  * Manipulates v_numoutput. Must be called at splbio()
1819  */
1820 int
1821 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1822 {
1823 	int error = 0;
1824 
1825 	splassert(IPL_BIO);
1826 
1827 	while (vp->v_numoutput) {
1828 		vp->v_bioflag |= VBIOWAIT;
1829 		error = tsleep(&vp->v_numoutput,
1830 		    slpflag | (PRIBIO + 1), wmesg, timeo);
1831 		if (error)
1832 			break;
1833 	}
1834 
1835 	return (error);
1836 }
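
/*
 * Illustrative sketch (not compiled): draining outstanding writes before
 * tearing down a vnode's buffers, as vinvalbuf() does below.  The spl
 * handling is required; the rest is illustrative.
 *
 *	s = splbio();
 *	vwaitforio(vp, 0, "drain", 0);
 *	... v_numoutput is now zero ...
 *	splx(s);
 */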
1837 
1838 /*
1839  * Update outstanding I/O count and do wakeup if requested.
1840  *
1841  * Manipulates v_numoutput. Must be called at splbio()
1842  */
1843 void
1844 vwakeup(struct vnode *vp)
1845 {
1846 	splassert(IPL_BIO);
1847 
1848 	if (vp != NULL) {
1849 		if (vp->v_numoutput-- == 0)
1850 			panic("vwakeup: neg numoutput");
1851 		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1852 			vp->v_bioflag &= ~VBIOWAIT;
1853 			wakeup(&vp->v_numoutput);
1854 		}
1855 	}
1856 }
1857 
1858 /*
1859  * Flush out and invalidate all buffers associated with a vnode.
1860  * Called with the underlying object locked.
1861  */
1862 int
1863 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1864     int slpflag, int slptimeo)
1865 {
1866 	struct buf *bp;
1867 	struct buf *nbp, *blist;
1868 	int s, error;
1869 
1870 #ifdef VFSLCKDEBUG
1871 	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1872 		panic("%s: vp isn't locked, vp %p", __func__, vp);
1873 #endif
1874 
1875 	if (flags & V_SAVE) {
1876 		s = splbio();
1877 		vwaitforio(vp, 0, "vinvalbuf", 0);
1878 		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1879 			splx(s);
1880 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1881 				return (error);
1882 			s = splbio();
1883 			if (vp->v_numoutput > 0 ||
1884 			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1885 				panic("%s: dirty bufs, vp %p", __func__, vp);
1886 		}
1887 		splx(s);
1888 	}
1889 loop:
1890 	s = splbio();
1891 	for (;;) {
1892 		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1893 		    (flags & V_SAVEMETA))
1894 			while (blist && blist->b_lblkno < 0)
1895 				blist = LIST_NEXT(blist, b_vnbufs);
1896 		if (blist == NULL &&
1897 		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1898 		    (flags & V_SAVEMETA))
1899 			while (blist && blist->b_lblkno < 0)
1900 				blist = LIST_NEXT(blist, b_vnbufs);
1901 		if (!blist)
1902 			break;
1903 
1904 		for (bp = blist; bp; bp = nbp) {
1905 			nbp = LIST_NEXT(bp, b_vnbufs);
1906 			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1907 				continue;
1908 			if (bp->b_flags & B_BUSY) {
1909 				bp->b_flags |= B_WANTED;
1910 				error = tsleep(bp, slpflag | (PRIBIO + 1),
1911 				    "vinvalbuf", slptimeo);
1912 				if (error) {
1913 					splx(s);
1914 					return (error);
1915 				}
1916 				break;
1917 			}
1918 			bremfree(bp);
1919 			/*
1920 			 * XXX Since there are no node locks for NFS, I believe
1921 			 * there is a slight chance that a delayed write will
1922 			 * occur while sleeping just above, so check for it.
1923 			 */
1924 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1925 				buf_acquire(bp);
1926 				splx(s);
1927 				(void) VOP_BWRITE(bp);
1928 				goto loop;
1929 			}
1930 			buf_acquire_nomap(bp);
1931 			bp->b_flags |= B_INVAL;
1932 			brelse(bp);
1933 		}
1934 	}
1935 	if (!(flags & V_SAVEMETA) &&
1936 	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1937 		panic("%s: flush failed, vp %p", __func__, vp);
1938 	splx(s);
1939 	return (0);
1940 }
1941 
1942 void
1943 vflushbuf(struct vnode *vp, int sync)
1944 {
1945 	struct buf *bp, *nbp;
1946 	int s;
1947 
1948 loop:
1949 	s = splbio();
1950 	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1951 		if ((bp->b_flags & B_BUSY))
1952 			continue;
1953 		if ((bp->b_flags & B_DELWRI) == 0)
1954 			panic("vflushbuf: not dirty");
1955 		bremfree(bp);
1956 		buf_acquire(bp);
1957 		splx(s);
1958 		/*
1959 		 * Wait for I/O associated with indirect blocks to complete,
1960 		 * since there is no way to quickly wait for them below.
1961 		 */
1962 		if (bp->b_vp == vp || sync == 0)
1963 			(void) bawrite(bp);
1964 		else
1965 			(void) bwrite(bp);
1966 		goto loop;
1967 	}
1968 	if (sync == 0) {
1969 		splx(s);
1970 		return;
1971 	}
1972 	vwaitforio(vp, 0, "vflushbuf", 0);
1973 	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1974 		splx(s);
1975 #ifdef DIAGNOSTIC
1976 		vprint("vflushbuf: dirty", vp);
1977 #endif
1978 		goto loop;
1979 	}
1980 	splx(s);
1981 }
1982 
1983 /*
1984  * Associate a buffer with a vnode.
1985  *
1986  * Manipulates buffer vnode queues. Must be called at splbio().
1987  */
1988 void
1989 bgetvp(struct vnode *vp, struct buf *bp)
1990 {
1991 	splassert(IPL_BIO);
1992 
1993 
1994 	if (bp->b_vp)
1995 		panic("bgetvp: not free");
1996 	vhold(vp);
1997 	bp->b_vp = vp;
1998 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1999 		bp->b_dev = vp->v_rdev;
2000 	else
2001 		bp->b_dev = NODEV;
2002 	/*
2003 	 * Insert onto list for new vnode.
2004 	 */
2005 	bufinsvn(bp, &vp->v_cleanblkhd);
2006 }
2007 
2008 /*
2009  * Disassociate a buffer from a vnode.
2010  *
2011  * Manipulates vnode buffer queues. Must be called at splbio().
2012  */
2013 void
2014 brelvp(struct buf *bp)
2015 {
2016 	struct vnode *vp;
2017 
2018 	splassert(IPL_BIO);
2019 
2020 	if ((vp = bp->b_vp) == (struct vnode *) 0)
2021 		panic("brelvp: NULL");
2022 	/*
2023 	 * Delete from old vnode list, if on one.
2024 	 */
2025 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2026 		bufremvn(bp);
2027 	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2028 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2029 		vp->v_bioflag &= ~VBIOONSYNCLIST;
2030 		LIST_REMOVE(vp, v_synclist);
2031 	}
2032 	bp->b_vp = NULL;
2033 
2034 	vdrop(vp);
2035 }
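
/*
 * Illustrative sketch (not compiled): associating a buffer with a vnode
 * and breaking the association again, at splbio() as required.  The
 * buffer setup around these calls is assumed.
 *
 *	s = splbio();
 *	bgetvp(vp, bp);		(takes a hold on vp via vhold())
 *	...
 *	brelvp(bp);		(drops the hold via vdrop())
 *	splx(s);
 */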
2036 
2037 /*
2038  * Replaces the current vnode associated with the buffer, if any,
2039  * with a new vnode.
2040  *
2041  * If an output I/O is pending on the buffer, the old vnode
2042  * I/O count is adjusted.
2043  *
2044  * Ignores vnode buffer queues. Must be called at splbio().
2045  */
2046 void
2047 buf_replacevnode(struct buf *bp, struct vnode *newvp)
2048 {
2049 	struct vnode *oldvp = bp->b_vp;
2050 
2051 	splassert(IPL_BIO);
2052 
2053 	if (oldvp)
2054 		brelvp(bp);
2055 
2056 	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2057 		newvp->v_numoutput++;	/* put it on swapdev */
2058 		vwakeup(oldvp);
2059 	}
2060 
2061 	bgetvp(newvp, bp);
2062 	bufremvn(bp);
2063 }
2064 
2065 /*
2066  * Used to assign buffers to the appropriate clean or dirty list on
2067  * the vnode and to add newly dirty vnodes to the appropriate
2068  * filesystem syncer list.
2069  *
2070  * Manipulates vnode buffer queues. Must be called at splbio().
2071  */
2072 void
2073 reassignbuf(struct buf *bp)
2074 {
2075 	struct buflists *listheadp;
2076 	int delay;
2077 	struct vnode *vp = bp->b_vp;
2078 
2079 	splassert(IPL_BIO);
2080 
2081 	/*
2082 	 * Delete from old vnode list, if on one.
2083 	 */
2084 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2085 		bufremvn(bp);
2086 
2087 	/*
2088 	 * If dirty, put on list of dirty buffers;
2089 	 * otherwise insert onto list of clean buffers.
2090 	 */
2091 	if ((bp->b_flags & B_DELWRI) == 0) {
2092 		listheadp = &vp->v_cleanblkhd;
2093 		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2094 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2095 			vp->v_bioflag &= ~VBIOONSYNCLIST;
2096 			LIST_REMOVE(vp, v_synclist);
2097 		}
2098 	} else {
2099 		listheadp = &vp->v_dirtyblkhd;
2100 		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2101 			switch (vp->v_type) {
2102 			case VDIR:
2103 				delay = syncdelay / 2;
2104 				break;
2105 			case VBLK:
2106 				if (vp->v_specmountpoint != NULL) {
2107 					delay = syncdelay / 3;
2108 					break;
2109 				}
2110 				/* FALLTHROUGH */
2111 			default:
2112 				delay = syncdelay;
2113 			}
2114 			vn_syncer_add_to_worklist(vp, delay);
2115 		}
2116 	}
2117 	bufinsvn(bp, listheadp);
2118 }
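
/*
 * Example (sketch): reassignbuf() is what moves a buffer between the
 * clean and dirty queues when its delayed-write state changes; a
 * delayed-write path does roughly:
 *
 *	s = splbio();
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp);
 *	splx(s);
 *
 * which also puts the vnode on the syncer worklist if this is its first
 * dirty buffer.
 */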
2119 
2120 int
2121 vfs_register(struct vfsconf *vfs)
2122 {
2123 	struct vfsconf *vfsp;
2124 	struct vfsconf **vfspp;
2125 
2126 #ifdef DIAGNOSTIC
2127 	/* Paranoia? */
2128 	if (vfs->vfc_refcount != 0)
2129 		printf("vfs_register called with vfc_refcount != 0\n");
2130 #endif
2131 
2132 	/* Check if filesystem already known */
2133 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2134 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2135 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2136 			return (EEXIST);
2137 
2138 	if (vfs->vfc_typenum > maxvfsconf)
2139 		maxvfsconf = vfs->vfc_typenum;
2140 
2141 	vfs->vfc_next = NULL;
2142 
2143 	/* Add to the end of the list */
2144 	*vfspp = vfs;
2145 
2146 	/* Call vfs_init() */
2147 	if (vfs->vfc_vfsops->vfs_init)
2148 		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2149 
2150 	return (0);
2151 }
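
/*
 * Example (sketch): a hypothetical filesystem describes itself with a
 * struct vfsconf and hands it to vfs_register(); the symmetric
 * vfs_unregister() below removes it again once vfc_refcount has dropped
 * to zero.
 *
 *	static struct vfsconf example_vfsconf = {
 *		.vfc_vfsops = &example_vfsops,
 *		.vfc_name = "example",
 *	};
 *
 *	error = vfs_register(&example_vfsconf);
 *
 * "example_vfsops" and the "example" name are placeholders; only fields
 * referenced in this file are shown.
 */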
2152 
2153 int
2154 vfs_unregister(struct vfsconf *vfs)
2155 {
2156 	struct vfsconf *vfsp;
2157 	struct vfsconf **vfspp;
2158 	int maxtypenum;
2159 
2160 	/* Find our vfsconf struct */
2161 	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2162 	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2163 		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2164 			break;
2165 	}
2166 
2167 	if (!vfsp)			/* Not found */
2168 		return (ENOENT);
2169 
2170 	if (vfsp->vfc_refcount)		/* In use */
2171 		return (EBUSY);
2172 
2173 	/* Remove from list and free */
2174 	*vfspp = vfsp->vfc_next;
2175 
2176 	maxtypenum = 0;
2177 
2178 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2179 		if (vfsp->vfc_typenum > maxtypenum)
2180 			maxtypenum = vfsp->vfc_typenum;
2181 
2182 	maxvfsconf = maxtypenum;
2183 	return (0);
2184 }
2185 
2186 /*
2187  * Check if vnode represents a disk device
2188  */
2189 int
2190 vn_isdisk(struct vnode *vp, int *errp)
2191 {
2192 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2193 		return (0);
2194 
2195 	return (1);
2196 }
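
/*
 * Example (sketch): mount-time code that requires a disk device could use
 * this after looking up the special file, roughly:
 *
 *	if (!vn_isdisk(devvp, NULL))
 *		return (ENOTBLK);
 *
 * "devvp" is a placeholder for the device vnode being mounted; note that
 * the errp argument is not consulted by the current implementation.
 */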
2197 
2198 #ifdef DDB
2199 #include <machine/db_machdep.h>
2200 #include <ddb/db_interface.h>
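
/*
 * The printers below are debugger helpers: each takes the object, a
 * "full" flag and a kprintf-style output callback, so they can be driven
 * from ddb(4) (presumably via its "show buf", "show vnode" and
 * "show mount" commands) or from ad-hoc debug code, e.g.:
 *
 *	vfs_mount_print(mp, 0, printf);
 */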
2201 
2202 void
2203 vfs_buf_print(void *b, int full,
2204     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2205 {
2206 	struct buf *bp = b;
2207 
2208 	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2209 	      "  proc %p error %d flags %lb\n",
2210 	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2211 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2212 
2213 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2214 	      "  data %p saveaddr %p dep %p iodone %p\n",
2215 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2216 	    bp->b_data, bp->b_saveaddr,
2217 	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2218 
2219 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2220 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2221 
2222 #ifdef FFS_SOFTUPDATES
2223 	if (full)
2224 		softdep_print(bp, full, pr);
2225 #endif
2226 }
2227 
2228 const char *vtypes[] = { VTYPE_NAMES };
2229 const char *vtags[] = { VTAG_NAMES };
2230 
2231 void
2232 vfs_vnode_print(void *v, int full,
2233     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2234 {
2235 	struct vnode *vp = v;
2236 
2237 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2238 	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2239 	      vp->v_tag,
2240 	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2241 	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2242 
2243 	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2244 	      vp->v_data, vp->v_usecount, vp->v_writecount,
2245 	      vp->v_holdcnt, vp->v_numoutput);
2246 
2247 	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2248 
2249 	if (full) {
2250 		struct buf *bp;
2251 
2252 		(*pr)("clean bufs:\n");
2253 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2254 			(*pr)(" bp %p\n", bp);
2255 			vfs_buf_print(bp, full, pr);
2256 		}
2257 
2258 		(*pr)("dirty bufs:\n");
2259 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2260 			(*pr)(" bp %p\n", bp);
2261 			vfs_buf_print(bp, full, pr);
2262 		}
2263 	}
2264 }
2265 
2266 void
2267 vfs_mount_print(struct mount *mp, int full,
2268     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2269 {
2270 	struct vfsconf *vfc = mp->mnt_vfc;
2271 	struct vnode *vp;
2272 	int cnt;
2273 
2274 	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2275 	    mp->mnt_flag, MNT_BITS,
2276 	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2277 
2278 	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2279 	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2280 	    vfc->vfc_refcount, vfc->vfc_flags);
2281 
2282 	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2283 	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2284 	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2285 
2286 	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2287 	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2288 
2289 	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2290 	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2291 	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2292 
2293 	(*pr)("  syncwrites %llu asyncwrites %llu\n",
2294 	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2295 
2296 	(*pr)("  syncreads %llu asyncreads %llu\n",
2297 	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2298 
2299 	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2300 	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2301 	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2302 
2303 	(*pr)("locked vnodes:");
2304 	/* XXX would take mountlist lock, except ddb has no context */
2305 	cnt = 0;
2306 	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2307 		if (VOP_ISLOCKED(vp)) {
2308 			if (cnt == 0)
2309 				(*pr)("\n  %p", vp);
2310 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2311 				(*pr)(",\n  %p", vp);
2312 			else
2313 				(*pr)(", %p", vp);
2314 			cnt++;
2315 		}
2316 	}
2317 	(*pr)("\n");
2318 
2319 	if (full) {
2320 		(*pr)("all vnodes:");
2321 		/* XXX would take mountlist lock, except ddb has no context */
2322 		cnt = 0;
2323 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2324 			if (cnt == 0)
2325 				(*pr)("\n  %p", vp);
2326 			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2327 				(*pr)(",\n  %p", vp);
2328 			else
2329 				(*pr)(", %p", vp);
2330 			cnt++;
2331 		}
2332 		(*pr)("\n");
2333 	}
2334 }
2335 #endif /* DDB */
2336 
2337 void
2338 copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2339 {
2340 	const struct statfs *mbp;
2341 
2342 	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2343 
2344 	if (sbp == (mbp = &mp->mnt_stat))
2345 		return;
2346 
2347 	sbp->f_fsid = mbp->f_fsid;
2348 	sbp->f_owner = mbp->f_owner;
2349 	sbp->f_flags = mbp->f_flags;
2350 	sbp->f_syncwrites = mbp->f_syncwrites;
2351 	sbp->f_asyncwrites = mbp->f_asyncwrites;
2352 	sbp->f_syncreads = mbp->f_syncreads;
2353 	sbp->f_asyncreads = mbp->f_asyncreads;
2354 	sbp->f_namemax = mbp->f_namemax;
2355 	memcpy(sbp->f_mntonname, mbp->f_mntonname, MNAMELEN);
2356 	memcpy(sbp->f_mntfromname, mbp->f_mntfromname, MNAMELEN);
2357 	memcpy(sbp->f_mntfromspec, mbp->f_mntfromspec, MNAMELEN);
2358 	memcpy(&sbp->mount_info, &mbp->mount_info,
2359 	    sizeof(union mount_info));
2360 }
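
/*
 * Example (sketch): a filesystem's statfs routine fills in the counters
 * it owns and then lets copy_statfs_info() supply the generic fields,
 * roughly:
 *
 *	int
 *	example_statfs(struct mount *mp, struct statfs *sbp, struct proc *p)
 *	{
 *		sbp->f_bsize = ...;
 *		...
 *		copy_statfs_info(sbp, mp);
 *		return (0);
 *	}
 *
 * "example_statfs" and its elided assignments are placeholders; the
 * assumed argument list mirrors the vfs_statfs entry point.
 */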
2361