/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
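
/*
 * Usage sketch (illustrative only; "ip" and the VREG type are
 * hypothetical, and a real filesystem would pick its own vtagtype):
 * a caller typically fills in the remaining fields and then either
 * drops its reference with vx_put() or keeps a vref via vx_unlock().
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *	if (error == 0) {
 *		vp->v_data = ip;	(attach fs-private inode state)
 *		vp->v_type = VREG;	(vnode stays VNON until set)
 *		vx_unlock(vp);		(return it referenced)
 *	}
 */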

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
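
/*
 * Usage sketch (illustrative): the canonical interlock pattern, as
 * also used by mountlist_scan() below.  A caller busies the mount
 * before touching it so an unmount cannot proceed, then unbusies it
 * when done.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		(safe to use mp here, unmount is excluded)
 *		vfs_unbusy(mp);
 *	}
 */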

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
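
/*
 * Usage sketch (illustrative; the type and device names are
 * placeholders): root mounting code looks up the filesystem type and
 * gets back a busied, read-only mount ready for VFS_MOUNT().
 *
 *	error = vfs_rootmountalloc("ufs", "root_device", &mp);
 *	if (error == 0) {
 *		(perform the actual VFS_MOUNT() and finish the root mount)
 *	}
 */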

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
	vn_syncer_thr_create(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}
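
/*
 * Worked example of the encoding above (values are illustrative): with
 * vfc_typenum == 5 and mntid_base == 0x1234, val[1] becomes 5 and
 * val[0] becomes makeudev(255, (5 << 24) | (0x12 << 16) | 0x34).  The
 * type number lands in the top byte of the minor number and the 16-bit
 * mntid is split so its low byte stays in the low byte of the minor,
 * which is what emulators limited to 16-bit device numbers get to see.
 */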

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}
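
/*
 * Usage sketch (illustrative; "devid" is a hypothetical device-derived
 * value): a filesystem proposes an fsid template and lets vfs_setfsid()
 * resolve collisions by bumping val[1].
 *
 *	fsid_t tfsid;
 *
 *	tfsid.val[0] = devid;
 *	tfsid.val[1] = mp->mnt_vfc->vfc_typenum;
 *	if (vfs_setfsid(mp, &tfsid))
 *		kprintf("fsid collision, val[1] was adjusted\n");
 */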

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (VREFCNT(vp) > 0)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers, but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories) just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (VREFCNT(vp) > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (VREFCNT(vp) > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (VREFCNT(vp) <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 *
	 * (long) -> deal with 64 bit machines, intermediate overflow
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = (long)vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mp->mnt_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mnt_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mp->mnt_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Do some opportunistic roving.
		 */
		if (numvnodes > 100000)
			vnode_free_rover_scan(50);
		else if (numvnodes > 10000)
			vnode_free_rover_scan(20);
		else
			vnode_free_rover_scan(5);

		/*
		 * Try to free some vnodes if we have too many.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		if (numvnodes > desiredvnodes &&
		    cachedvnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > cachedvnodes / 100)
				count = cachedvnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - cachedvnodes <= (long)desiredvnodes * 9 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any, repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
	    TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
	    TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
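
/*
 * Usage sketch (the callback is hypothetical): run a check serialized
 * against other interlocked mountlist operations.
 *
 *	static int
 *	example_interlock(struct mount *mp)
 *	{
 *		if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *			return (EBUSY);
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(example_interlock, mp);
 */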

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly
 * used by the VFS quota code to check if a cached nullfs struct mount
 * pointer is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
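
/*
 * Usage sketch (hypothetical callback): count the mounted filesystems
 * with a forward scan.  Each non-negative callback return value is
 * added to the scan total, the same way vlrureclaim() is driven above.
 *
 *	static int
 *	count_one(struct mount *mp, void *data)
 *	{
 *		return (1);
 *	}
 *
 *	nmounts = mountlist_scan(count_one, NULL, MNTSCAN_FORWARD);
 */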

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.  Only needed when
 *	 fastfunc is non-NULL.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);
	if (fastfunc)
		lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass, stop after iterating the vnodes that
	 * were available at the start.  Under heavy loads new vnodes can
	 * be added while we are scanning, so this isn't perfect.  Create
	 * a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptible, time-critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick.
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	if (fastfunc)
		lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
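
/*
 * Usage sketch (hypothetical callbacks; the dirty-buffer test and the
 * VOP_FSYNC() arguments are illustrative assumptions): cheaply skip
 * clean vnodes in the fastfunc, then sync the dirty ones in the
 * slowfunc with the vnode lock held via VMSC_GETVP.
 *
 *	static int
 *	sync_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (RB_EMPTY(&vp->v_rbdirty_tree) ? -1 : 0);
 *	}
 *
 *	static int
 *	sync_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (VOP_FSYNC(vp, MNT_WAIT, 0));
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, sync_fast, sync_slow, NULL);
 */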

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
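
/*
 * Usage sketch (illustrative; "mntflags" is the hypothetical flags
 * word an unmount entry point receives): a typical VFS unmount path
 * flushes with one base reference held on the root vnode, forcing the
 * issue when a forced unmount was requested.
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 */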

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to the dummymount structure so vop_*() functions don't
	 * dereference a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}