/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntvnode_token, "mntvnode");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() the vnode, or vx_unlock() it if a vref
 * is to be left.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
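
/*
 * Example (hypothetical sketch): a filesystem's inode-to-vnode path might
 * use getnewvnode() roughly as follows.  examplefs_alloc_vnode() and the
 * examplefs_inode type are invented for illustration; only getnewvnode(),
 * the vnode fields, and vx_unlock() are real.
 */
#if 0
struct examplefs_inode;			/* hypothetical fs-private type */

static int
examplefs_alloc_vnode(struct mount *mp, struct examplefs_inode *ip,
		      struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_data = ip;	/* hook up fs-private state */
	vp->v_type = VREG;	/* leaving VNON would hide it from scans */
	vx_unlock(vp);		/* keep the vref, drop the VX lock */
	*vpp = vp;
	return (0);
}
#endif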

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
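
/*
 * Example (hypothetical sketch): code that must keep a filesystem from
 * being unmounted while it works on it typically brackets the work with
 * vfs_busy()/vfs_unbusy().  LK_NOWAIT avoids sleeping when an unmount is
 * already in progress; the kprintf() body is a stand-in for real work.
 */
#if 0
	int error;

	error = vfs_busy(mp, LK_NOWAIT);
	if (error == 0) {
		/* mp cannot be unmounted until the matching vfs_unbusy() */
		kprintf("fs %s is busy-held\n", mp->mnt_stat.f_fstypename);
		vfs_unbusy(mp);
	}
#endif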

/*
 * Look up a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
}

/*
 * Look up a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}
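
/*
 * Worked example of the packing above (values hypothetical): with
 * vfc_typenum 5 and mntid_base 0x1234, mtype becomes 0x05000000 and the
 * minor number passed to makeudev() is
 *
 *	0x05000000 | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0x00FF)
 *	    = 0x05000000 | 0x00120000 | 0x00000034 = 0x05120034
 *
 * so val[0] carries the fs type in the top byte and splits the 16-bit
 * mntid across two separated bytes, which is why val[0] mod 2^16 stays
 * unique only for the low byte's 2^8 values.
 */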

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.   It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mntvnode_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	tsleep_interlock(&vnlruproc_sig, 0);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, PINTERLOCKED, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	get_mplock();
	crit_enter();

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any, repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}

	crit_exit();
	rel_mplock();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
	    TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
	    TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
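
/*
 * Example (hypothetical sketch): an interlock callback runs with
 * mountlist_token held, serialized against other interlocked callers.
 * example_is_root_mount() is invented for illustration.
 */
#if 0
static int
example_is_root_mount(struct mount *mp)
{
	/* runs with mountlist_token held */
	return ((mp->mnt_flag & MNT_ROOTFS) ? 0 : EINVAL);
}

/* ... error = mountlist_interlock(example_is_root_mount, mp); ... */
#endif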

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS accounting code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
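
/*
 * Example (hypothetical sketch): counting read-only mounts with a
 * mountlist_scan() callback.  Each mount arrives busied (MNTSCAN_NOBUSY
 * is not used here) and the callback return values are summed into the
 * scan's result.  Both example functions are invented for illustration.
 */
#if 0
static int
example_count_rdonly(struct mount *mp, void *data __unused)
{
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
example_rdonly_total(void)
{
	return (mountlist_scan(example_count_rdonly, NULL, MNTSCAN_FORWARD));
}
#endif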

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_gettoken(&mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&mntvnode_token);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mntvnode_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mntvnode_token);
	lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mntvnode_token);
	return(r);
}
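
/*
 * Example (hypothetical sketch): a filesystem sync might walk its vnodes
 * with vmntvnodescan().  The fastfunc rejects clean vnodes without
 * locking (return -1 == skip); the slowfunc runs with the vnode held via
 * vget() because VMSC_GETVP is specified.  Both example functions are
 * invented; a real fs would flush dirty buffers in the slowfunc.
 */
#if 0
static int
example_sync_fast(struct mount *mp, struct vnode *vp, void *data __unused)
{
	/* cheap, unlocked test: skip vnodes with no dirty buffers */
	return (RB_EMPTY(&vp->v_rbdirty_tree) ? -1 : 0);
}

static int
example_sync_slow(struct mount *mp, struct vnode *vp, void *data __unused)
{
	/* vnode is vget()'d here; flush it, return non-zero to abort */
	return (0);
}

/* vmntvnodescan(mp, VMSC_GETVP, example_sync_fast, example_sync_slow, NULL); */
#endif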

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
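
/*
 * Example (hypothetical sketch): a typical filesystem unmount path
 * flushes its vnodes with one rootref held, falling back to FORCECLOSE
 * for forced unmounts.  The mntflags variable is assumed to come from
 * the caller's VFS_UNMOUNT() arguments.
 */
#if 0
	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
	if (error)
		return (error);		/* EBUSY if vnodes are still active */
#endif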

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (info->flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Look up a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
1366