/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if it wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
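
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a filesystem might typically consume getnewvnode().  The names
 * example_inode and example_fs_get_vnode are hypothetical, and the
 * lktimeout/lkflags of 0 and the VT_UFS tag are only placeholders; the
 * surrounding API usage follows the comment above.
 */
#if 0
struct example_inode;			/* hypothetical fs-private inode */

static int
example_fs_get_vnode(struct mount *mp, struct example_inode *ip,
		     struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* Returns a VX locked, referenced vnode on success. */
	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);

	/* Fill in the fs-private fields before making the vnode usable. */
	vp->v_data = ip;
	vp->v_type = VREG;

	/* Keep a vref for the caller; drop only the VX lock. */
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}
#endif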

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
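
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the expected vfs_busy()/vfs_unbusy() pairing around an operation that
 * must not race an unmount.  example_operate_on_mount is hypothetical.
 */
#if 0
static int
example_operate_on_mount(struct mount *mp)
{
	int error;

	/* Fail (ENOENT) instead of sleeping if an unmount is in progress. */
	error = vfs_busy(mp, LK_NOWAIT);
	if (error)
		return (error);

	/* ... safe to use mp here; it cannot be unmounted until unbusied ... */

	vfs_unbusy(mp);
	return (0);
}
#endif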

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
	vfs_busy(mp, LK_NOWAIT);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}
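
/*
 * Worked example of the val[0] composition above (added commentary, not
 * part of the original source).  With vfc_typenum == 5 and
 * mntid_base == 0x1234, the minor argument passed to makeudev(255, ...) is:
 *
 *	(5 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	= 0x05000000 | 0x00120000 | 0x00000034
 *	= 0x05120034
 *
 * The filesystem type lands in bits 24-31 and the 16-bit mount id is
 * split so its low byte occupies bits 0-7, leaving bits 8-15 free for the
 * major number.  Assuming makeudev() folds major 255 into bits 8-15 as on
 * traditional BSD, the low 16 bits of val[0] repeat only every 2^8 mounts,
 * matching the "first 2^8 calls" note above.
 */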

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list))
			return(0);
	}
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}
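
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how an allocation path might lean on vnlru_proc_wait() when the system
 * is over its vnode limit.  example_alloc_throttle is hypothetical; the
 * real throttling policy lives in the vnode allocation code.
 */
#if 0
static void
example_alloc_throttle(void)
{
	/*
	 * Each call wakes the recycler (if idle) and sleeps up to a
	 * tick; retry until the count drops under the configured limit.
	 */
	while (numvnodes >= desiredvnodes)
		vnlru_proc_wait();
}
#endif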

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_FIRST);

	crit_enter();
	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		cache_cleanneg(0);

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
	crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	if (how == MNTINS_FIRST)
	    TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
	    TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	lwkt_tokref ilock;
	int error;

	lwkt_gettoken(&ilock, &mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&ilock);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	lwkt_tokref ilock;
	struct mount *mp;
	thread_t td;
	int count;
	int res;

	lwkt_gettoken(&ilock, &mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;
	td = curthread;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&ilock);
	return(res);
}
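
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a minimal mountlist_scan() consumer.  Without MNTSCAN_NOBUSY the
 * callback runs with the mount busied; returns >= 0 are summed, < 0
 * aborts the scan.  The example_* names are hypothetical.
 */
#if 0
static int
example_count_rw(struct mount *mp, void *data __unused)
{
	/* Count read-write mounts; the scan aggregates our returns. */
	return ((mp->mnt_flag & MNT_RDONLY) ? 0 : 1);
}

static int
example_count_rw_mounts(void)
{
	return (mountlist_scan(example_count_rw, NULL, MNTSCAN_FORWARD));
}
#endif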

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	lwkt_tokref ilock;
	struct vnode *vp;
	int r = 0;
	int maxcount = 1000000;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&ilock, &mntvnode_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize * 2;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0)
			panic("maxcount reached during vmntvnodescan");

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&ilock);
	return(r);
}
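
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a typical vmntvnodescan() consumer using both callbacks.  The fast test
 * filters cheaply without blocking; the slow function runs with the vnode
 * vget()'d because VMSC_GETVP is passed.  The example_* names are
 * hypothetical; vflush() below is the real in-tree usage.
 */
#if 0
static int
example_fast(struct mount *mp, struct vnode *vp, void *data)
{
	/* < 0 skips the vnode cheaply; 0 falls through to the slow test. */
	if (vp->v_type != VREG)
		return (-1);
	return (0);
}

static int
example_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vp is locked here and we may block; non-zero aborts the scan. */
	/* ... examine or flush vp ... */
	return (0);
}

static int
example_scan(struct mount *mp)
{
	return (vmntvnodescan(mp, VMSC_GETVP, example_fast, example_slow,
			      NULL));
}
#endif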

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
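
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how an unmount path might call vflush().  Passing rootrefs as 1 accounts
 * for the filesystem's own reference on its root vnode; SKIPSYSTEM and
 * WRITECLOSE callers must pass 0, per the comment above.
 * example_unmount_flush is hypothetical.
 */
#if 0
static int
example_unmount_flush(struct mount *mp, int mntflags)
{
	int flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;

	/* One reference is held on the root vnode by the fs itself. */
	return (vflush(mp, 1, flags));
}
#endif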

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode. For block
	 * or character devices we just clean and leave the vp
	 * associated with devfs.  For all other files, just kill them.
	 *
	 * XXX we need to do something about devfs here, I'd rather not
	 *     blow away device associations.
	 */
	if (info->flags & FORCECLOSE) {
		vgone_vxlocked(vp);
#if 0
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone_vxlocked(vp);
		} else {
			vclean_vxlocked(vp, 0);
			/*vp->v_ops = &devfs_vnode_dev_vops_p;*/
			insmntque(vp, NULL);
		}
#endif
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}
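
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a module such as softupdates might register its bio_ops so that
 * bio_ops_sync() reaches it.  example_ops, example_io_sync and
 * example_register are hypothetical; only the io_sync member and
 * add_bio_ops() are taken from the code above.
 */
#if 0
static void
example_io_sync(struct mount *mp)
{
	/* Flush module-private dependency state for mp (NULL = all). */
}

static struct bio_ops example_ops = {
	.io_sync = example_io_sync,
};

static void
example_register(void)
{
	add_bio_ops(&example_ops);
}
#endif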