/*
 * Copyright (c) 2004,2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() the vnode, or vx_unlock() it to leave
 * a vref.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
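
/*
 * Illustrative sketch only (not part of this file): a filesystem's
 * inode-to-vnode path might use getnewvnode() roughly as below.  The
 * myfs_* names and the VT_UFS/VREG choices are hypothetical stand-ins.
 */
#if 0
static int
myfs_allocvp(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_data = ip;	/* attach fs-private data */
	vp->v_type = VREG;	/* leaving VNON keeps scans away */
	*vpp = vp;		/* still VX locked and refd for the caller */
	return (0);
}
#endif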

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount; returns 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in progress, LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
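
/*
 * Illustrative sketch only (not part of this file): callers bracket
 * work that must not race an unmount with vfs_busy()/vfs_unbusy().
 * do_something() is a placeholder name.
 */
#if 0
	if (vfs_busy(mp, LK_NOWAIT) == 0) {
		do_something(mp);	/* mp cannot be unmounted here */
		vfs_unbusy(mp);
	}
#endif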

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mount_hold(mp);
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
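
/*
 * Illustrative sketch only (not part of this file): vfs_getvfs()
 * returns a held mount point, so the caller must drop it when done.
 */
#if 0
	mp = vfs_getvfs(&fsid);
	if (mp) {
		/* ... use mp, it cannot be freed out from under us ... */
		mount_drop(mp);
	}
#endif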

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	struct mount *mptmp;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		mptmp = vfs_getvfs(&tfsid);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}
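
/*
 * Worked example of the minor-number packing above (values made up for
 * illustration): with vfc_typenum 0x05 and mntid_base 0x1234 the minor
 * becomes
 *
 *	(0x05 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	= 0x05000000 | 0x00120000 | 0x00000034 = 0x05120034
 *
 * i.e. the fs type lands in bits 24-31, the high byte of mntid_base in
 * bits 16-23, and its low byte in bits 0-7.  Only the low byte varies
 * mod 2^16, which is why uniqueness mod 2^16 only holds for the first
 * 2^8 calls.
 */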

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	struct mount *mptmp;
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	lwkt_reltoken(&mntid_token);

	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
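
/*
 * Illustrative sketch only (not part of this file): a minimal interlock
 * callback.  myfs_interlock_cb() is a hypothetical name; the body runs
 * with mountlist_token held, serialized versus other such callbacks.
 */
#if 0
static int
myfs_interlock_cb(struct mount *mp)
{
	/* inspect or flag mp here without racing other interlocks */
	return (0);
}

	/* usage: error = mountlist_interlock(myfs_interlock_cb, mp); */
#endif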

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly
 * used by the VFS quota code to check whether a cached nullfs struct
 * mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mount point
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed, the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
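
/*
 * Illustrative sketch only (not part of this file): counting mounted
 * filesystems with mountlist_scan().  Each mount is busied and held
 * around the callback; the returned 1's aggregate into the total.
 * count_one() is a hypothetical name.
 */
#if 0
static int
count_one(struct mount *mp, void *data)
{
	return (1);
}

	/* n = mountlist_scan(count_one, NULL, MNTSCAN_FORWARD); */
#endif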

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without
 * calling the slowfunc(); a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating the vnodes that
	 * were available at the start.  Under heavy loads new vnodes can
	 * be added while we are scanning, so this isn't perfect.  Create
	 * a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptible, time-critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
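
/*
 * Illustrative sketch only (not part of this file): a sync-style scan.
 * The fast function filters clean vnodes without blocking; the slow
 * function runs with the vnode vget()'d because of VMSC_GETVP.  The
 * myfs_* names are hypothetical.
 */
#if 0
static int
myfs_sync_fast(struct mount *mp, struct vnode *vp, void *data)
{
	if (RB_EMPTY(&vp->v_rbdirty_tree))
		return (-1);	/* clean: skip the slow function */
	return (0);		/* dirty: proceed to the slow function */
}

static int
myfs_sync_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vp is referenced and exclusively locked here; we may block */
	VOP_FSYNC(vp, MNT_NOWAIT, 0);
	return (0);		/* non-zero would terminate the scan */
}

	/* vmntvnodescan(mp, VMSC_GETVP, myfs_sync_fast, myfs_sync_slow, NULL); */
#endif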

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active vnodes;
 * an error is returned if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
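
/*
 * Illustrative sketch only (not part of this file): a typical unmount
 * path flushes its vnodes before tearing down filesystem state.  The
 * rootrefs of 1 assumes the filesystem keeps one reference on its root
 * vnode; the mntflags/MNT_FORCE handling is the conventional pattern.
 */
#if 0
	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
	if (error)
		return (error);
#endif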

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking, try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}