/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.7 2005/02/09 02:51:04 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct lwkt_token mountlist_token;
struct lwkt_token mntvnode_token;


/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if a vref is to be left, vx_unlock()
 * the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
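
#if 0	/* EXAMPLE */
/*
 * Minimal sketch of the getnewvnode() contract described above, not an
 * actual caller from the tree.  The tag and lock arguments are
 * illustrative; a real filesystem would pass its own values and hang
 * private data off v_data.  The caller's obligations are: finish
 * initializing the vnode, then either vx_unlock() to keep a vref or
 * vx_put() to drop the reference as well.
 */
static int
example_alloc_vnode(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_type = VREG;	/* leaving VNON would keep it untouchable */
	vp->v_data = NULL;	/* fs-private data would be attached here */
	vx_unlock(vp);		/* keep the vref for the caller */
	*vpp = vp;
	return (0);
}
#endif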

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops_pp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops_pp;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags,
	lwkt_tokref_t interlkp, struct thread *td)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * note: interlkp is a serializer and thus can be safely
		 * held through any sleep
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
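
#if 0	/* EXAMPLE */
/*
 * Sketch of the usual vfs_busy()/vfs_unbusy() pairing when walking the
 * mountlist, modeled directly on the loop in vnlru_proc() below.  Note
 * the asymmetric interlock handling: on success with LK_INTERLOCK the
 * token reference is released (hence the lwkt_gettokref() reacquire),
 * while on failure it is still held.
 */
static int
example_count_mounts(struct thread *td)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int count = 0;

	lwkt_gettoken(&ilock, &mountlist_token);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
			/* busy failed: the interlock token is still held */
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		++count;			/* mp cannot be unmounted here */
		lwkt_gettokref(&ilock);		/* vfs_busy released the ref */
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	lwkt_reltoken(&ilock);
	return (count);
}
#endif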

/*
 * Look up a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	}
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
	vfs_busy(mp, LK_NOWAIT, NULL, td);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
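
#if 0	/* EXAMPLE */
/*
 * Sketch of how a root-mount path might use vfs_rootmountalloc().  The
 * "ufs" and "root_device" arguments are hypothetical; the real root
 * mount code derives them from the boot environment.
 */
static int
example_mount_root(void)
{
	struct mount *mp;
	int error;

	error = vfs_rootmountalloc("ufs", "root_device", &mp);
	if (error)
		return (error);
	/*
	 * mp comes back busied and read-only; the caller would now
	 * invoke VFS_MOUNT() and eventually vfs_unbusy() it.
	 */
	return (0);
}
#endif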

/*
 * Look up a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}
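
#if 0	/* EXAMPLE */
/*
 * Worked example of the minor-number packing above, with illustrative
 * values (vfc_typenum = 5, mntid_base = 0x1234):
 *
 *	mtype  = (5 & 0xFF) << 24          = 0x05000000
 *	minor  = mtype
 *	       | ((0x1234 & 0xFF00) << 8)  = 0x00120000
 *	       | (0x1234 & 0xFF)           = 0x00000034
 *	                                   = 0x05120034
 *	val[0] = makeudev(255, 0x05120034)
 *
 * The low 8 bits of mntid_base land in the low byte of the minor
 * number, which is what keeps val[0] unique mod 2^16 for the first
 * 2^8 calls.
 */
#endif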

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Return 0 if the vnode is not already on the free list; return 1 if the
 * vnode, with some additional work, could possibly be placed on the free
 * list.  We try to avoid recycling vnodes with lots of cached pages.  The
 * cache trigger level is calculated dynamically.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != 0)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	return (1);
}

/*
 * The vnode was found to be possibly freeable and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * freeable, doing some cleanups in the process.  Returns 1 if the vnode
 * can be freed, 0 otherwise.
 *
 * Note that v_holdcnt may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_holdcnt must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_holdcnt != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories) just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list))
			return(0);
	}
	return(1);
}

static int
vtrytomakefreeable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_holdcnt && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, NULL, 0, 0);
#if 0	/* DEBUG */
		printf((vp->v_holdcnt ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}
	return(vp->v_usecount == 1 && vp->v_holdcnt == 0);
}

static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point; don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    !vmightfree(vp, trigger)	/* critical path opt */
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakefreeable(vp, trigger)	/* critical path opt */
		) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		vgone(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}
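
#if 0	/* EXAMPLE */
/*
 * Worked example of the trigger calculation above, with illustrative
 * numbers (not from the source): with vmstats.v_page_count == 262144
 * (1GB of 4K pages) and desiredvnodes == 65536,
 *
 *	trigger = 262144 * 2 / 65536 = 8
 *
 * so vnodes caching 8 or more resident pages are passed over, and each
 * vlrureclaim() call examines mnt_nvnodelistsize / 10 + 1 vnodes.
 */
#endif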

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int s;
	int done;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kproc_suspend_loop();
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		done = 0;
		cache_cleanneg(0);
		lwkt_gettoken(&ilock, &mountlist_token);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			lwkt_gettokref(&ilock);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		lwkt_reltoken(&ilock);
		if (done == 0) {
			++vnlru_nowhere;
			tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 10 == 0)
				printf("vnlru_proc: vnode recycler stopped working!\n");
		} else {
			vnlru_nowhere = 0;
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}
625 
626 
627 /*
628  * Scan the vnodes under a mount point.  The first function is called
629  * with just the mountlist token held (no vnode lock).  The second
630  * function is called with the vnode VX locked.
631  */
632 int
633 vmntvnodescan(
634     struct mount *mp,
635     int flags,
636     int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
637     int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
638     void *data
639 ) {
640 	lwkt_tokref ilock;
641 	struct vnode *pvp;
642 	struct vnode *vp;
643 	int r = 0;
644 
645 	/*
646 	 * Scan the vnodes on the mount's vnode list.  Use a placemarker
647 	 */
648 	pvp = allocvnode_placemarker();
649 
650 	lwkt_gettoken(&ilock, &mntvnode_token);
651 	TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
652 
653 	while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) {
654 		/*
655 		 * Move the placemarker and skip other placemarkers we
656 		 * encounter.  The nothing can get in our way so the
657 		 * mount point on the vp must be valid.
658 		 */
659 		TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
660 		TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes);
661 		if (vp->v_flag & VPLACEMARKER)	/* another procs placemarker */
662 			continue;
663 		if (vp->v_type == VNON)		/* visible but not ready */
664 			continue;
665 		KKASSERT(vp->v_mount == mp);
666 
667 		/*
668 		 * Quick test.  A negative return continues the loop without
669 		 * calling the slow test.  0 continues onto the slow test.
670 		 * A positive number aborts the loop.
671 		 */
672 		if (fastfunc) {
673 			if ((r = fastfunc(mp, vp, data)) < 0)
674 				continue;
675 			if (r)
676 				break;
677 		}
678 
679 		/*
680 		 * Get a vxlock on the vnode, retry if it has moved or isn't
681 		 * in the mountlist where we expect it.
682 		 */
683 		if (slowfunc) {
684 			int error;
685 
686 			switch(flags) {
687 			case VMSC_GETVP:
688 				error = vget(vp, LK_EXCLUSIVE, curthread);
689 				break;
690 			case VMSC_GETVP|VMSC_NOWAIT:
691 				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
692 						curthread);
693 				break;
694 			case VMSC_GETVX:
695 				error = vx_get(vp);
696 				break;
697 			case VMSC_REFVP:
698 				vref(vp);
699 				/* fall through */
700 			default:
701 				error = 0;
702 				break;
703 			}
704 			if (error)
705 				continue;
706 			if (TAILQ_PREV(pvp, vnodelst, v_nmntvnodes) != vp)
707 				goto skip;
708 			if (vp->v_type == VNON)
709 				goto skip;
710 			r = slowfunc(mp, vp, data);
711 skip:
712 			switch(flags) {
713 			case VMSC_GETVP:
714 			case VMSC_GETVP|VMSC_NOWAIT:
715 				vput(vp);
716 				break;
717 			case VMSC_GETVX:
718 				vx_put(vp);
719 				break;
720 			case VMSC_REFVP:
721 				vrele(vp);
722 				/* fall through */
723 			default:
724 				break;
725 			}
726 			if (r != 0)
727 				break;
728 		}
729 	}
730 	TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
731 	freevnode_placemarker(pvp);
732 	lwkt_reltoken(&ilock);
733 	return(r);
734 }
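
#if 0	/* EXAMPLE */
/*
 * Sketch of a vmntvnodescan() caller, modeled on vflush() below; the
 * callbacks and the example_count_reg() wrapper are hypothetical.  The
 * fast function runs with only the mntvnode token held and may return
 * -1 (skip this vnode), 0 (run the slow function), or >0 (abort the
 * scan); the slow function runs with the vnode VX locked, and any
 * nonzero return aborts the scan.
 */
static int
example_fastfunc(struct mount *mp, struct vnode *vp, void *data)
{
	return (vp->v_type == VREG ? 0 : -1);	/* only visit regular files */
}

static int
example_slowfunc(struct mount *mp, struct vnode *vp, void *data)
{
	int *countp = data;

	++*countp;		/* vp is VX locked here */
	return (0);		/* keep scanning */
}

static int
example_count_reg(struct mount *mp)
{
	int count = 0;

	vmntvnodescan(mp, VMSC_GETVX, example_fastfunc, example_slowfunc,
			&count);
	return (count);
}
#endif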

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * if any are found, an error is returned (nb: this is a user error, not
 * a system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_usecount exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
			if (vx_lock(rootvp) == 0) {
				vgone(rootvp);
				vx_unlock(rootvp);
				vflush_info.busy = 0;
			}
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
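
#if 0	/* EXAMPLE */
/*
 * Sketch of how an unmount path typically drives vflush(), assuming
 * the filesystem holds one reference on its root vnode (rootrefs == 1).
 * Illustrative only; real VFS_UNMOUNT implementations vary.
 */
static int
example_unmount(struct mount *mp, int mntflags)
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/*
	 * Fails with EBUSY if vnodes other than the root (with exactly
	 * its rootrefs references) are still active and FORCECLOSE is
	 * not set.
	 */
	return (vflush(mp, 1, flags));
}
#endif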

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If our VX lock holds the only reference (v_usecount == 1),
	 * all we need to do is clear out the vnode data structures and
	 * we are done.
	 */
	if (vp->v_usecount == 1) {
		vgone(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode. For block
	 * or character devices, revert to an anonymous device. For
	 * all other files, just kill them.
	 */
	if (info->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone(vp);
		} else {
			vclean(vp, 0, info->td);
			vp->v_ops = &spec_vnode_vops;
			insmntque(vp, NULL);
		}
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}