xref: /dflybsd-src/sys/kern/vfs_mount.c (revision a31d362788e8f158ecc861d1247b0648ccd22f9a)
/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
			mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.  Or, if he wishes to return a normal locked
 * vnode, call vx_downgrade(vp) to downgrade the VX lock to a normal
 * VN lock.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
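
/*
 * Example (illustrative sketch): how a filesystem might use getnewvnode()
 * from its inode-to-vnode path.  VT_MYFS and struct myfs_inode are
 * hypothetical names, not part of this file.
 */
#if 0
static int
myfs_alloc_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;

	getnewvnode(VT_MYFS, mp, &vp, 0, 0);	/* returns VX locked + refd */
	vp->v_type = VREG;			/* set up remaining fields */
	vp->v_data = ip;
	vx_downgrade(vp);			/* return a normal VN-locked vnode */
	*vpp = vp;
	return (0);
}
#endif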

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
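
/*
 * Example (illustrative sketch): the usual vfs_busy()/vfs_unbusy() bracket
 * used to interlock an operation against a concurrent unmount.  The
 * do_something() call is a hypothetical stand-in for real work.
 */
#if 0
	if (vfs_busy(mp, LK_NOWAIT) == 0) {
		do_something(mp);	/* fs cannot be unmounted here */
		vfs_unbusy(mp);
	}
#endif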

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
	lockinit(&mp->mnt_renlock, "renamlk", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lockinit(&mp->mnt_renlock, "renamlk", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_op = ops;
	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
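
/*
 * Example (illustrative sketch): vfs_getvfs() returns a held mount which
 * the caller must release via mount_drop() when finished with it.
 */
#if 0
	struct mount *mp;

	mp = vfs_getvfs(&fsid);
	if (mp) {
		/* use mp; the hold keeps it from being freed */
		mount_drop(mp);
	}
#endif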

/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}
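
/*
 * Worked example of the trigger above: with maxvnodes = 100000, recycling
 * starts once numvnodes >= 90000 and at least 50000 vnodes are cached and
 * inactive.  At numvnodes = 95000 the initial count is 95000 - 90000 = 5000,
 * clamped to ncachedandinactive / 100 (600 when 60000 are inactive), with a
 * floor of 5.
 */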

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  The filesystem should attempt
 * to supply a unique fsid, but if a duplicate occurs the fsid is adjusted
 * here to ensure uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}
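
/*
 * Worked example of the collision rotation above: the two fields selected
 * by the minor device mask are packed into a 24-bit value which is then
 * incremented.  Starting from val[0] = 0x1234FF78, the pack step yields
 * 0x00123478, the increment 0x00123479, and the unpack 0x12340079, so each
 * retry advances the 24-bit minor number by one.
 */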

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
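
/*
 * Example (illustrative sketch): a minimal interlock callback.  The
 * callback runs with mountlist_token held and must be short-lived;
 * check_unmounting() is hypothetical.
 */
#if 0
static int
check_unmounting(struct mount *mp)
{
	return ((mp->mnt_kern_flag & MNTK_UNMOUNT) ? EBUSY : 0);
}

	/* serialized versus other mountlist_interlock() callers */
	error = mountlist_interlock(check_unmounting, mp);
#endif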

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly used
 * by the VFS quota code to check if a cached nullfs struct mount pointer
 * is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
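
/*
 * Example (illustrative sketch): counting mounts with mountlist_scan().
 * Each non-negative callback return value is added to the aggregate result.
 */
#if 0
static int
count_one(struct mount *mp, void *data)
{
	return (1);	/* contributes 1 per (busied) mount */
}

	nmounts = mountlist_scan(count_one, NULL, MNTSCAN_FORWARD);
#endif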

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
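
/*
 * Example (illustrative sketch): a fastfunc/slowfunc pair for
 * vmntvnodescan().  The fastfunc pre-filters without blocking; the
 * slowfunc runs with the vnode vget()'d and locked and may block.
 */
#if 0
static int
scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	if (vp->v_type != VREG)
		return (-1);	/* < 0: skip, do not call slowfunc */
	return (0);		/* 0: proceed to slowfunc */
}

static int
scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vnode is locked here; returning non-zero terminates the scan */
	return (0);
}

	vmntvnodescan(mp, VMSC_GETVP, scan_fast, scan_slow, NULL);
#endif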

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
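
/*
 * Example (illustrative sketch): a typical call from a filesystem's
 * unmount path, flushing every vnode and forcing the issue when
 * MNT_FORCE was given.
 */
#if 0
	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
	if (error)
		return (error);
#endif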

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
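
/*
 * Example (illustrative sketch): registering bio_ops at module load and
 * removing them at unload.  myfs_bio_ops and myfs_io_sync are hypothetical;
 * io_sync is the hook exercised by bio_ops_sync() below.
 */
#if 0
static struct bio_ops myfs_bio_ops = {
	.io_sync = myfs_io_sync,
};

	add_bio_ops(&myfs_bio_ops);	/* at load */
	rem_bio_ops(&myfs_bio_ops);	/* at unload */
#endif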

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}
1247