/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
			mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
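
/*
 * Illustrative sketch (not compiled): how a filesystem might consume
 * getnewvnode() per the comment above.  "myfs_inode" and the function
 * name are hypothetical placeholders, not part of this file; the point
 * is the pattern of filling in v_data/v_type and then vx_unlock()ing
 * to keep a reference on the returned vnode.
 */
#if 0
static int
myfs_alloc_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0); /* tag: fs's own vtagtype */
	if (error)
		return (error);
	vp->v_data = ip;		/* hook up fs-private state */
	vp->v_type = VREG;		/* set from the backing inode */
	*vpp = vp;
	vx_unlock(vp);			/* keep the ref, drop the VX lock */
	return (0);
}
#endif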

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
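
/*
 * Illustrative sketch (not compiled): the typical vfs_busy()/vfs_unbusy()
 * interlock pattern described above.  The function name and body are
 * placeholders; LK_NOWAIT simply skips a mount that is being unmounted.
 */
#if 0
static void
example_poke_mount(struct mount *mp)
{
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return;		/* unmount in progress, skip */
	/* ... operate on mp; it cannot be unmounted while busied ... */
	vfs_unbusy(mp);
}
#endif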

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_op = ops;
	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
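
/*
 * Illustrative sketch (not compiled): looking up a mount by fsid.  As the
 * comment above notes, a non-NULL result is returned held and must be
 * released with mount_drop().  The function name is a placeholder.
 */
#if 0
static int
example_fsid_lookup(fsid_t *fsid)
{
	struct mount *mp;

	mp = vfs_getvfs(fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ... use mp; the hold keeps the structure from being freed ... */
	mount_drop(mp);
	return (0);
}
#endif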

/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  Filesystem should attempt to
 * supply a unique fsid but if a duplicate occurs adjust the fsid to ensure
 * uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
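
/*
 * Illustrative sketch (not compiled): a short-lived callback run through
 * mountlist_interlock().  The test itself is a placeholder; the point is
 * that the callback executes with mountlist_token held and must not block
 * for long.
 */
#if 0
static int
example_is_first_mount(struct mount *mp)
{
	return (TAILQ_FIRST(&mountlist) == mp ? 0 : EBUSY);
}

/* caller:  error = mountlist_interlock(example_is_first_mount, mp); */
#endif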

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterward.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
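
/*
 * Illustrative sketch (not compiled): scanning all mounts with
 * mountlist_scan().  The callback names are placeholders.  Per the rules
 * above, each mount is busied around the callback (unless MNTSCAN_NOBUSY)
 * and non-negative return values are summed; a negative return would
 * terminate the scan.
 */
#if 0
static int
example_count_rdonly_cb(struct mount *mp, void *data)
{
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
example_count_rdonly_mounts(void)
{
	return (mountlist_scan(example_count_rdonly_cb, NULL,
			       MNTSCAN_FORWARD));
}
#endif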

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
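
/*
 * Illustrative sketch (not compiled): a vmntvnodescan() caller using a
 * cheap unlocked pre-filter and a locked slow function, following the
 * rules documented above.  The predicate and names are placeholders; the
 * shape (fastfunc returns < 0 to skip, slowfunc runs with the vnode
 * vget()'d per VMSC_GETVP) is the documented contract.
 */
#if 0
static int
example_scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	/* cheap, unlocked test: only bother with regular files */
	if (vp->v_type != VREG)
		return (-1);	/* skip, slowfunc not called */
	return (0);		/* proceed to slowfunc */
}

static int
example_scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vnode is referenced and locked here; blocking is allowed */
	/* ... examine or flush the vnode ... */
	return (0);		/* non-zero would terminate the scan */
}

static void
example_scan(struct mount *mp)
{
	vmntvnodescan(mp, VMSC_GETVP, example_scan_fast, example_scan_slow,
		      NULL);
}
#endif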

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
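
/*
 * Illustrative sketch (not compiled): how an unmount path might call
 * vflush().  The function name and surrounding unmount logic are
 * placeholders.  rootrefs == 1 accounts for the single reference the
 * caller expects the root vnode to have; FORCECLOSE is only added for
 * forced unmounts.
 */
#if 0
static int
example_unmount_flush(struct mount *mp, int mntflags)
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, 1, flags));
}
#endif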

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	     (VOP_GETATTR(vp, &vattr) == 0 &&
	      vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
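
/*
 * Illustrative sketch (not compiled): registering a bio_ops vector so
 * that bio_ops_sync() below will invoke it.  Only the io_sync member is
 * shown because that is the only callback this file uses; the real
 * structure has additional members and the names here are placeholders.
 */
#if 0
static void
example_io_sync(struct mount *mp)
{
	/* mp may be NULL when called from the general syncing code */
}

static struct bio_ops example_bio_ops = {
	.io_sync = example_io_sync
};

/* registration:  add_bio_ops(&example_bio_ops); */
#endif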

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}