/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.3 2004/12/17 00:18:07 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct lwkt_token mountlist_token;
struct lwkt_token mntvnode_token;

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops_pp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops_pp;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  The interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags,
	lwkt_tokref_t interlkp, struct thread *td)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * note: interlkp is a serializer and thus can be safely
		 * held through any sleep
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	}
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
	vfs_busy(mp, LK_NOWAIT, NULL, td);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Return 0 if the vnode is not already on the free list, return 1 if the
 * vnode, with some additional work, could possibly be placed on the free list.
 */
static __inline int
vmightfree(struct vnode *vp, int use_count, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != use_count || vp->v_holdcnt)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	return (1);
}

static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    !vmightfree(vp, 0, trigger)	/* critical path opt */
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vmightfree(vp, 1, trigger)	/* critical path opt */
		) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		vgone(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int s;
	int done;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kproc_suspend_loop();
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		done = 0;
		cache_cleanneg(0);
		lwkt_gettoken(&ilock, &mountlist_token);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			lwkt_gettokref(&ilock);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		lwkt_reltoken(&ilock);
		if (done == 0) {
			++vnlru_nowhere;
			tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 10 == 0)
				printf("vnlru_proc: vnode recycler stopped working!\n");
		} else {
			vnlru_nowhere = 0;
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}

/*
 * Scan the vnodes under a mount point.  The first function is called
 * with just the mntvnode_token held (no vnode lock).  The second
 * function is called with the vnode VX locked.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	lwkt_tokref ilock;
	struct vnode *pvp;
	struct vnode *vp;
	int r = 0;

	/*
	 * Scan the vnodes on the mount's vnode list.  Use a placemarker.
	 */
	pvp = allocvnode_placemarker();

	lwkt_gettoken(&ilock, &mntvnode_token);
	TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);

	while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) {
		/*
		 * Move the placemarker and skip other placemarkers we
		 * encounter.  Nothing can get in our way, so the
		 * mount point on the vp must be valid.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
		TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes);
		if (vp->v_flag & VPLACEMARKER)	/* another proc's placemarker */
			continue;
		if (vp->v_type == VNON)		/* visible but not ready */
			continue;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0)
				continue;
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE, curthread);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
						curthread);
				break;
			case VMSC_GETVX:
				error = vx_get(vp);
				break;
			case VMSC_REFVP:
				vref(vp);
				/* fall through */
			default:
				error = 0;
				break;
			}
			if (error)
				continue;
			if (TAILQ_PREV(pvp, vnodelst, v_nmntvnodes) != vp)
				goto skip;
			if (vp->v_type == VNON)
				goto skip;
			r = slowfunc(mp, vp, data);
skip:
			switch(flags) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			case VMSC_REFVP:
				vrele(vp);
				/* fall through */
			default:
				break;
			}
			if (r != 0)
				break;
		}
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
	freevnode_placemarker(pvp);
	lwkt_reltoken(&ilock);
	return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
			if (vx_lock(rootvp) == 0) {
				vgone(rootvp);
				vx_unlock(rootvp);
				vflush_info.busy = 0;
			}
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * With v_usecount == 1 (our own VX reference being the only
	 * remaining reference), all we need to do is clear out the
	 * vnode data structures and we are done.
	 */
	if (vp->v_usecount == 1) {
		vgone(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.  For block
	 * or character devices, revert to an anonymous device.  For
	 * all other files, just kill them.
	 */
	if (info->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone(vp);
		} else {
			vclean(vp, 0, info->td);
			vp->v_ops = &spec_vnode_vops;
			insmntque(vp, NULL);
		}
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}
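
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * one plausible way a caller could drive vmntvnodescan() with a fast/slow
 * callback pair, here counting vnodes and resident VM pages on a mount.
 * The names example_fastfunc/example_slowfunc/example_scan and struct
 * scan_totals are hypothetical; only vmntvnodescan(), the VMSC_GETVX flag,
 * and the callback signatures above come from this file.  Guarded with
 * #if 0 so it is never compiled.
 */
#if 0
struct scan_totals {
	int vnodes;
	int pages;
};

/*
 * Fast test, called with only mntvnode_token held.  Returning a negative
 * value skips this vnode without invoking the slow callback; returning 0
 * proceeds to the VX locked slow pass.
 */
static int
example_fastfunc(struct mount *mp, struct vnode *vp, void *data)
{
	if (vp->v_object == NULL)
		return (-1);	/* nothing to count, skip the slow pass */
	return (0);
}

/*
 * Slow test, called with the vnode VX locked (VMSC_GETVX).  Returning a
 * non-zero value would abort the scan; returning 0 continues it.
 */
static int
example_slowfunc(struct mount *mp, struct vnode *vp, void *data)
{
	struct scan_totals *totals = data;

	++totals->vnodes;
	if (vp->v_object)
		totals->pages += vp->v_object->resident_page_count;
	return (0);
}

static int
example_scan(struct mount *mp)
{
	struct scan_totals totals = { 0, 0 };

	vmntvnodescan(mp, VMSC_GETVX, example_fastfunc, example_slowfunc,
			&totals);
	return (totals.vnodes);
}
#endif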