/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.4 2004/12/29 02:40:02 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct lwkt_token mountlist_token;
struct lwkt_token mntvnode_token;


/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
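/*
 * Illustrative sketch (hypothetical, not part of this file): a filesystem's
 * node-allocation path would typically use getnewvnode() along these lines,
 * finishing the setup before unlocking.  The myfs_node structure, the vpp/np
 * variables, and the VT_UFS tag are stand-ins for whatever the caller
 * actually uses:
 *
 *	struct vnode *vp;
 *	struct myfs_node *np;		(hypothetical fs-private node)
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
 *	if (error)
 *		return (error);
 *	vp->v_type = VREG;
 *	vp->v_data = np;		(attach fs-private data)
 *	*vpp = vp;
 *	vx_unlock(vp);			(keep the vref, drop the VX lock)
 *
 * A caller that decides not to keep the vnode would vx_put() it instead.
 */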
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops_pp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops_pp;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags,
	lwkt_tokref_t interlkp, struct thread *td)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * note: interlkp is a serializer and thus can be safely
		 * held through any sleep
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	}
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
	vfs_busy(mp, LK_NOWAIT, NULL, td);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}
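/*
 * Worked example (illustrative only, not referenced elsewhere in this file):
 * the minor number handed to makeudev() packs the filesystem type into the
 * top byte and the 16-bit mntid_base into two separated byte fields.  For a
 * hypothetical vfc_typenum of 5 and mntid_base of 0x1234:
 *
 *	mtype                      = (5 & 0xFF) << 24    = 0x05000000
 *	(mntid_base & 0xFF00) << 8 = 0x1200 << 8         = 0x00120000
 *	(mntid_base & 0xFF)                              = 0x00000034
 *	minor = 0x05000000 | 0x00120000 | 0x34           = 0x05120034
 *
 * so successive mounts of the same filesystem type vary both the low byte
 * and the third byte of val[0].
 */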
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Return 0 if the vnode is not already on the free list, return 1 if the
 * vnode, with some additional work, could possibly be placed on the free list.
 */
static __inline int
vmightfree(struct vnode *vp, int use_count, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != use_count || vp->v_holdcnt)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	return (1);
}


static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    !vmightfree(vp, 0, trigger)	/* critical path opt */
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vmightfree(vp, 1, trigger)	/* critical path opt */
		) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		vgone(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int s;
	int done;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kproc_suspend_loop();
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		done = 0;
		cache_cleanneg(0);
		lwkt_gettoken(&ilock, &mountlist_token);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			lwkt_gettokref(&ilock);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		lwkt_reltoken(&ilock);
		if (done == 0) {
			++vnlru_nowhere;
			tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 10 == 0)
				printf("vnlru_proc: vnode recycler stopped working!\n");
		} else {
			vnlru_nowhere = 0;
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
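/*
 * Illustrative sketch (hypothetical, not part of this file): the recycler
 * above is demand driven.  A vnode allocation path that finds itself over
 * the limit would typically nudge it along these lines and simply retry,
 * since vnlru_proc_wait() wakes the vnlru thread at most once and then
 * sleeps briefly on vnlruproc_sig:
 *
 *	while (numvnodes - freevnodes > desiredvnodes)
 *		vnlru_proc_wait();
 */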
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}


/*
 * Scan the vnodes under a mount point.  The first function is called
 * with just the mntvnode_token held (no vnode lock).  The second
 * function is called with the vnode VX locked.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	lwkt_tokref ilock;
	struct vnode *pvp;
	struct vnode *vp;
	int r = 0;

	/*
	 * Scan the vnodes on the mount's vnode list.  Use a placemarker.
	 */
	pvp = allocvnode_placemarker();

	lwkt_gettoken(&ilock, &mntvnode_token);
	TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);

	while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) {
		/*
		 * Move the placemarker and skip other placemarkers we
		 * encounter.  Then nothing can get in our way, so the
		 * mount point on the vp must be valid.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
		TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes);
		if (vp->v_flag & VPLACEMARKER)	/* another proc's placemarker */
			continue;
		if (vp->v_type == VNON)		/* visible but not ready */
			continue;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0)
				continue;
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE, curthread);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
						curthread);
				break;
			case VMSC_GETVX:
				error = vx_get(vp);
				break;
			case VMSC_REFVP:
				vref(vp);
				/* fall through */
			default:
				error = 0;
				break;
			}
			if (error)
				continue;
			if (TAILQ_PREV(pvp, vnodelst, v_nmntvnodes) != vp)
				goto skip;
			if (vp->v_type == VNON)
				goto skip;
			r = slowfunc(mp, vp, data);
skip:
			switch(flags) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			case VMSC_REFVP:
				vrele(vp);
				/* fall through */
			default:
				break;
			}
			if (r != 0)
				break;
		}
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
	freevnode_placemarker(pvp);
	lwkt_reltoken(&ilock);
	return(r);
}
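/*
 * Illustrative sketch (hypothetical, not part of this file): a filesystem
 * might walk its vnodes with vmntvnodescan() roughly as follows.  The
 * callback names are made up for the example; the fast function runs with
 * only mntvnode_token held and returns -1 to skip a vnode cheaply, while
 * the slow function runs with the vnode held according to the VMSC_* flag:
 *
 *	static int
 *	myfs_scan_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_type != VREG)
 *			return (-1);	(skip without locking the vnode)
 *		return (0);		(fall through to the slow function)
 *	}
 *
 *	static int
 *	myfs_scan_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		(vnode is VX locked here when VMSC_GETVX is used)
 *		return (0);		(non-zero aborts the scan)
 *	}
 *
 *	error = vmntvnodescan(mp, VMSC_GETVX,
 *				myfs_scan_fast, myfs_scan_slow, NULL);
 */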
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
			if (vx_lock(rootvp) == 0) {
				vgone(rootvp);
				vx_unlock(rootvp);
				vflush_info.busy = 0;
			}
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
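/*
 * Illustrative sketch (hypothetical, not from this file): an unmount path
 * for a filesystem that keeps one long-lived reference on its root vnode
 * would typically pass rootrefs == 1 and let vflush() drop that reference
 * on success, mapping a forced unmount request to FORCECLOSE:
 *
 *	if ((error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0)))
 *		return (error);
 *
 * A filesystem holding no extra root reference would pass rootrefs == 0 and
 * manage the root vnode itself.
 */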
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * With no references other than our own VX reference
	 * (v_usecount == 1), all we need to do is clear out the
	 * vnode data structures and we are done.
	 */
	if (vp->v_usecount == 1) {
		vgone(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.  For block
	 * or character devices, revert to an anonymous device.  For
	 * all other files, just kill them.
	 */
	if (info->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone(vp);
		} else {
			vclean(vp, 0, info->td);
			vp->v_ops = &spec_vnode_vops;
			insmntque(vp, NULL);
		}
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}