/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntvnode_token, "mntvnode");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from its mount point's vnode list.  We must update any list scans
 * which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

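/*
 * Note: the loop above advances the cursor (info->vp) of any concurrent
 * vmntvnodescan() that is currently parked on the vnode being removed,
 * so an in-progress scan simply continues with the next vnode on the
 * list instead of following a stale pointer.
 */
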
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}

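/*
 * Typical usage sketch (illustrative): a caller that must prevent an
 * unmount while it operates on a mount point brackets the work with
 * vfs_busy()/vfs_unbusy():
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		... operate on mp ...
 *		vfs_unbusy(mp);
 *	}
 *
 * mountlist_scan() below uses this pattern around its callback.
 */
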
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

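/*
 * Usage sketch (illustrative, not a fixed convention): a filesystem's
 * mount code builds a template fsid from something stable and lets
 * vfs_setfsid() bump val[1] until it no longer collides with a mounted
 * filesystem:
 *
 *	fsid_t tmpl;
 *
 *	tmpl.val[0] = <stable identifier for this volume>;
 *	tmpl.val[1] = mp->mnt_vfc->vfc_typenum;
 *	vfs_setfsid(mp, &tmpl);
 *
 * The non-zero return value only reports that the template had to be
 * adjusted.
 */
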
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories) just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mntvnode_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	tsleep_interlock(&vnlruproc_sig, 0);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, PINTERLOCKED, "vlruwk", hz);
}

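/*
 * Note on the handshake between vnlru_proc_wait() above and vnlru_proc()
 * below: vnlruproc_sig is set to 1 here to request a recycling run and to
 * suppress redundant wakeups; vnlru_proc() clears it and issues a
 * wakeup(&vnlruproc_sig) once the vnode count is back under its threshold,
 * releasing any threads sleeping in vnlru_proc_wait().
 */
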
static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any, repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by the VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}

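/*
 * Usage sketch: the callback has the same shape as vlrureclaim() above
 * and the scan is kicked off the way vnlru_proc() does it:
 *
 *	done = mountlist_scan(vlrureclaim, &info, MNTSCAN_FORWARD);
 *
 * Each mount is busied around the callback unless MNTSCAN_NOBUSY is given,
 * and the per-mount return values are summed into the result.
 */
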
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_gettoken(&mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&mntvnode_token);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mntvnode_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mntvnode_token);
	lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mntvnode_token);
	return(r);
}

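/*
 * Usage sketch: vflush() below walks a mount this way, taking a VX lock
 * on each vnode and letting vflush_scan() decide what to do with it:
 *
 *	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);
 *
 * Passing a fastfunc of NULL skips the unlocked quick test entirely.
 */
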
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

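/*
 * Usage sketch (illustrative): an unmount path typically flushes the
 * mount's vnodes with something like
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * with rootrefs > 0 only when the filesystem keeps its own reference
 * to the root vnode.
 */
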
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (info->flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 *	    io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
