/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntvnode_token, "mntvnode");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
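
/*
 * Usage sketch (illustrative only; example_poke_mount() is hypothetical
 * and not part of this file): callers normally bracket work on a mount
 * point with vfs_busy() and vfs_unbusy() so the filesystem cannot be
 * unmounted out from under them.  Passing LK_NOWAIT turns a pending
 * unmount into an immediate ENOENT instead of a sleep.
 *
 *	static int
 *	example_poke_mount(struct mount *mp)
 *	{
 *		if (vfs_busy(mp, LK_NOWAIT) != 0)
 *			return (ENOENT);
 *		kprintf("busied %s\n", mp->mnt_stat.f_mntfromname);
 *		vfs_unbusy(mp);
 *		return (0);
 *	}
 */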

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}
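
/*
 * Usage sketch (illustrative only; the values below are made up): a
 * filesystem's mount code can either ask for a wholly synthetic fsid
 * via vfs_getnewfsid(), or propose a preferred template and let
 * vfs_setfsid() bump val[1] until it no longer collides with an
 * existing mount.
 *
 *	fsid_t tmpl;
 *
 *	tmpl.val[0] = 0x12345678;
 *	tmpl.val[1] = mp->mnt_vfc->vfc_typenum;
 *	if (vfs_setfsid(mp, &tmpl))
 *		kprintf("fsid collided, adjusted to %08x:%08x\n",
 *			mp->mnt_stat.f_fsid.val[0],
 *			mp->mnt_stat.f_fsid.val[1]);
 */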

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}
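
/*
 * For reference (summary of the aging switch in vmightfree() above):
 * the VAGE0/VAGE1 bits gate which vnodes each recycling pass may touch,
 * so rarely reused vnodes are recycled first.
 *
 *	pass 0: only vnodes with both VAGE0 and VAGE1 set
 *	pass 1: also vnodes with only VAGE1 set
 *	pass 2: also vnodes with only VAGE0 set
 *	pass 3: any vnode, regardless of the aging bits
 */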

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mntvnode_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	tsleep_interlock(&vnlruproc_sig, 0);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, PINTERLOCKED, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}
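
/*
 * Usage sketch (illustrative only; example_check_rdonly() is hypothetical
 * and not part of this file): mountlist_interlock() above lets a caller
 * run a small test-and-act callback serialized against other mountlist
 * operations.
 *
 *	static int
 *	example_check_rdonly(struct mount *mp)
 *	{
 *		return ((mp->mnt_flag & MNT_RDONLY) ? EROFS : 0);
 *	}
 *
 *	error = mountlist_interlock(example_check_rdonly, mp);
 */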

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
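
/*
 * Usage sketch (illustrative only; example_count_vnodes() is hypothetical
 * and not part of this file): a typical mountlist_scan() consumer busies
 * each mount in turn (the default) and aggregates a per-mount value via
 * the callback return.
 *
 *	static int
 *	example_count_vnodes(struct mount *mp, void *data __unused)
 *	{
 *		return (mp->mnt_nvnodelistsize);
 *	}
 *
 *	total = mountlist_scan(example_count_vnodes, NULL, MNTSCAN_FORWARD);
 */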

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_gettoken(&mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&mntvnode_token);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mntvnode_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mntvnode_token);
	lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mntvnode_token);
	return(r);
}
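
/*
 * Usage sketch (illustrative only; both callbacks are hypothetical and
 * not part of this file): vflush() below is the in-tree consumer of
 * vmntvnodescan().  A scan that filters cheaply without a vnode lock
 * and then does blocking work under a VX lock might look like this.
 * Returning -1 from the fast callback skips the slow one for that vnode;
 * a non-zero return from the slow callback terminates the scan.
 *
 *	static int
 *	example_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return ((vp->v_flag & VSYSTEM) ? -1 : 0);
 *	}
 *
 *	static int
 *	example_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (vinvalbuf(vp, V_SAVE, 0, 0));
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVX, example_fast, example_slow, NULL);
 */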

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	     vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (info->flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 *	    io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
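
/*
 * Registration sketch (illustrative only; the names are hypothetical and
 * only the io_sync field is shown, assuming it takes a struct mount
 * pointer and returns void as the calls in bio_ops_sync() suggest): a
 * module providing dependency-tracking hooks registers a bio_ops at load
 * time and removes it at unload, after which bio_ops_sync() will include
 * it in the general (mp == NULL) sync pass.
 *
 *	static void
 *	example_io_sync(struct mount *mp)
 *	{
 *		kprintf("example sync, mp=%p\n", mp);
 *	}
 *
 *	static struct bio_ops example_bio_ops = {
 *		.io_sync = example_io_sync,
 *	};
 *
 *	add_bio_ops(&example_bio_ops);
 *	...
 *	rem_bio_ops(&example_bio_ops);
 */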