1 /* 2 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * Copyright (c) 1989, 1993 35 * The Regents of the University of California. All rights reserved. 36 * (c) UNIX System Laboratories, Inc. 37 * All or some portions of this file are derived from material licensed 38 * to the University of California by American Telephone and Telegraph 39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 40 * the permission of UNIX System Laboratories, Inc. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 3. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 */ 66 67 /* 68 * External virtual filesystem routines 69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/kernel.h> 74 #include <sys/malloc.h> 75 #include <sys/mount.h> 76 #include <sys/proc.h> 77 #include <sys/vnode.h> 78 #include <sys/buf.h> 79 #include <sys/eventhandler.h> 80 #include <sys/kthread.h> 81 #include <sys/sysctl.h> 82 83 #include <machine/limits.h> 84 85 #include <sys/buf2.h> 86 #include <sys/thread2.h> 87 #include <sys/sysref2.h> 88 89 #include <vm/vm.h> 90 #include <vm/vm_object.h> 91 92 struct mountscan_info { 93 TAILQ_ENTRY(mountscan_info) msi_entry; 94 int msi_how; 95 struct mount *msi_node; 96 }; 97 98 struct vmntvnodescan_info { 99 TAILQ_ENTRY(vmntvnodescan_info) entry; 100 struct vnode *vp; 101 }; 102 103 struct vnlru_info { 104 int pass; 105 }; 106 107 static int vnlru_nowhere = 0; 108 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD, 109 &vnlru_nowhere, 0, 110 "Number of times the vnlru process ran without success"); 111 112 113 static struct lwkt_token mntid_token; 114 static struct mount dummymount; 115 116 /* note: mountlist exported to pstat */ 117 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); 118 static TAILQ_HEAD(,mountscan_info) mountscan_list; 119 static struct lwkt_token mountlist_token; 120 121 static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list); 122 123 /* 124 * Called from vfsinit() 125 */ 126 void 127 vfs_mount_init(void) 128 { 129 lwkt_token_init(&mountlist_token, "mntlist"); 130 lwkt_token_init(&mntid_token, "mntid"); 131 TAILQ_INIT(&mountscan_list); 132 mount_init(&dummymount); 133 dummymount.mnt_flag |= MNT_RDONLY; 134 dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE; 135 } 136 137 /* 138 * Support function called to remove a vnode from the mountlist and 139 * deal with side effects for scans in progress. 140 * 141 * Target mnt_token is held on call. 142 */ 143 static void 144 vremovevnodemnt(struct vnode *vp) 145 { 146 struct vmntvnodescan_info *info; 147 struct mount *mp = vp->v_mount; 148 149 TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) { 150 if (info->vp == vp) 151 info->vp = TAILQ_NEXT(vp, v_nmntvnodes); 152 } 153 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes); 154 } 155 156 /* 157 * Allocate a new vnode and associate it with a tag, mount point, and 158 * operations vector. 159 * 160 * A VX locked and refd vnode is returned. The caller should setup the 161 * remaining fields and vx_put() or, if he wishes to leave a vref, 162 * vx_unlock() the vnode. 163 */ 164 int 165 getnewvnode(enum vtagtype tag, struct mount *mp, 166 struct vnode **vpp, int lktimeout, int lkflags) 167 { 168 struct vnode *vp; 169 170 KKASSERT(mp != NULL); 171 172 vp = allocvnode(lktimeout, lkflags); 173 vp->v_tag = tag; 174 vp->v_data = NULL; 175 176 /* 177 * By default the vnode is assigned the mount point's normal 178 * operations vector. 179 */ 180 vp->v_ops = &mp->mnt_vn_use_ops; 181 182 /* 183 * Placing the vnode on the mount point's queue makes it visible. 184 * VNON prevents it from being messed with, however. 185 */ 186 insmntque(vp, mp); 187 188 /* 189 * A VX locked & refd vnode is returned. 190 */ 191 *vpp = vp; 192 return (0); 193 } 194 195 /* 196 * This function creates vnodes with special operations vectors. The 197 * mount point is optional. 198 * 199 * This routine is being phased out but is still used by vfs_conf to 200 * create vnodes for devices prior to the root mount (with mp == NULL). 201 */ 202 int 203 getspecialvnode(enum vtagtype tag, struct mount *mp, 204 struct vop_ops **ops, 205 struct vnode **vpp, int lktimeout, int lkflags) 206 { 207 struct vnode *vp; 208 209 vp = allocvnode(lktimeout, lkflags); 210 vp->v_tag = tag; 211 vp->v_data = NULL; 212 vp->v_ops = ops; 213 214 if (mp == NULL) 215 mp = &dummymount; 216 217 /* 218 * Placing the vnode on the mount point's queue makes it visible. 219 * VNON prevents it from being messed with, however. 220 */ 221 insmntque(vp, mp); 222 223 /* 224 * A VX locked & refd vnode is returned. 225 */ 226 *vpp = vp; 227 return (0); 228 } 229 230 /* 231 * Interlock against an unmount, return 0 on success, non-zero on failure. 232 * 233 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount 234 * is in-progress. 235 * 236 * If no unmount is in-progress LK_NOWAIT is ignored. No other flag bits 237 * are used. A shared locked will be obtained and the filesystem will not 238 * be unmountable until the lock is released. 239 */ 240 int 241 vfs_busy(struct mount *mp, int flags) 242 { 243 int lkflags; 244 245 atomic_add_int(&mp->mnt_refs, 1); 246 lwkt_gettoken(&mp->mnt_token); 247 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 248 if (flags & LK_NOWAIT) { 249 lwkt_reltoken(&mp->mnt_token); 250 atomic_add_int(&mp->mnt_refs, -1); 251 return (ENOENT); 252 } 253 /* XXX not MP safe */ 254 mp->mnt_kern_flag |= MNTK_MWAIT; 255 /* 256 * Since all busy locks are shared except the exclusive 257 * lock granted when unmounting, the only place that a 258 * wakeup needs to be done is at the release of the 259 * exclusive lock at the end of dounmount. 260 */ 261 tsleep((caddr_t)mp, 0, "vfs_busy", 0); 262 lwkt_reltoken(&mp->mnt_token); 263 atomic_add_int(&mp->mnt_refs, -1); 264 return (ENOENT); 265 } 266 lkflags = LK_SHARED; 267 if (lockmgr(&mp->mnt_lock, lkflags)) 268 panic("vfs_busy: unexpected lock failure"); 269 lwkt_reltoken(&mp->mnt_token); 270 return (0); 271 } 272 273 /* 274 * Free a busy filesystem. 275 * 276 * Decrement refs before releasing the lock so e.g. a pending umount 277 * doesn't give us an unexpected busy error. 278 */ 279 void 280 vfs_unbusy(struct mount *mp) 281 { 282 atomic_add_int(&mp->mnt_refs, -1); 283 lockmgr(&mp->mnt_lock, LK_RELEASE); 284 } 285 286 /* 287 * Lookup a filesystem type, and if found allocate and initialize 288 * a mount structure for it. 289 * 290 * Devname is usually updated by mount(8) after booting. 291 */ 292 int 293 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp) 294 { 295 struct vfsconf *vfsp; 296 struct mount *mp; 297 298 if (fstypename == NULL) 299 return (ENODEV); 300 301 vfsp = vfsconf_find_by_name(fstypename); 302 if (vfsp == NULL) 303 return (ENODEV); 304 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO); 305 mount_init(mp); 306 lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0); 307 308 vfs_busy(mp, 0); 309 mp->mnt_vfc = vfsp; 310 mp->mnt_op = vfsp->vfc_vfsops; 311 vfsp->vfc_refcount++; 312 mp->mnt_stat.f_type = vfsp->vfc_typenum; 313 mp->mnt_flag |= MNT_RDONLY; 314 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 315 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 316 copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); 317 *mpp = mp; 318 return (0); 319 } 320 321 /* 322 * Basic mount structure initialization 323 */ 324 void 325 mount_init(struct mount *mp) 326 { 327 lockinit(&mp->mnt_lock, "vfslock", hz*5, 0); 328 lwkt_token_init(&mp->mnt_token, "permnt"); 329 330 TAILQ_INIT(&mp->mnt_vnodescan_list); 331 TAILQ_INIT(&mp->mnt_nvnodelist); 332 TAILQ_INIT(&mp->mnt_reservedvnlist); 333 TAILQ_INIT(&mp->mnt_jlist); 334 mp->mnt_nvnodelistsize = 0; 335 mp->mnt_flag = 0; 336 mp->mnt_iosize_max = MAXPHYS; 337 vn_syncer_thr_create(mp); 338 } 339 340 /* 341 * Lookup a mount point by filesystem identifier. 342 */ 343 struct mount * 344 vfs_getvfs(fsid_t *fsid) 345 { 346 struct mount *mp; 347 348 lwkt_gettoken(&mountlist_token); 349 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 350 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 351 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 352 break; 353 } 354 } 355 lwkt_reltoken(&mountlist_token); 356 return (mp); 357 } 358 359 /* 360 * Get a new unique fsid. Try to make its val[0] unique, since this value 361 * will be used to create fake device numbers for stat(). Also try (but 362 * not so hard) make its val[0] unique mod 2^16, since some emulators only 363 * support 16-bit device numbers. We end up with unique val[0]'s for the 364 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 365 * 366 * Keep in mind that several mounts may be running in parallel. Starting 367 * the search one past where the previous search terminated is both a 368 * micro-optimization and a defense against returning the same fsid to 369 * different mounts. 370 */ 371 void 372 vfs_getnewfsid(struct mount *mp) 373 { 374 static u_int16_t mntid_base; 375 fsid_t tfsid; 376 int mtype; 377 378 lwkt_gettoken(&mntid_token); 379 mtype = mp->mnt_vfc->vfc_typenum; 380 tfsid.val[1] = mtype; 381 mtype = (mtype & 0xFF) << 24; 382 for (;;) { 383 tfsid.val[0] = makeudev(255, 384 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 385 mntid_base++; 386 if (vfs_getvfs(&tfsid) == NULL) 387 break; 388 } 389 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 390 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 391 lwkt_reltoken(&mntid_token); 392 } 393 394 /* 395 * Set the FSID for a new mount point to the template. Adjust 396 * the FSID to avoid collisions. 397 */ 398 int 399 vfs_setfsid(struct mount *mp, fsid_t *template) 400 { 401 int didmunge = 0; 402 403 bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid)); 404 for (;;) { 405 if (vfs_getvfs(template) == NULL) 406 break; 407 didmunge = 1; 408 ++template->val[1]; 409 } 410 mp->mnt_stat.f_fsid = *template; 411 return(didmunge); 412 } 413 414 /* 415 * This routine is called when we have too many vnodes. It attempts 416 * to free <count> vnodes and will potentially free vnodes that still 417 * have VM backing store (VM backing store is typically the cause 418 * of a vnode blowout so we want to do this). Therefore, this operation 419 * is not considered cheap. 420 * 421 * A number of conditions may prevent a vnode from being reclaimed. 422 * the buffer cache may have references on the vnode, a directory 423 * vnode may still have references due to the namei cache representing 424 * underlying files, or the vnode may be in active use. It is not 425 * desireable to reuse such vnodes. These conditions may cause the 426 * number of vnodes to reach some minimum value regardless of what 427 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 428 */ 429 430 /* 431 * This is a quick non-blocking check to determine if the vnode is a good 432 * candidate for being (eventually) vgone()'d. Returns 0 if the vnode is 433 * not a good candidate, 1 if it is. 434 */ 435 static __inline int 436 vmightfree(struct vnode *vp, int page_count, int pass) 437 { 438 if (vp->v_flag & VRECLAIMED) 439 return (0); 440 #if 0 441 if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache)) 442 return (0); 443 #endif 444 if (sysref_isactive(&vp->v_sysref)) 445 return (0); 446 if (vp->v_object && vp->v_object->resident_page_count >= page_count) 447 return (0); 448 449 /* 450 * XXX horrible hack. Up to four passes will be taken. Each pass 451 * makes a larger set of vnodes eligible. For now what this really 452 * means is that we try to recycle files opened only once before 453 * recycling files opened multiple times. 454 */ 455 switch(vp->v_flag & (VAGE0 | VAGE1)) { 456 case 0: 457 if (pass < 3) 458 return(0); 459 break; 460 case VAGE0: 461 if (pass < 2) 462 return(0); 463 break; 464 case VAGE1: 465 if (pass < 1) 466 return(0); 467 break; 468 case VAGE0 | VAGE1: 469 break; 470 } 471 return (1); 472 } 473 474 /* 475 * The vnode was found to be possibly vgone()able and the caller has locked it 476 * (thus the usecount should be 1 now). Determine if the vnode is actually 477 * vgone()able, doing some cleanups in the process. Returns 1 if the vnode 478 * can be vgone()'d, 0 otherwise. 479 * 480 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf 481 * in the namecache topology and (B) this vnode has buffer cache bufs. 482 * We cannot remove vnodes with non-leaf namecache associations. We do a 483 * tentitive leaf check prior to attempting to flush out any buffers but the 484 * 'real' test when all is said in done is that v_auxrefs must become 0 for 485 * the vnode to be freeable. 486 * 487 * We could theoretically just unconditionally flush when v_auxrefs != 0, 488 * but flushing data associated with non-leaf nodes (which are always 489 * directories), just throws it away for no benefit. It is the buffer 490 * cache's responsibility to choose buffers to recycle from the cached 491 * data point of view. 492 */ 493 static int 494 visleaf(struct vnode *vp) 495 { 496 struct namecache *ncp; 497 498 spin_lock(&vp->v_spin); 499 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) { 500 if (!TAILQ_EMPTY(&ncp->nc_list)) { 501 spin_unlock(&vp->v_spin); 502 return(0); 503 } 504 } 505 spin_unlock(&vp->v_spin); 506 return(1); 507 } 508 509 /* 510 * Try to clean up the vnode to the point where it can be vgone()'d, returning 511 * 0 if it cannot be vgone()'d (or already has been), 1 if it can. Unlike 512 * vmightfree() this routine may flush the vnode and block. Vnodes marked 513 * VFREE are still candidates for vgone()ing because they may hold namecache 514 * resources and could be blocking the namecache directory hierarchy (and 515 * related vnodes) from being freed. 516 */ 517 static int 518 vtrytomakegoneable(struct vnode *vp, int page_count) 519 { 520 if (vp->v_flag & VRECLAIMED) 521 return (0); 522 if (vp->v_sysref.refcnt > 1) 523 return (0); 524 if (vp->v_object && vp->v_object->resident_page_count >= page_count) 525 return (0); 526 if (vp->v_auxrefs && visleaf(vp)) { 527 vinvalbuf(vp, V_SAVE, 0, 0); 528 #if 0 /* DEBUG */ 529 kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" : 530 "vrecycle: vp %p succeeded: %s\n"), vp, 531 (TAILQ_FIRST(&vp->v_namecache) ? 532 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?")); 533 #endif 534 } 535 536 /* 537 * This sequence may seem a little strange, but we need to optimize 538 * the critical path a bit. We can't recycle vnodes with other 539 * references and because we are trying to recycle an otherwise 540 * perfectly fine vnode we have to invalidate the namecache in a 541 * way that avoids possible deadlocks (since the vnode lock is being 542 * held here). Finally, we have to check for other references one 543 * last time in case something snuck in during the inval. 544 */ 545 if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0) 546 return (0); 547 if (cache_inval_vp_nonblock(vp)) 548 return (0); 549 return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0); 550 } 551 552 /* 553 * Reclaim up to 1/10 of the vnodes associated with a mount point. Try 554 * to avoid vnodes which have lots of resident pages (we are trying to free 555 * vnodes, not memory). 556 * 557 * This routine is a callback from the mountlist scan. The mount point 558 * in question will be busied. 559 * 560 * NOTE: The 1/10 reclamation also ensures that the inactive data set 561 * (the vnodes being recycled by the one-time use) does not degenerate 562 * into too-small a set. This is important because once a vnode is 563 * marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode 564 * will not be destroyed EXCEPT by this mechanism. VM pages can still 565 * be cleaned/freed by the pageout daemon. 566 */ 567 static int 568 vlrureclaim(struct mount *mp, void *data) 569 { 570 struct vnlru_info *info = data; 571 struct vnode *vp; 572 int done; 573 int trigger; 574 int usevnodes; 575 int count; 576 int trigger_mult = vnlru_nowhere; 577 578 /* 579 * Calculate the trigger point for the resident pages check. The 580 * minimum trigger value is approximately the number of pages in 581 * the system divded by the number of vnodes. However, due to 582 * various other system memory overheads unrelated to data caching 583 * it is a good idea to double the trigger (at least). 584 * 585 * trigger_mult starts at 0. If the recycler is having problems 586 * finding enough freeable vnodes it will increase trigger_mult. 587 * This should not happen in normal operation, even on machines with 588 * low amounts of memory, but extraordinary memory use by the system 589 * verses the amount of cached data can trigger it. 590 * 591 * (long) -> deal with 64 bit machines, intermediate overflow 592 */ 593 usevnodes = desiredvnodes; 594 if (usevnodes <= 0) 595 usevnodes = 1; 596 trigger = (long)vmstats.v_page_count * (trigger_mult + 2) / usevnodes; 597 598 done = 0; 599 lwkt_gettoken(&mp->mnt_token); 600 count = mp->mnt_nvnodelistsize / 10 + 1; 601 602 while (count && mp->mnt_syncer) { 603 /* 604 * Next vnode. Use the special syncer vnode to placemark 605 * the LRU. This way the LRU code does not interfere with 606 * vmntvnodescan(). 607 */ 608 vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes); 609 TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes); 610 if (vp) { 611 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, 612 mp->mnt_syncer, v_nmntvnodes); 613 } else { 614 TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer, 615 v_nmntvnodes); 616 vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes); 617 if (vp == NULL) 618 break; 619 } 620 621 /* 622 * __VNODESCAN__ 623 * 624 * The VP will stick around while we hold mnt_token, 625 * at least until we block, so we can safely do an initial 626 * check, and then must check again after we lock the vnode. 627 */ 628 if (vp->v_type == VNON || /* syncer or indeterminant */ 629 !vmightfree(vp, trigger, info->pass) /* critical path opt */ 630 ) { 631 --count; 632 continue; 633 } 634 635 /* 636 * VX get the candidate vnode. If the VX get fails the 637 * vnode might still be on the mountlist. Our loop depends 638 * on us at least cycling the vnode to the end of the 639 * mountlist. 640 */ 641 if (vx_get_nonblock(vp) != 0) { 642 --count; 643 continue; 644 } 645 646 /* 647 * Since we blocked locking the vp, make sure it is still 648 * a candidate for reclamation. That is, it has not already 649 * been reclaimed and only has our VX reference associated 650 * with it. 651 */ 652 if (vp->v_type == VNON || /* syncer or indeterminant */ 653 (vp->v_flag & VRECLAIMED) || 654 vp->v_mount != mp || 655 !vtrytomakegoneable(vp, trigger) /* critical path opt */ 656 ) { 657 --count; 658 vx_put(vp); 659 continue; 660 } 661 662 /* 663 * All right, we are good, move the vp to the end of the 664 * mountlist and clean it out. The vget will have returned 665 * an error if the vnode was destroyed (VRECLAIMED set), so we 666 * do not have to check again. The vput() will move the 667 * vnode to the free list if the vgone() was successful. 668 */ 669 KKASSERT(vp->v_mount == mp); 670 vgone_vxlocked(vp); 671 vx_put(vp); 672 ++done; 673 --count; 674 } 675 lwkt_reltoken(&mp->mnt_token); 676 return (done); 677 } 678 679 /* 680 * Attempt to recycle vnodes in a context that is always safe to block. 681 * Calling vlrurecycle() from the bowels of file system code has some 682 * interesting deadlock problems. 683 */ 684 static struct thread *vnlruthread; 685 686 static void 687 vnlru_proc(void) 688 { 689 struct thread *td = curthread; 690 struct vnlru_info info; 691 int done; 692 693 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td, 694 SHUTDOWN_PRI_FIRST); 695 696 for (;;) { 697 kproc_suspend_loop(); 698 699 /* 700 * Do some opportunistic roving. 701 */ 702 if (numvnodes > 100000) 703 vnode_free_rover_scan(50); 704 else if (numvnodes > 10000) 705 vnode_free_rover_scan(20); 706 else 707 vnode_free_rover_scan(5); 708 709 /* 710 * Try to free some vnodes if we have too many 711 * 712 * (long) -> deal with 64 bit machines, intermediate overflow 713 */ 714 if (numvnodes > desiredvnodes && 715 freevnodes > desiredvnodes * 2 / 10) { 716 int count = numvnodes - desiredvnodes; 717 718 if (count > freevnodes / 100) 719 count = freevnodes / 100; 720 if (count < 5) 721 count = 5; 722 freesomevnodes(count); 723 } 724 725 /* 726 * Do non-critical-path (more robust) cache cleaning, 727 * even if vnode counts are nominal, to try to avoid 728 * having to do it in the critical path. 729 */ 730 cache_hysteresis(0); 731 732 /* 733 * Nothing to do if most of our vnodes are already on 734 * the free list. 735 */ 736 if (numvnodes - freevnodes <= (long)desiredvnodes * 9 / 10) { 737 tsleep(vnlruthread, 0, "vlruwt", hz); 738 continue; 739 } 740 741 /* 742 * The pass iterates through the four combinations of 743 * VAGE0/VAGE1. We want to get rid of aged small files 744 * first. 745 */ 746 info.pass = 0; 747 done = 0; 748 while (done == 0 && info.pass < 4) { 749 done = mountlist_scan(vlrureclaim, &info, 750 MNTSCAN_FORWARD); 751 ++info.pass; 752 } 753 754 /* 755 * The vlrureclaim() call only processes 1/10 of the vnodes 756 * on each mount. If we couldn't find any repeat the loop 757 * at least enough times to cover all available vnodes before 758 * we start sleeping. Complain if the failure extends past 759 * 30 second, every 30 seconds. 760 */ 761 if (done == 0) { 762 ++vnlru_nowhere; 763 if (vnlru_nowhere % 10 == 0) 764 tsleep(vnlruthread, 0, "vlrup", hz * 3); 765 if (vnlru_nowhere % 100 == 0) 766 kprintf("vnlru_proc: vnode recycler stopped working!\n"); 767 if (vnlru_nowhere == 1000) 768 vnlru_nowhere = 900; 769 } else { 770 vnlru_nowhere = 0; 771 } 772 } 773 } 774 775 /* 776 * MOUNTLIST FUNCTIONS 777 */ 778 779 /* 780 * mountlist_insert (MP SAFE) 781 * 782 * Add a new mount point to the mount list. 783 */ 784 void 785 mountlist_insert(struct mount *mp, int how) 786 { 787 lwkt_gettoken(&mountlist_token); 788 if (how == MNTINS_FIRST) 789 TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list); 790 else 791 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); 792 lwkt_reltoken(&mountlist_token); 793 } 794 795 /* 796 * mountlist_interlock (MP SAFE) 797 * 798 * Execute the specified interlock function with the mountlist token 799 * held. The function will be called in a serialized fashion verses 800 * other functions called through this mechanism. 801 */ 802 int 803 mountlist_interlock(int (*callback)(struct mount *), struct mount *mp) 804 { 805 int error; 806 807 lwkt_gettoken(&mountlist_token); 808 error = callback(mp); 809 lwkt_reltoken(&mountlist_token); 810 return (error); 811 } 812 813 /* 814 * mountlist_boot_getfirst (DURING BOOT ONLY) 815 * 816 * This function returns the first mount on the mountlist, which is 817 * expected to be the root mount. Since no interlocks are obtained 818 * this function is only safe to use during booting. 819 */ 820 821 struct mount * 822 mountlist_boot_getfirst(void) 823 { 824 return(TAILQ_FIRST(&mountlist)); 825 } 826 827 /* 828 * mountlist_remove (MP SAFE) 829 * 830 * Remove a node from the mountlist. If this node is the next scan node 831 * for any active mountlist scans, the active mountlist scan will be 832 * adjusted to skip the node, thus allowing removals during mountlist 833 * scans. 834 */ 835 void 836 mountlist_remove(struct mount *mp) 837 { 838 struct mountscan_info *msi; 839 840 lwkt_gettoken(&mountlist_token); 841 TAILQ_FOREACH(msi, &mountscan_list, msi_entry) { 842 if (msi->msi_node == mp) { 843 if (msi->msi_how & MNTSCAN_FORWARD) 844 msi->msi_node = TAILQ_NEXT(mp, mnt_list); 845 else 846 msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list); 847 } 848 } 849 TAILQ_REMOVE(&mountlist, mp, mnt_list); 850 lwkt_reltoken(&mountlist_token); 851 } 852 853 /* 854 * mountlist_exists (MP SAFE) 855 * 856 * Checks if a node exists in the mountlist. 857 * This function is mainly used by VFS quota code to check if a 858 * cached nullfs struct mount pointer is still valid at use time 859 * 860 * FIXME: there is no warranty the mp passed to that function 861 * will be the same one used by VFS_ACCOUNT() later 862 */ 863 int 864 mountlist_exists(struct mount *mp) 865 { 866 int node_exists = 0; 867 struct mount* lmp; 868 869 lwkt_gettoken(&mountlist_token); 870 TAILQ_FOREACH(lmp, &mountlist, mnt_list) { 871 if (lmp == mp) { 872 node_exists = 1; 873 break; 874 } 875 } 876 lwkt_reltoken(&mountlist_token); 877 return(node_exists); 878 } 879 880 /* 881 * mountlist_scan (MP SAFE) 882 * 883 * Safely scan the mount points on the mount list. Unless otherwise 884 * specified each mount point will be busied prior to the callback and 885 * unbusied afterwords. The callback may safely remove any mount point 886 * without interfering with the scan. If the current callback 887 * mount is removed the scanner will not attempt to unbusy it. 888 * 889 * If a mount node cannot be busied it is silently skipped. 890 * 891 * The callback return value is aggregated and a total is returned. A return 892 * value of < 0 is not aggregated and will terminate the scan. 893 * 894 * MNTSCAN_FORWARD - the mountlist is scanned in the forward direction 895 * MNTSCAN_REVERSE - the mountlist is scanned in reverse 896 * MNTSCAN_NOBUSY - the scanner will make the callback without busying 897 * the mount node. 898 */ 899 int 900 mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how) 901 { 902 struct mountscan_info info; 903 struct mount *mp; 904 int count; 905 int res; 906 907 lwkt_gettoken(&mountlist_token); 908 909 info.msi_how = how; 910 info.msi_node = NULL; /* paranoia */ 911 TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry); 912 913 res = 0; 914 915 if (how & MNTSCAN_FORWARD) { 916 info.msi_node = TAILQ_FIRST(&mountlist); 917 while ((mp = info.msi_node) != NULL) { 918 if (how & MNTSCAN_NOBUSY) { 919 count = callback(mp, data); 920 } else if (vfs_busy(mp, LK_NOWAIT) == 0) { 921 count = callback(mp, data); 922 if (mp == info.msi_node) 923 vfs_unbusy(mp); 924 } else { 925 count = 0; 926 } 927 if (count < 0) 928 break; 929 res += count; 930 if (mp == info.msi_node) 931 info.msi_node = TAILQ_NEXT(mp, mnt_list); 932 } 933 } else if (how & MNTSCAN_REVERSE) { 934 info.msi_node = TAILQ_LAST(&mountlist, mntlist); 935 while ((mp = info.msi_node) != NULL) { 936 if (how & MNTSCAN_NOBUSY) { 937 count = callback(mp, data); 938 } else if (vfs_busy(mp, LK_NOWAIT) == 0) { 939 count = callback(mp, data); 940 if (mp == info.msi_node) 941 vfs_unbusy(mp); 942 } else { 943 count = 0; 944 } 945 if (count < 0) 946 break; 947 res += count; 948 if (mp == info.msi_node) 949 info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list); 950 } 951 } 952 TAILQ_REMOVE(&mountscan_list, &info, msi_entry); 953 lwkt_reltoken(&mountlist_token); 954 return(res); 955 } 956 957 /* 958 * MOUNT RELATED VNODE FUNCTIONS 959 */ 960 961 static struct kproc_desc vnlru_kp = { 962 "vnlru", 963 vnlru_proc, 964 &vnlruthread 965 }; 966 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp) 967 968 /* 969 * Move a vnode from one mount queue to another. 970 * 971 * MPSAFE 972 */ 973 void 974 insmntque(struct vnode *vp, struct mount *mp) 975 { 976 struct mount *omp; 977 978 /* 979 * Delete from old mount point vnode list, if on one. 980 */ 981 if ((omp = vp->v_mount) != NULL) { 982 lwkt_gettoken(&omp->mnt_token); 983 KKASSERT(omp == vp->v_mount); 984 KASSERT(omp->mnt_nvnodelistsize > 0, 985 ("bad mount point vnode list size")); 986 vremovevnodemnt(vp); 987 omp->mnt_nvnodelistsize--; 988 lwkt_reltoken(&omp->mnt_token); 989 } 990 991 /* 992 * Insert into list of vnodes for the new mount point, if available. 993 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer. 994 */ 995 if (mp == NULL) { 996 vp->v_mount = NULL; 997 return; 998 } 999 lwkt_gettoken(&mp->mnt_token); 1000 vp->v_mount = mp; 1001 if (mp->mnt_syncer) { 1002 TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes); 1003 } else { 1004 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1005 } 1006 mp->mnt_nvnodelistsize++; 1007 lwkt_reltoken(&mp->mnt_token); 1008 } 1009 1010 1011 /* 1012 * Scan the vnodes under a mount point and issue appropriate callbacks. 1013 * 1014 * The fastfunc() callback is called with just the mountlist token held 1015 * (no vnode lock). It may not block and the vnode may be undergoing 1016 * modifications while the caller is processing it. The vnode will 1017 * not be entirely destroyed, however, due to the fact that the mountlist 1018 * token is held. A return value < 0 skips to the next vnode without calling 1019 * the slowfunc(), a return value > 0 terminates the loop. 1020 * 1021 * The slowfunc() callback is called after the vnode has been successfully 1022 * locked based on passed flags. The vnode is skipped if it gets rearranged 1023 * or destroyed while blocking on the lock. A non-zero return value from 1024 * the slow function terminates the loop. The slow function is allowed to 1025 * arbitrarily block. The scanning code guarentees consistency of operation 1026 * even if the slow function deletes or moves the node, or blocks and some 1027 * other thread deletes or moves the node. 1028 * 1029 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed 1030 * out from under the fastfunc()'s vnode test. It will not prevent 1031 * v_object from getting NULL'd out but it will ensure that the 1032 * pointer (if we race) will remain stable. Only needed when 1033 * fastfunc is non-NULL. 1034 */ 1035 int 1036 vmntvnodescan( 1037 struct mount *mp, 1038 int flags, 1039 int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data), 1040 int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data), 1041 void *data 1042 ) { 1043 struct vmntvnodescan_info info; 1044 struct vnode *vp; 1045 int r = 0; 1046 int maxcount = mp->mnt_nvnodelistsize * 2; 1047 int stopcount = 0; 1048 int count = 0; 1049 1050 lwkt_gettoken(&mp->mnt_token); 1051 if (fastfunc) 1052 lwkt_gettoken(&vmobj_token); 1053 1054 /* 1055 * If asked to do one pass stop after iterating available vnodes. 1056 * Under heavy loads new vnodes can be added while we are scanning, 1057 * so this isn't perfect. Create a slop factor of 2x. 1058 */ 1059 if (flags & VMSC_ONEPASS) 1060 stopcount = mp->mnt_nvnodelistsize; 1061 1062 info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 1063 TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry); 1064 1065 while ((vp = info.vp) != NULL) { 1066 if (--maxcount == 0) { 1067 kprintf("Warning: excessive fssync iteration\n"); 1068 maxcount = mp->mnt_nvnodelistsize * 2; 1069 } 1070 1071 /* 1072 * Skip if visible but not ready, or special (e.g. 1073 * mp->mnt_syncer) 1074 */ 1075 if (vp->v_type == VNON) 1076 goto next; 1077 KKASSERT(vp->v_mount == mp); 1078 1079 /* 1080 * Quick test. A negative return continues the loop without 1081 * calling the slow test. 0 continues onto the slow test. 1082 * A positive number aborts the loop. 1083 */ 1084 if (fastfunc) { 1085 if ((r = fastfunc(mp, vp, data)) < 0) { 1086 r = 0; 1087 goto next; 1088 } 1089 if (r) 1090 break; 1091 } 1092 1093 /* 1094 * Get a vxlock on the vnode, retry if it has moved or isn't 1095 * in the mountlist where we expect it. 1096 */ 1097 if (slowfunc) { 1098 int error; 1099 1100 switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) { 1101 case VMSC_GETVP: 1102 error = vget(vp, LK_EXCLUSIVE); 1103 break; 1104 case VMSC_GETVP|VMSC_NOWAIT: 1105 error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT); 1106 break; 1107 case VMSC_GETVX: 1108 vx_get(vp); 1109 error = 0; 1110 break; 1111 default: 1112 error = 0; 1113 break; 1114 } 1115 if (error) 1116 goto next; 1117 /* 1118 * Do not call the slow function if the vnode is 1119 * invalid or if it was ripped out from under us 1120 * while we (potentially) blocked. 1121 */ 1122 if (info.vp == vp && vp->v_type != VNON) 1123 r = slowfunc(mp, vp, data); 1124 1125 /* 1126 * Cleanup 1127 */ 1128 switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) { 1129 case VMSC_GETVP: 1130 case VMSC_GETVP|VMSC_NOWAIT: 1131 vput(vp); 1132 break; 1133 case VMSC_GETVX: 1134 vx_put(vp); 1135 break; 1136 default: 1137 break; 1138 } 1139 if (r != 0) 1140 break; 1141 } 1142 1143 next: 1144 /* 1145 * Yield after some processing. Depending on the number 1146 * of vnodes, we might wind up running for a long time. 1147 * Because threads are not preemptable, time critical 1148 * userland processes might starve. Give them a chance 1149 * now and then. 1150 */ 1151 if (++count == 10000) { 1152 /* 1153 * We really want to yield a bit, so we simply 1154 * sleep a tick 1155 */ 1156 tsleep(mp, 0, "vnodescn", 1); 1157 count = 0; 1158 } 1159 1160 /* 1161 * If doing one pass this decrements to zero. If it starts 1162 * at zero it is effectively unlimited for the purposes of 1163 * this loop. 1164 */ 1165 if (--stopcount == 0) 1166 break; 1167 1168 /* 1169 * Iterate. If the vnode was ripped out from under us 1170 * info.vp will already point to the next vnode, otherwise 1171 * we have to obtain the next valid vnode ourselves. 1172 */ 1173 if (info.vp == vp) 1174 info.vp = TAILQ_NEXT(vp, v_nmntvnodes); 1175 } 1176 1177 TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry); 1178 if (fastfunc) 1179 lwkt_reltoken(&vmobj_token); 1180 lwkt_reltoken(&mp->mnt_token); 1181 return(r); 1182 } 1183 1184 /* 1185 * Remove any vnodes in the vnode table belonging to mount point mp. 1186 * 1187 * If FORCECLOSE is not specified, there should not be any active ones, 1188 * return error if any are found (nb: this is a user error, not a 1189 * system error). If FORCECLOSE is specified, detach any active vnodes 1190 * that are found. 1191 * 1192 * If WRITECLOSE is set, only flush out regular file vnodes open for 1193 * writing. 1194 * 1195 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped. 1196 * 1197 * `rootrefs' specifies the base reference count for the root vnode 1198 * of this filesystem. The root vnode is considered busy if its 1199 * v_sysref.refcnt exceeds this value. On a successful return, vflush() 1200 * will call vrele() on the root vnode exactly rootrefs times. 1201 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 1202 * be zero. 1203 */ 1204 #ifdef DIAGNOSTIC 1205 static int busyprt = 0; /* print out busy vnodes */ 1206 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1207 #endif 1208 1209 static int vflush_scan(struct mount *mp, struct vnode *vp, void *data); 1210 1211 struct vflush_info { 1212 int flags; 1213 int busy; 1214 thread_t td; 1215 }; 1216 1217 int 1218 vflush(struct mount *mp, int rootrefs, int flags) 1219 { 1220 struct thread *td = curthread; /* XXX */ 1221 struct vnode *rootvp = NULL; 1222 int error; 1223 struct vflush_info vflush_info; 1224 1225 if (rootrefs > 0) { 1226 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 1227 ("vflush: bad args")); 1228 /* 1229 * Get the filesystem root vnode. We can vput() it 1230 * immediately, since with rootrefs > 0, it won't go away. 1231 */ 1232 if ((error = VFS_ROOT(mp, &rootvp)) != 0) { 1233 if ((flags & FORCECLOSE) == 0) 1234 return (error); 1235 rootrefs = 0; 1236 /* continue anyway */ 1237 } 1238 if (rootrefs) 1239 vput(rootvp); 1240 } 1241 1242 vflush_info.busy = 0; 1243 vflush_info.flags = flags; 1244 vflush_info.td = td; 1245 vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info); 1246 1247 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 1248 /* 1249 * If just the root vnode is busy, and if its refcount 1250 * is equal to `rootrefs', then go ahead and kill it. 1251 */ 1252 KASSERT(vflush_info.busy > 0, ("vflush: not busy")); 1253 KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs")); 1254 if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) { 1255 vx_lock(rootvp); 1256 vgone_vxlocked(rootvp); 1257 vx_unlock(rootvp); 1258 vflush_info.busy = 0; 1259 } 1260 } 1261 if (vflush_info.busy) 1262 return (EBUSY); 1263 for (; rootrefs > 0; rootrefs--) 1264 vrele(rootvp); 1265 return (0); 1266 } 1267 1268 /* 1269 * The scan callback is made with an VX locked vnode. 1270 */ 1271 static int 1272 vflush_scan(struct mount *mp, struct vnode *vp, void *data) 1273 { 1274 struct vflush_info *info = data; 1275 struct vattr vattr; 1276 int flags = info->flags; 1277 1278 /* 1279 * Skip over a vnodes marked VSYSTEM. 1280 */ 1281 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1282 return(0); 1283 } 1284 1285 /* 1286 * Do not force-close VCHR or VBLK vnodes 1287 */ 1288 if (vp->v_type == VCHR || vp->v_type == VBLK) 1289 flags &= ~(WRITECLOSE|FORCECLOSE); 1290 1291 /* 1292 * If WRITECLOSE is set, flush out unlinked but still open 1293 * files (even if open only for reading) and regular file 1294 * vnodes open for writing. 1295 */ 1296 if ((flags & WRITECLOSE) && 1297 (vp->v_type == VNON || 1298 (VOP_GETATTR(vp, &vattr) == 0 && 1299 vattr.va_nlink > 0)) && 1300 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1301 return(0); 1302 } 1303 1304 /* 1305 * If we are the only holder (refcnt of 1) or the vnode is in 1306 * termination (refcnt < 0), we can vgone the vnode. 1307 */ 1308 if (vp->v_sysref.refcnt <= 1) { 1309 vgone_vxlocked(vp); 1310 return(0); 1311 } 1312 1313 /* 1314 * If FORCECLOSE is set, forcibly destroy the vnode and then move 1315 * it to a dummymount structure so vop_*() functions don't deref 1316 * a NULL pointer. 1317 */ 1318 if (flags & FORCECLOSE) { 1319 vhold(vp); 1320 vgone_vxlocked(vp); 1321 if (vp->v_mount == NULL) 1322 insmntque(vp, &dummymount); 1323 vdrop(vp); 1324 return(0); 1325 } 1326 if (vp->v_type == VCHR || vp->v_type == VBLK) 1327 kprintf("vflush: Warning, cannot destroy busy device vnode\n"); 1328 #ifdef DIAGNOSTIC 1329 if (busyprt) 1330 vprint("vflush: busy vnode", vp); 1331 #endif 1332 ++info->busy; 1333 return(0); 1334 } 1335 1336 void 1337 add_bio_ops(struct bio_ops *ops) 1338 { 1339 TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry); 1340 } 1341 1342 void 1343 rem_bio_ops(struct bio_ops *ops) 1344 { 1345 TAILQ_REMOVE(&bio_ops_list, ops, entry); 1346 } 1347 1348 /* 1349 * This calls the bio_ops io_sync function either for a mount point 1350 * or generally. 1351 * 1352 * WARNING: softdeps is weirdly coded and just isn't happy unless 1353 * io_sync is called with a NULL mount from the general syncing code. 1354 */ 1355 void 1356 bio_ops_sync(struct mount *mp) 1357 { 1358 struct bio_ops *ops; 1359 1360 if (mp) { 1361 if ((ops = mp->mnt_bioops) != NULL) 1362 ops->io_sync(mp); 1363 } else { 1364 TAILQ_FOREACH(ops, &bio_ops_list, entry) { 1365 ops->io_sync(NULL); 1366 } 1367 } 1368 } 1369 1370 /* 1371 * Lookup a mount point by nch 1372 */ 1373 struct mount * 1374 mount_get_by_nc(struct namecache *ncp) 1375 { 1376 struct mount *mp = NULL; 1377 1378 lwkt_gettoken(&mountlist_token); 1379 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 1380 if (ncp == mp->mnt_ncmountpt.ncp) 1381 break; 1382 } 1383 lwkt_reltoken(&mountlist_token); 1384 return (mp); 1385 } 1386 1387