/*	$NetBSD: lfs_bio.c,v 1.128 2013/11/27 17:24:44 christos Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.128 2013/11/27 17:24:44 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0L;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
int	locked_queue_waiters = 0;	/* Number of processes waiting on lq */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}
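
/*
 * Added note (editor's commentary, not from the original source):
 * lfs_fits_buf() enforces two independent high-water marks.  A request
 * for n more buffers totalling "bytes" bytes fits only if both
 *
 *	locked_queue_count + locked_queue_rcount + n     <= LFS_WAIT_BUFS
 *	locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES
 *
 * hold at the time of the call; lfs_reservebuf() below loops, flushing
 * and sleeping, until both do.
 */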

/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	mutex_enter(&lfs_lock);
	while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		DLOG((DLOG_AVAIL, "lfs_reservebuf: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0 && locked_queue_waiters > 0) {
		DLOG((DLOG_AVAIL, "lfs_reservebuf: broadcast: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		cv_broadcast(&locked_queue_cv);
	}

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	while (!cantwait && fsb > 0 &&
	       !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading; check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &lfs_lock);
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

#ifdef DIAGNOSTIC
	mutex_enter(&lfs_lock);
	if (fsb > 0)
		lfs_rescount++;
	else if (fsb < 0)
		lfs_rescount--;
	if (lfs_rescount < 0)
		panic("lfs_rescount");
	mutex_exit(&lfs_lock);
#endif

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vhold(vp);
	if (vp2 != NULL) {
		vhold(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess.  should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	holdrele(vp);
	if (vp2 != NULL) {
		holdrele(vp2);
	}

	return error;
}
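
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * callers bracket an operation that will dirty blocks with a positive
 * reservation and a matching negative release, e.g.
 *
 *	fsb = lfs_btofsb(fs, fs->lfs_bsize);
 *	if ((error = lfs_reserve(fs, vp, NULL, fsb)) != 0)
 *		return error;
 *	... dirty the blocks ...
 *	lfs_reserve(fs, vp, NULL, -fsb);
 *
 * A negative fsb only undoes the earlier reservation accounting and
 * never sleeps waiting for space.
 */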

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + lfs_btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, LFS_INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_bshift - fs->lfs_ffshift));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
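
/*
 * Worked restatement (editor's commentary, not from the original source):
 * in frag (fsb) units the computation above amounts to
 *
 *	needed = fsb				   (the new blocks)
 *	       + lfs_btofsb(fs, lfs_sumsize)	   (a summary block)
 *	       + (howmany(lfs_uinodes + 1, INOPB)  (dirty inode blocks,
 *						    +1 for the ifile inode)
 *	          + lfs_segtabsz		   (the segment usage table)
 *	          + 1)				   (an ifile block)
 *	         << (lfs_bshift - lfs_ffshift)	   (whole blocks -> frags)
 *
 * and the write fits only when needed < lfs_avail.
 */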

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOULFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ; /* XXX is this right? --ks */
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = lfs_numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}
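
/*
 * Added note (editor's commentary, not from the original source):
 * lfs_bwrite_ext() is where a dirty buffer first enters LFS accounting.
 * On the first write of a buffer (B_LOCKED not yet set) it charges
 * lfs_numfrags(fs, bp->b_bcount) against fs->lfs_avail, marks the inode
 * IN_MODIFIED (or IN_CLEANING for cleaner writes), and pins the buffer
 * with LFS_LOCK_BUF() so that it sits on the LOCKED queue, counted by
 * locked_queue_count/locked_queue_bytes, until the segment writer
 * picks it up.
 */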

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	fs->lfs_pdflush = 0;
	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOULFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs)	howmany((fs)->lfs_uinodes, LFS_INOPB(fs))
#define INOBYTES(fs)	((fs)->lfs_uinodes * sizeof (struct ulfs1_dinode))
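
/*
 * Added note (editor's commentary, not from the original source):
 * INOCOUNT and INOBYTES estimate the pro-forma cost of the dirty inodes
 * themselves: how many inode blocks (lfs_uinodes packed LFS_INOPB to a
 * block) and how many bytes of on-disk dinodes will have to be written
 * in addition to the buffers already on the locked queue.  The checks in
 * lfs_check() below therefore compare locked_queue_count + INOCOUNT(fs)
 * and locked_queue_bytes + INOBYTES(fs) against the limits.
 */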

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		mutex_exit(&lfs_lock);
		lfs_flush_dirops(fs);
		mutex_enter(&lfs_lock);
	} else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		   locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		   lfs_subsys_pages > LFS_MAX_PAGES ||
		   fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		   lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}
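
/*
 * Added note (editor's commentary, not from the original source):
 * lfs_check() works with two tiers of limits.  Crossing an LFS_MAX_*
 * threshold (or one of the dirop limits) merely starts a flush; crossing
 * an LFS_WAIT_* watermark additionally puts the caller to sleep on
 * locked_queue_cv until the locked queue drains, re-flushing with
 * SEGM_CKP if the totals remain at or above LFS_MAX_* so the caller is
 * not blocked indefinitely.
 */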

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, lfs_fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}
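
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * lfs_newbuf()/lfs_freebuf() manage segment-private buffers whose data
 * area comes from lfs_malloc() rather than the buffer cache, e.g.
 *
 *	bp = lfs_newbuf(fs, vp, daddr, fs->lfs_sumsize, LFS_NB_SUMMARY);
 *	... fill bp->b_data and hand bp to the segment writer ...
 *	lfs_freebuf(fs, bp);
 *
 * The LFS_NB_SUMMARY type tag here is only illustrative; note that
 * lfs_freebuf() releases with LFS_NB_UNKNOWN regardless of the
 * allocation type, and skips lfs_free() entirely when BC_INVAL marks
 * the buffer as "fake".
 */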