/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.18 (Berkeley) 11/17/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	lfs_allclean_wakeup;	/* Cleaner wakeup address. */
int	locked_queue_count;	/* XXX Count of locked-down buffers. */
int	lfs_writing;		/* Set if already kicked off a writer
				   because of buffer space. */
#define	WRITE_THRESHHOLD	((nbuf >> 2) - 10)
#define	WAIT_THRESHHOLD		((nbuf >> 1) - 10)
#define	LFS_BUFWAIT	2

int
lfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	struct lfs *fs;
	struct inode *ip;
	int error, s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		while (!LFS_FITS(fs, fsbtodb(fs, 1)) && !IS_IFILE(bp) &&
		    bp->b_lblkno > 0) {
			/* Out of space, need cleaner to run. */
			wakeup(&lfs_allclean_wakeup);
			if (error = tsleep(&fs->lfs_avail, PCATCH | PUSER,
			    "cleaner", NULL)) {
				brelse(bp);
				return (error);
			}
		}
		/* Mark the inode modified and charge one block of space. */
		ip = VTOI(bp->b_vp);
		if (!(ip->i_flag & IMOD))
			++fs->lfs_uinodes;
		ip->i_flag |= IMOD | ICHG | IUPD;
		fs->lfs_avail -= fsbtodb(fs, 1);
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		bp->b_flags &= ~(B_READ | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush()
{
	register struct mount *mp;

	if (lfs_writing)
		return;
	lfs_writing = 1;
	mp = rootfs;
	do {
		/* The lock check below is to avoid races with unmount. */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_UNMOUNT)) == 0 &&
		    !((((struct ufsmount *)mp->mnt_data))->ufsmount_u.lfs)->lfs_dirops) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			lfs_segwrite(mp, 0);
		}
		mp = mp->mnt_next;
	} while (mp != rootfs);
	lfs_writing = 0;
}

/*
 * Keep the number of locked-down buffers under control: start a segment
 * write once we pass the write threshold, and sleep on the writer once we
 * pass the wait threshold.
 */
int
lfs_check(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	extern int lfs_allclean_wakeup;
	int error;

	error = 0;
	if (incore(vp, blkno))
		return (0);
	if (locked_queue_count > WRITE_THRESHHOLD)
		lfs_flush();

	/* If out of buffers, wait on writer. */
	while (locked_queue_count > WAIT_THRESHHOLD)
		error = tsleep(&locked_queue_count, PCATCH | PUSER, "buffers",
		    hz * LFS_BUFWAIT);

	return (error);
}