/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.14 (Berkeley) 08/01/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */

int
lfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	int s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary; otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note that we have one static count of
 * locked buffers, so we can't have more than a single file system.  To make
 * this work for multiple file systems, put the count into the mount
 * structure.
 */
void
lfs_flush()
{
	register struct mount *mp;
	struct mount *omp;

	/* 1M in a 4K file system. */
	if (locked_queue_count < 256)
		return;
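	/*
	 * Walk the circular list of mounted file systems, starting and
	 * finishing at rootfs; only LFS mounts that can be busied without
	 * racing an unmount get their segments written.
	 */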
	mp = rootfs;
	do {
		/*
		 * The lock check below is to avoid races with mount
		 * and unmount.
		 */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
		    !vfs_busy(mp)) {
			/*
			 * We set the queue count to 0 here because we are
			 * about to write all the dirty buffers we have.  If
			 * more come in while we're writing the segment, they
			 * may not get written, so we want the count to
			 * reflect these new writes after the segwrite
			 * completes.
			 */
			locked_queue_count = 0;
			lfs_segwrite(mp, 0);
			omp = mp;
			mp = mp->mnt_next;
			vfs_unbusy(omp);
		} else
			mp = mp->mnt_next;
	} while (mp != rootfs);
}