/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.6 (Berkeley) 02/04/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */

int
lfs_bwrite(bp)
	register BUF *bp;
{
	int s;
#ifdef VERBOSE
	printf("lfs_bwrite\n");
#endif
	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 */
	if (!(bp->b_flags & B_LOCKED))
		++locked_queue_count;
	bp->b_flags |= B_DELWRI | B_LOCKED;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	s = splbio();
	reassignbuf(bp, bp->b_vp);
	splx(s);
	brelse(bp);
	return (0);
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush()
{
	register struct mount *mp;
	struct mount *omp;

	/* 1M in a 4K file system. */
	if (locked_queue_count < 256)
		return;
	mp = rootfs;
	do {
		/*
		 * The lock check below is to avoid races with mount
		 * and unmount.
		 */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
		    !vfs_busy(mp)) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			locked_queue_count = 0;
			lfs_segwrite(mp, 0);
			omp = mp;
			mp = mp->mnt_next;
			vfs_unbusy(omp);
		} else
			mp = mp->mnt_next;
	} while (mp != rootfs);
}