xref: /csrg-svn/sys/ufs/lfs/lfs_bio.c (revision 56155)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.17 (Berkeley) 09/02/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */
int	lfs_writing;			/* Set if already kicked off a writer
					   because of buffer space */
#define WRITE_THRESHHOLD	((nbuf >> 2) - 10)
#define WAIT_THRESHHOLD		((nbuf >> 1) - 10)
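/*
 * A worked example of the thresholds (illustrative numbers only): with
 * nbuf = 512 system buffers, lfs_check kicks off a segment write once
 * (512 >> 2) - 10 = 118 buffers are locked down, and makes callers
 * sleep once (512 >> 1) - 10 = 246 are.
 */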

int
lfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	struct lfs *fs;
	struct inode *ip;
	int s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		if (!LFS_FITS(fs, fsbtodb(fs, 1)) && !IS_IFILE(bp) &&
		    bp->b_lblkno > 0) {
			brelse(bp);
			wakeup(&lfs_allclean_wakeup);
			return (ENOSPC);
		}
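		/*
		 * Account for the pending delayed write: note the inode as
		 * modified (counted in lfs_uinodes) and reserve one file
		 * system block of available space for it.
		 */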
		ip = VTOI(bp->b_vp);
		if (!(ip->i_flag & IMOD))
			++fs->lfs_uinodes;
		ip->i_flag |= IMOD | ICHG | IUPD;
		fs->lfs_avail -= fsbtodb(fs, 1);
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		bp->b_flags &= ~(B_READ | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}
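
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a client dirties a buffer and pushes it through the vnode interface;
 * on an LFS mount, VOP_BWRITE is routed to lfs_bwrite() above.
 *
 *	error = VOP_BWRITE(bp);
 *	if (error == ENOSPC)
 *		the write was refused for lack of clean segments
 */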

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have a single static count of
 * locked buffers, so this cannot support more than one file system.  To
 * make it work for multiple file systems, put the count into the mount
 * structure.
 */
void
lfs_flush()
{
	register struct mount *mp;

	if (lfs_writing)
		return;
	lfs_writing = 1;
	mp = rootfs;
	do {
		/*
		 * The lock and dirop checks below avoid races with unmount
		 * and with active directory operations.
		 */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_UNMOUNT)) == 0 &&
		    !((struct ufsmount *)mp->mnt_data)->ufsmount_u.lfs->lfs_dirops) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			lfs_segwrite(mp, 0);
		}
		mp = mp->mnt_next;
	} while (mp != rootfs);
	lfs_writing = 0;
}
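
/*
 * A rough sketch of the fix the comment above suggests (hypothetical,
 * not implemented here): give each file system its own count by moving
 * it into the per-mount LFS state, e.g.
 *
 *	struct lfs {
 *		...
 *		int	lfs_locked_count;
 *	};
 *
 * and have lfs_bwrite/lfs_check consult the mounted file system's count
 * instead of the global locked_queue_count.
 */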

int
lfs_check(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	int error;

	error = 0;
	if (incore(vp, blkno))
		return (0);
	if (locked_queue_count > WRITE_THRESHHOLD)
		lfs_flush();
	if (locked_queue_count > WAIT_THRESHHOLD)
		error = tsleep(&lfs_allclean_wakeup, PCATCH | PUSER,
		    "buffers", NULL);
	return (error);
}
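
/*
 * A minimal usage sketch (hypothetical call site, not part of this file):
 * the block allocation path would throttle itself before creating another
 * dirty buffer, sleeping until the cleaner makes space available.
 *
 *	if (error = lfs_check(vp, lbn))
 *		return (error);
 */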