/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.14 (Berkeley) 08/01/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */

int
lfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	int s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary; otherwise,
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		/* This is a pending write; clear any stale read/done/error state. */
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		/* Buffer lists are also manipulated from interrupt level. */
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}
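
/*
 * Illustrative sketch (not part of the original source): callers do not
 * normally invoke lfs_bwrite() directly; a dirty buffer on an LFS vnode
 * is handed back through the vnode operations vector, roughly:
 *
 *	error = VOP_BWRITE(bp);
 *
 * which for an LFS mount resolves to lfs_bwrite() above and queues the
 * buffer for the next segment write instead of starting I/O immediately.
 */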

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note that we keep a single static count
 * of locked buffers, so we can't have more than one LFS file system.  To
 * make this work for multiple file systems, put the count into the mount
 * structure.
 */
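/*
 * Sketch of the per-mount variant suggested above (hypothetical; the
 * field lfs_locked_count is invented here for illustration and does not
 * exist in this revision).  The count would live in the LFS-private
 * mount data, struct lfs, and lfs_bwrite() would reach it through the
 * buffer's vnode, roughly:
 *
 *	struct lfs *fs = VTOI(bp->b_vp)->i_lfs;
 *
 *	++fs->lfs_locked_count;
 *
 * lfs_flush() would then test and reset the per-mount count for each
 * LFS file system it writes instead of zeroing the single global.
 */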
void
lfs_flush()
{
	register struct mount *mp;
	struct mount *omp;

	/* 1M in a 4K file system: 256 buffers * 4K. */
	if (locked_queue_count < 256)
		return;
	mp = rootfs;
	do {
		/*
		 * The lock check below is to avoid races with mount
		 * and unmount.
		 */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
		    !vfs_busy(mp)) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			locked_queue_count = 0;
			lfs_segwrite(mp, 0);
			/* Pick up the next mount point before unbusying this one. */
			omp = mp;
			mp = mp->mnt_next;
			vfs_unbusy(omp);
		} else
			mp = mp->mnt_next;
	} while (mp != rootfs);
}
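
/*
 * Illustrative sketch (not part of the original source): lfs_flush() is
 * meant to be called by whatever notices that too many buffers have been
 * locked down; the comment above anticipates the pageout daemon taking
 * this role eventually.  A hypothetical call site needs nothing more than:
 *
 *	if (locked_queue_count >= 256)
 *		lfs_flush();
 *
 * since lfs_flush() rechecks the threshold itself and returns at once
 * when there is not enough locked data to justify a segment write.
 */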