/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	8.3 (Berkeley) 10/14/93
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <libkern/libkern.h>

/*
 * Local declarations
 */
struct buf *cluster_newbuf __P((struct vnode *, struct buf *, long, daddr_t,
	    daddr_t, long, int));
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
	    daddr_t, daddr_t, long, int, long));
void	    cluster_wbuild __P((struct vnode *, struct buf *, long,
	    daddr_t, int, daddr_t));

/*
 * We could optimize this by keeping track of where the last read-ahead
 * was, but it would involve adding fields to the vnode.  For now, let's
 * just get it working.
 *
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *
 * There are 4 or 5 cases depending on how you count:
 *	Desired block is in the cache:
 *	    1 Not sequential access (0 I/Os).
 *	    2 Access is sequential, do read-ahead (1 ASYNC).
 *	Desired block is not in cache:
 *	    3 Not sequential access (1 SYNC).
 *	    4 Sequential access, next block is contiguous (1 SYNC).
 *	    5 Sequential access, next block is not contiguous (1 SYNC, 1 ASYNC)
 *
 * There are potentially two buffers that require I/O.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, ioblkno;
	long flags;
	int error, num_ra, alreadyincore;

#ifdef DIAGNOSTIC
	if (size == 0)
		panic("cluster_read: size = 0");
#endif

	error = 0;
	flags = B_READ;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	if (bp->b_flags & (B_CACHE | B_DONE | B_DELWRI)) {
		/*
		 * Desired block is in cache; do any readahead ASYNC.
		 * Case 1, 2.
		 */
		trace(TR_BREADHIT, pack(vp, size), lblkno);
		flags |= B_ASYNC;
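		/*
		 * Pick the read-ahead target: while the access is still
		 * inside the first window (lblkno < v_ralen), look only
		 * half a window ahead; after that, a full window.
		 * Illustrative numbers (not from the original): with
		 * v_ralen == 4, a hit on block 10 targets block 14,
		 * while a hit on block 2 targets block 4.
		 */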
		ioblkno = lblkno +
		    (lblkno < vp->v_ralen ? vp->v_ralen >> 1 : vp->v_ralen);
		alreadyincore = (int)incore(vp, ioblkno);
		bp = NULL;
	} else {
		/* Block wasn't in cache, case 3, 4, 5. */
		trace(TR_BREADMISS, pack(vp, size), lblkno);
		ioblkno = lblkno;
		bp->b_flags |= flags;
		alreadyincore = 0;
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	}
	/*
	 * XXX
	 * Replace 1 with a window size based on some permutation of
	 * maxcontig and rot_delay.  This will let you figure out how
	 * many blocks you should read-ahead (case 2, 4, 5).
	 *
	 * If the access isn't sequential, cut the window size in half.
	 */
	rbp = NULL;
	if (lblkno != vp->v_lastr + 1 && lblkno != 0)
		vp->v_ralen = max(vp->v_ralen >> 1, 1);
	else if ((ioblkno + 1) * size < filesize && !alreadyincore &&
	    !(error = VOP_BMAP(vp, ioblkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		/*
		 * Reading sequentially, and the next block is not in the
		 * cache.  We are going to try reading ahead. If this is
		 * the first read of a file, then limit read-ahead to a
		 * single block, else read as much as we're allowed.
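		 *
		 * On each fully sequential pass the window doubles, but
		 * it is clamped so a single transfer never exceeds
		 * MAXPHYS bytes (illustrative, assumed numbers: with 8K
		 * blocks and a 64K MAXPHYS the window tops out at 8
		 * blocks).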
		 */
		if (num_ra > vp->v_ralen) {
			num_ra = vp->v_ralen;
			vp->v_ralen = min(MAXPHYS / size, vp->v_ralen << 1);
		} else
			vp->v_ralen = num_ra + 1;

		if (num_ra)				/* case 2, 4 */
			rbp = cluster_rbuild(vp, filesize,
			    bp, ioblkno, blkno, size, num_ra, flags);
		else if (lblkno != 0 && ioblkno == lblkno) {
			/* Case 5: check how many blocks to read ahead */
			++ioblkno;
			if ((ioblkno + 1) * size > filesize ||
			    (error = VOP_BMAP(vp, ioblkno, NULL, &blkno,
			    &num_ra)) || blkno == -1)
				goto skip_readahead;
			flags |= B_ASYNC;
			if (num_ra)
				rbp = cluster_rbuild(vp, filesize,
				    NULL, ioblkno, blkno, size, num_ra, flags);
			else {
				rbp = getblk(vp, ioblkno, size, 0, 0);
				rbp->b_flags |= flags;
				rbp->b_blkno = blkno;
			}
		} else if (lblkno != 0) {
			/* case 2; read ahead single block */
			rbp = getblk(vp, ioblkno, size, 0, 0);
			rbp->b_flags |= flags;
			rbp->b_blkno = blkno;
		} else if (bp)				/* case 1, 3, block 0 */
			bp->b_blkno = blkno;
		/* Case 1 on block 0; not really doing sequential I/O */

		if (rbp == bp)		/* case 4 */
			rbp = NULL;
		else if (rbp) {			/* case 2, 5 */
			trace(TR_BREADMISSRA,
			    pack(vp, (num_ra + 1) * size), ioblkno);
			curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		}
	}

	/* XXX Kirk, do we need to make sure the bp has creds? */
skip_readahead:
	if (bp)
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else
			error = VOP_STRATEGY(bp);

	if (rbp)
		if (error || rbp->b_flags & (B_DONE | B_DELWRI)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else
			(void) VOP_STRATEGY(rbp);

	if (bp)
		return(biowait(bp));
	return(error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
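	/*
	 * If the last block of the run would extend past end of file,
	 * shorten the run by one block so the cluster stays inside
	 * the file.
	 */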
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return(bp);
	}

	bp = cluster_newbuf(vp, bp, flags, blkno, lbn, size, run + 1);
	if (bp->b_flags & (B_DONE | B_DELWRI))
		return (bp);

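	/*
	 * The cluster_save record and its array of child buffer
	 * pointers come from a single allocation; b_saveaddr is
	 * borrowed to hang the record off the parent buffer until
	 * cluster_callback() unwinds it.
	 */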
	b_save = malloc(sizeof(struct buf *) * run + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bufsize = b_save->bs_bcount = size;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

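	/*
	 * b_blkno is in DEV_BSIZE (512-byte) units, so each file-system
	 * block of `size' bytes advances the disk address by
	 * size / DEV_BSIZE sectors.
	 */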
	inc = size / DEV_BSIZE;
	for (bn = blkno + inc, i = 1; i <= run; ++i, bn += inc) {
		if (incore(vp, lbn + i)) {
			if (i == 1) {
				bp->b_saveaddr = b_save->bs_saveaddr;
				bp->b_flags &= ~B_CALL;
				bp->b_iodone = NULL;
				allocbuf(bp, size);
				free(b_save, M_SEGMENT);
			} else
				allocbuf(bp, size * i);
			break;
		}
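		/*
		 * Allocate an empty buffer header for this child block;
		 * the data pages stay with the parent until
		 * cluster_callback() hands each child its share via
		 * pagemove().
		 */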
		tbp = getblk(vp, lbn + i, 0, 0, 0);
		tbp->b_bcount = tbp->b_bufsize = size;
		tbp->b_blkno = bn;
		tbp->b_flags |= flags | B_READ | B_ASYNC;
		++b_save->bs_nchildren;
		b_save->bs_children[i - 1] = tbp;
	}
	if (!(bp->b_flags & B_ASYNC))
		vp->v_ralen = max(vp->v_ralen - 1, 1);
	return(bp);
}

/*
 * Either get a new buffer or grow the existing one.
 */
struct buf *
cluster_newbuf(vp, bp, flags, blkno, lblkno, size, run)
	struct vnode *vp;
	struct buf *bp;
	long flags;
	daddr_t blkno;
	daddr_t lblkno;
	long size;
	int run;
{
	if (!bp) {
		bp = getblk(vp, lblkno, size, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			bp->b_blkno = blkno;
			return(bp);
		}
	}
	allocbuf(bp, run * size);
	bp->b_blkno = blkno;
	bp->b_iodone = cluster_callback;
	bp->b_flags |= flags | B_CALL;
	return(bp);
}

/*
 * Cleanup after a clustered read or write.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **tbp;
	long bsize;
	caddr_t cp;

	b_save = (struct cluster_save *)(bp->b_saveaddr);
	bp->b_saveaddr = b_save->bs_saveaddr;

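	/*
	 * The parent buffer did one large transfer; walk the list of
	 * children, move each child's share of the pages out of the
	 * parent, and mark that child's I/O done.
	 */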
	cp = (char *)bp->b_data + b_save->bs_bufsize;
	for (tbp = b_save->bs_children; b_save->bs_nchildren--; ++tbp) {
		pagemove(cp, (*tbp)->b_data, (*tbp)->b_bufsize);
		cp += (*tbp)->b_bufsize;
		bp->b_bufsize -= (*tbp)->b_bufsize;
		biodone(*tbp);
	}
#ifdef DIAGNOSTIC
	if (bp->b_bufsize != b_save->bs_bufsize)
		panic("cluster_callback: more space to reclaim");
#endif
	bp->b_bcount = bp->b_bufsize;
	bp->b_iodone = NULL;
	free(b_save, M_SEGMENT);
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else
		wakeup((caddr_t)bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
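 *
 * A cluster here is a run of logically and physically contiguous
 * delayed-write blocks; it is flushed as one transfer when it fills
 * (case 4) or when a non-sequential write breaks the run (case 1).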
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int clen;

	vp = bp->b_vp;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

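	/*
	 * The write continues the current cluster only if it is the
	 * next logical block (v_lastw + 1) and its disk address
	 * immediately follows the previous one (v_lasta advanced by
	 * one block's worth of DEV_BSIZE sectors).
	 */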
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + bp->b_bcount / DEV_BSIZE)) {
		if (vp->v_clen != 0)
			/*
			 * Write is not sequential.
			 */
			cluster_wbuild(vp, NULL, bp->b_bcount, vp->v_cstart,
			    vp->v_lastw - vp->v_cstart + 1, lbn);
		/*
		 * Consider beginning a cluster.
		 */
		if ((lbn + 1) * bp->b_bcount == filesize)
			/* End of file, make cluster as large as possible */
			clen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
		else if (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &clen) ||
			    bp->b_blkno == -1) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = clen;
		if (clen == 0) {		/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the
		 * I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the cluster being built.
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t	lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	caddr_t	cp;
	int i, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
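	/*
	 * Skip over any leading blocks that are not in the cache
	 * (nothing there to flush) and the block on which I/O is
	 * currently being performed (the caller still owns it).
	 */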
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* A cluster of one block (or none) is not worth building. */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

	--len;
	b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bcount = bp->b_bcount;
	b_save->bs_bufsize = bp->b_bufsize;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

	bp->b_flags |= B_CALL;
	bp->b_iodone = cluster_callback;
	cp = (char *)bp->b_data + bp->b_bufsize;
	for (++start_lbn, i = 0; i < len; ++i, ++start_lbn) {
		if (!incore(vp, start_lbn) || start_lbn == lbn)
			break;

		if (last_bp == NULL || start_lbn != last_bp->b_lblkno) {
			tbp = getblk(vp, start_lbn, size, 0, 0);
#ifdef DIAGNOSTIC
			if (tbp->b_bcount != tbp->b_bufsize)
				panic("cluster_wbuild: Buffer too big");
#endif
			if (!(tbp->b_flags & B_DELWRI)) {
				brelse(tbp);
				break;
			}
		} else
			tbp = last_bp;

		++b_save->bs_nchildren;

		/* Move memory from children to parent */
		if (tbp->b_blkno != (bp->b_blkno + bp->b_bufsize / DEV_BSIZE)) {
			printf("Clustered Block: %d addr %x bufsize: %d\n",
			    bp->b_lblkno, bp->b_blkno, bp->b_bufsize);
			printf("Child Block: %d addr: %x\n", tbp->b_lblkno,
			    tbp->b_blkno);
			panic("Clustered write to wrong blocks");
		}

		pagemove(tbp->b_data, cp, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;

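		/*
		 * The parent buffer now carries the data, so the child
		 * is effectively clean: move it to the clean list and
		 * count it as pending output so the biodone() issued
		 * by cluster_callback() balances the books.
		 */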
		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);		/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;

		cp += tbp->b_bufsize;
	}

	if (i == 0) {
		/* None to cluster */
		bp->b_saveaddr = b_save->bs_saveaddr;
		bp->b_flags &= ~B_CALL;
		bp->b_iodone = NULL;
		free(b_save, M_SEGMENT);
	}
	bawrite(bp);
	if (i < len) {
		len -= i + 1;
		start_lbn += 1;
		goto redo;
	}
}