/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <libkern/libkern.h>

/*
 * Local declarations
 */
struct buf *cluster_newbuf __P((struct vnode *, struct buf *, long, daddr_t,
	    daddr_t, long, int));
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
	    daddr_t, daddr_t, long, int, long));
void	    cluster_wbuild __P((struct vnode *, struct buf *, long,
	    daddr_t, int, daddr_t));

/*
 * We could optimize this by keeping track of where the last read-ahead
 * was, but it would involve adding fields to the vnode.  For now, let's
 * just get it working.
 *
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *
 * There are 4 or 5 cases depending on how you count:
 *	Desired block is in the cache:
 *	    1 Not sequential access (0 I/Os).
 *	    2 Access is sequential, do read-ahead (1 ASYNC).
 *	Desired block is not in cache:
 *	    3 Not sequential access (1 SYNC).
 *	    4 Sequential access, next block is contiguous (1 SYNC).
 *	    5 Sequential access, next block is not contiguous (1 SYNC, 1 ASYNC).
 *
 * There are potentially two buffers that require I/O.
 * 	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
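 *
 * For example (a sketch, assuming sequential 8K reads of a file laid
 * out contiguously on disk): the first read misses the cache and goes
 * to disk SYNC; as the access pattern proves sequential the read-ahead
 * window grows, and most later reads find their block already brought
 * in by earlier read-ahead (case 2), costing at most one ASYNC cluster
 * per window to keep it full.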
 */
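/*
 * Usage sketch (a hedged example modeled on the ufs read path in
 * ufs_readwrite.c; ip and fs are illustrative and not defined here):
 *
 *	if (lblktosize(fs, nextlbn) >= ip->i_size)
 *		error = bread(vp, lbn, size, NOCRED, &bp);
 *	else
 *		error = cluster_read(vp, ip->i_size, lbn, size,
 *		    NOCRED, &bp);
 *
 * That is, fall back to plain bread for the last block of the file and
 * let cluster_read handle read-ahead everywhere else.
 */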
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, ioblkno;
	long flags;
	int error, num_ra, alreadyincore;

#ifdef DIAGNOSTIC
	if (size == 0)
		panic("cluster_read: size = 0");
#endif

	error = 0;
	flags = B_READ;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	if (bp->b_flags & (B_CACHE | B_DONE | B_DELWRI)) {
		/*
		 * Desired block is in cache; do any readahead ASYNC.
		 * Case 1, 2.
		 */
		trace(TR_BREADHIT, pack(vp, size), lblkno);
		flags |= B_ASYNC;
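		/*
		 * Aim the read-ahead one window (v_ralen blocks) past
		 * the requested block; near the start of the file use
		 * only half a window until the access pattern is
		 * established.
		 */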
		ioblkno = lblkno +
		    (lblkno < vp->v_ralen ? vp->v_ralen >> 1 : vp->v_ralen);
		alreadyincore = (int)incore(vp, ioblkno);
		bp = NULL;
	} else {
		/* Block wasn't in cache, case 3, 4, 5. */
		trace(TR_BREADMISS, pack(vp, size), lblkno);
		ioblkno = lblkno;
		bp->b_flags |= flags;
		alreadyincore = 0;
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	}
	/*
	 * XXX
	 * Replace 1 with a window size based on some permutation of
	 * maxcontig and rot_delay.  This will let you figure out how
	 * many blocks you should read-ahead (case 2, 4, 5).
	 *
	 * If the access isn't sequential, cut the window size in half.
	 */
	rbp = NULL;
	if (lblkno != vp->v_lastr + 1 && lblkno != 0)
		vp->v_ralen = max(vp->v_ralen >> 1, 1);
	else if ((ioblkno + 1) * size < filesize && !alreadyincore &&
	    !(error = VOP_BMAP(vp, ioblkno, NULL, &blkno, &num_ra))) {
		/*
		 * Reading sequentially, and the next block is not in the
		 * cache.  We are going to try reading ahead. If this is
		 * the first read of a file, then limit read-ahead to a
		 * single block, else read as much as we're allowed.
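		 * The window doubles on each fully sequential pass, up
		 * to MAXPHYS / size blocks, and is otherwise limited by
		 * how many blocks VOP_BMAP reports as contiguous on disk.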
		 */
		if (num_ra > vp->v_ralen) {
			num_ra = vp->v_ralen;
			vp->v_ralen = min(MAXPHYS / size, vp->v_ralen << 1);
		} else
			vp->v_ralen = num_ra + 1;

		if (num_ra)				/* case 2, 4 */
			rbp = cluster_rbuild(vp, filesize,
			    bp, ioblkno, blkno, size, num_ra, flags);
		else if (lblkno != 0 && ioblkno == lblkno) {
			/* Case 5: check how many blocks to read ahead */
			++ioblkno;
			if ((ioblkno + 1) * size > filesize ||
			    (error = VOP_BMAP(vp,
			    ioblkno, NULL, &blkno, &num_ra)))
				goto skip_readahead;
			flags |= B_ASYNC;
			if (num_ra)
				rbp = cluster_rbuild(vp, filesize,
				    NULL, ioblkno, blkno, size, num_ra, flags);
			else {
				rbp = getblk(vp, ioblkno, size, 0, 0);
				rbp->b_flags |= flags;
				rbp->b_blkno = blkno;
			}
		} else if (lblkno != 0) {
			/* case 2; read ahead single block */
			rbp = getblk(vp, ioblkno, size, 0, 0);
			rbp->b_flags |= flags;
			rbp->b_blkno = blkno;
		} else if (bp)				/* case 1, 3, block 0 */
			bp->b_blkno = blkno;
		/* Case 1 on block 0; not really doing sequential I/O */

		if (rbp == bp)		/* case 4 */
			rbp = NULL;
		else if (rbp) {			/* case 2, 5 */
			trace(TR_BREADMISSRA,
			    pack(vp, (num_ra + 1) * size), ioblkno);
			curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		}
	}

	/* XXX Kirk, do we need to make sure the bp has creds? */
skip_readahead:
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else
			error = VOP_STRATEGY(bp);
	}

	if (rbp) {
		if (error || rbp->b_flags & (B_DONE | B_DELWRI)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else
			(void) VOP_STRATEGY(rbp);
	}

	if (bp)
		return(biowait(bp));
	return(error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return(bp);
	}

	bp = cluster_newbuf(vp, bp, flags, blkno, lbn, size, run + 1);
	if (bp->b_flags & (B_DONE | B_DELWRI))
		return (bp);

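	/*
	 * Allocate the bookkeeping record for this cluster.  The
	 * parent's b_saveaddr is borrowed to point at it (its previous
	 * value is stashed in bs_saveaddr and restored by
	 * cluster_callback).
	 */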
	b_save = malloc(sizeof(struct buf *) * run + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bufsize = b_save->bs_bcount = size;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

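	/*
	 * Attach a child buffer for each read-ahead block.  Disk
	 * addresses are in DEV_BSIZE units, so consecutive logical
	 * blocks lie inc sectors apart.  The children are created with
	 * no memory of their own (getblk size 0); cluster_callback
	 * parcels the parent's pages back out to them when the big I/O
	 * completes.  Stop early if a block is already in core.
	 */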
	inc = size / DEV_BSIZE;
	for (bn = blkno + inc, i = 1; i <= run; ++i, bn += inc) {
		if (incore(vp, lbn + i)) {
			if (i == 1) {
				bp->b_saveaddr = b_save->bs_saveaddr;
				bp->b_flags &= ~B_CALL;
				bp->b_iodone = NULL;
				allocbuf(bp, size);
				free(b_save, M_SEGMENT);
			} else
				allocbuf(bp, size * i);
			break;
		}
		tbp = getblk(vp, lbn + i, 0, 0, 0);
		tbp->b_bcount = tbp->b_bufsize = size;
		tbp->b_blkno = bn;
		tbp->b_flags |= flags | B_READ | B_ASYNC;
		++b_save->bs_nchildren;
		b_save->bs_children[i - 1] = tbp;
	}
	if (!(bp->b_flags & B_ASYNC))
		vp->v_ralen = max(vp->v_ralen - 1, 1);
	return(bp);
}

/*
 * Either get a new buffer or grow the existing one.
 */
struct buf *
cluster_newbuf(vp, bp, flags, blkno, lblkno, size, run)
	struct vnode *vp;
	struct buf *bp;
	long flags;
	daddr_t blkno;
	daddr_t lblkno;
	long size;
	int run;
{
	if (!bp) {
		bp = getblk(vp, lblkno, size, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			bp->b_blkno = blkno;
			return(bp);
		}
	}
	allocbuf(bp, run * size);
	bp->b_blkno = blkno;
	bp->b_iodone = cluster_callback;
	bp->b_flags |= flags | B_CALL;
	return(bp);
}

/*
 * Cleanup after a clustered read or write.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **tbp;
	caddr_t cp;

	b_save = (struct cluster_save *)(bp->b_saveaddr);
	bp->b_saveaddr = b_save->bs_saveaddr;

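	/*
	 * The parent buffer holds all of the data; give each child its
	 * share of the pages back and complete it.  The first block's
	 * worth (bs_bufsize bytes) belongs to the parent itself.
	 */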
	cp = bp->b_un.b_addr + b_save->bs_bufsize;
	for (tbp = b_save->bs_children; b_save->bs_nchildren--; ++tbp) {
		pagemove(cp, (*tbp)->b_un.b_addr, (*tbp)->b_bufsize);
		cp += (*tbp)->b_bufsize;
		bp->b_bufsize -= (*tbp)->b_bufsize;
		biodone(*tbp);
	}
#ifdef DIAGNOSTIC
	if (bp->b_bufsize != b_save->bs_bufsize)
		panic("cluster_callback: more space to reclaim");
#endif
	bp->b_bcount = bp->b_bufsize;
	bp->b_iodone = NULL;
	free(b_save, M_SEGMENT);
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else
		wakeup((caddr_t)bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
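 *
 * For example (a sketch assuming 8K blocks, a MAXBSIZE of 64K, and a
 * file laid out contiguously on disk): eight sequential writes to
 * blocks 0-7 issue no disk I/O until block 7 completes the cluster, at
 * which point all eight blocks go to disk as one contiguous transfer.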
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int clen;

	vp = bp->b_vp;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

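	/*
	 * A new cluster must be started when there is no cluster in
	 * progress, the write is not logically sequential, or the block
	 * does not follow the previous one physically on disk.
	 */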
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + bp->b_bcount / DEV_BSIZE)) {
		if (vp->v_clen != 0)
			/*
			 * Write is not sequential; flush the current
			 * cluster.
			 */
			cluster_wbuild(vp, NULL, bp->b_bcount, vp->v_cstart,
			    vp->v_lastw - vp->v_cstart + 1, lbn);
		/*
		 * Consider beginning a cluster.
		 */
		if ((lbn + 1) * bp->b_bcount == filesize)
			/* End of file, make cluster as large as possible */
			clen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
		else if (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &clen)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		} else
			clen = 0;
		vp->v_clen = clen;
		if (clen == 0) {		/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the
		 * I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the cluster being written.
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t	lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	caddr_t	cp;
	int i, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
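	/*
	 * Skip any leading blocks that are no longer in core, as well
	 * as the block on which I/O is currently being performed (lbn).
	 */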
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Fewer than two dirty blocks left; write them out singly. */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

	--len;
	b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bcount = bp->b_bcount;
	b_save->bs_bufsize = bp->b_bufsize;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

	bp->b_flags |= B_CALL;
	bp->b_iodone = cluster_callback;
	cp = bp->b_un.b_addr + bp->b_bufsize;
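	/*
	 * Gather the delayed-write blocks that follow, stealing their
	 * pages into the parent buffer until a gap, a clean block, or
	 * the block being written (lbn) ends the cluster.
	 */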
	for (++start_lbn, i = 0; i < len; ++i, ++start_lbn) {
		if (!incore(vp, start_lbn) || start_lbn == lbn)
			break;

		if (last_bp == NULL || start_lbn != last_bp->b_lblkno) {
			tbp = getblk(vp, start_lbn, size, 0, 0);
#ifdef DIAGNOSTIC
			if (tbp->b_bcount != tbp->b_bufsize)
				panic("cluster_wbuild: Buffer too big");
#endif
			if (!(tbp->b_flags & B_DELWRI)) {
				brelse(tbp);
				break;
			}
		} else
			tbp = last_bp;

		++b_save->bs_nchildren;

		if (tbp->b_blkno != (bp->b_blkno + bp->b_bufsize / DEV_BSIZE)) {
			printf("Clustered Block: %d addr %x bufsize: %d\n",
			    bp->b_lblkno, bp->b_blkno, bp->b_bufsize);
			printf("Child Block: %d addr: %x\n", tbp->b_lblkno,
			    tbp->b_blkno);
			panic("Clustered write to wrong blocks");
		}

		/* Move memory from children to parent */
		pagemove(tbp->b_un.b_daddr, cp, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);		/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;

		cp += tbp->b_bufsize;
	}

	if (i == 0) {
		/* None to cluster */
		bp->b_saveaddr = b_save->bs_saveaddr;
		bp->b_flags &= ~B_CALL;
		bp->b_iodone = NULL;
		free(b_save, M_SEGMENT);
	}
	bawrite(bp);
	if (i < len) {
		len -= i + 1;
		start_lbn += 1;
		goto redo;
	}
50857045Smargo }
509