/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	8.10 (Berkeley) 03/28/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <libkern/libkern.h>

/*
 * Local declarations
 */
struct buf *cluster_newbuf __P((struct vnode *, struct buf *, long, daddr_t,
	    daddr_t, long, int));
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
	    daddr_t, daddr_t, long, int, long));
void	    cluster_wbuild __P((struct vnode *, struct buf *, long,
	    daddr_t, int, daddr_t));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int	doclusterraz = 0;
#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	((blk) != 0 && ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif

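/*
 * Example (illustrative, not part of the original source): with
 * vp->v_lastr == 4, ISSEQREAD treats reads of logical blocks 4 and 5
 * as sequential; a read of block 7 is non-sequential and causes
 * cluster_read() below to reset the readahead window.
 */
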
/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *
 * There are 4 or 5 cases depending on how you count:
 *	Desired block is in the cache:
 *	    1 Not sequential access (0 I/Os).
 *	    2 Access is sequential, do read-ahead (1 ASYNC).
 *	Desired block is not in cache:
 *	    3 Not sequential access (1 SYNC).
 *	    4 Sequential access, next block is contiguous (1 SYNC).
 *	    5 Sequential access, next block is not contiguous (1 SYNC, 1 ASYNC)
 *
 * There are potentially two buffers that require I/O.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
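/*
 * A hypothetical caller, modeled on the ffs read path (sketch only; the
 * names ip, fs, and doclusterread are assumptions, not from this file):
 *
 *	if (doclusterread)
 *		error = cluster_read(vp, ip->i_size, lbn,
 *		    fs->fs_bsize, NOCRED, &bp);
 *	else
 *		error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
 */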
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, ioblkno;
	long flags;
	int error, num_ra, alreadyincore;

#ifdef DIAGNOSTIC
	if (size == 0)
		panic("cluster_read: size = 0");
#endif

	error = 0;
	flags = B_READ;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	if (bp->b_flags & B_CACHE) {
		/*
		 * Desired block is in cache; do any readahead ASYNC.
		 * Case 1, 2.
		 */
		trace(TR_BREADHIT, pack(vp, size), lblkno);
		flags |= B_ASYNC;
		ioblkno = lblkno + (vp->v_ralen ? vp->v_ralen : 1);
		alreadyincore = incore(vp, ioblkno) != NULL;
		bp = NULL;
	} else {
		/* Block wasn't in cache, case 3, 4, 5. */
		trace(TR_BREADMISS, pack(vp, size), lblkno);
		bp->b_flags |= B_READ;
		ioblkno = lblkno;
		alreadyincore = 0;
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	}
	/*
	 * XXX
	 * Replace 1 with a window size based on some permutation of
	 * maxcontig and rot_delay.  This will let you figure out how
	 * many blocks you should read-ahead (case 2, 4, 5).
	 *
	 * If the access isn't sequential, reset the window to 1.
	 * Note that a read to the same block is considered sequential.
	 * This catches the case where the file is being read sequentially,
	 * but in chunks smaller than the filesystem block size.
	 */
	rbp = NULL;
	if (!ISSEQREAD(vp, lblkno)) {
		vp->v_ralen = 0;
		vp->v_maxra = lblkno;
	} else if ((ioblkno + 1) * size <= filesize && !alreadyincore &&
	    !(error = VOP_BMAP(vp, ioblkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		/*
		 * Reading sequentially, and the next block is not in the
		 * cache.  We are going to try reading ahead.
		 */
		if (num_ra) {
			/*
			 * If our desired readahead block had been read
			 * in a previous readahead but is no longer in
			 * core, then we may be reading ahead too far
			 * or are not using our readahead very rapidly.
			 * In this case we scale back the window.
			 */
			if (!alreadyincore && ioblkno <= vp->v_maxra)
				vp->v_ralen = max(vp->v_ralen >> 1, 1);
			/*
			 * There are more sequential blocks than our current
			 * window allows, scale up.  Ideally we want to get
			 * in sync with the filesystem maxcontig value.
			 */
			else if (num_ra > vp->v_ralen && lblkno != vp->v_lastr)
				vp->v_ralen = vp->v_ralen ?
					min(num_ra, vp->v_ralen << 1) : 1;

			if (num_ra > vp->v_ralen)
				num_ra = vp->v_ralen;
		}
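		/*
		 * Illustrative trace (not in the original comments): on a
		 * long sequential read, v_ralen grows 1, 2, 4, ... on
		 * successive sequential blocks, capped at num_ra (the
		 * contiguous run reported by VOP_BMAP), and is halved
		 * whenever a block we previously read ahead has been
		 * reclaimed from the cache before being used.
		 */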

		if (num_ra)				/* case 2, 4 */
			rbp = cluster_rbuild(vp, filesize,
			    bp, ioblkno, blkno, size, num_ra, flags);
		else if (ioblkno == lblkno) {
			bp->b_blkno = blkno;
			/* Case 5: check how many blocks to read ahead */
			++ioblkno;
			if ((ioblkno + 1) * size > filesize ||
			    incore(vp, ioblkno) || (error = VOP_BMAP(vp,
			     ioblkno, NULL, &blkno, &num_ra)) || blkno == -1)
				goto skip_readahead;
			/*
			 * Adjust readahead as above.
			 * Don't check alreadyincore, we know it is 0 from
			 * the previous conditional.
			 */
			if (num_ra) {
				if (ioblkno <= vp->v_maxra)
					vp->v_ralen = max(vp->v_ralen >> 1, 1);
				else if (num_ra > vp->v_ralen &&
					 lblkno != vp->v_lastr)
					vp->v_ralen = vp->v_ralen ?
						min(num_ra, vp->v_ralen << 1) : 1;
				if (num_ra > vp->v_ralen)
					num_ra = vp->v_ralen;
			}
			flags |= B_ASYNC;
			if (num_ra)
				rbp = cluster_rbuild(vp, filesize,
				    NULL, ioblkno, blkno, size, num_ra, flags);
			else {
				rbp = getblk(vp, ioblkno, size, 0, 0);
				rbp->b_flags |= flags;
				rbp->b_blkno = blkno;
			}
		} else {
			/* case 2; read ahead single block */
			rbp = getblk(vp, ioblkno, size, 0, 0);
			rbp->b_flags |= flags;
			rbp->b_blkno = blkno;
		}

		if (rbp == bp)			/* case 4 */
			rbp = NULL;
		else if (rbp) {			/* case 2, 5 */
			trace(TR_BREADMISSRA,
			    pack(vp, (num_ra + 1) * size), ioblkno);
			curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		}
	}

	/* XXX Kirk, do we need to make sure the bp has creds? */
skip_readahead:
	if (bp)
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else
			error = VOP_STRATEGY(bp);

	if (rbp)
		if (error || rbp->b_flags & (B_DONE | B_DELWRI)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else
			(void) VOP_STRATEGY(rbp);

	/*
	 * Recalculate our maximum readahead
	 */
	if (rbp == NULL)
		rbp = bp;
	if (rbp)
		vp->v_maxra = rbp->b_lblkno + (rbp->b_bufsize / size) - 1;

	if (bp)
		return(biowait(bp));
	return(error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return(bp);
	}

	bp = cluster_newbuf(vp, bp, flags, blkno, lbn, size, run + 1);
	if (bp->b_flags & (B_DONE | B_DELWRI))
		return (bp);

	b_save = malloc(sizeof(struct buf *) * run + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bufsize = b_save->bs_bcount = size;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;
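	/*
	 * Layout note (illustrative): the malloc above allocates the
	 * cluster_save header and its bs_children pointer array as one
	 * block, so the single free() in cluster_callback releases both:
	 *
	 *	[ struct cluster_save | struct buf *[run] ]
	 */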

	inc = btodb(size);
	for (bn = blkno + inc, i = 1; i <= run; ++i, bn += inc) {
		/*
		 * If a component of the cluster is already in core,
		 * terminate the cluster early.
		 */
		if (incore(vp, lbn + i))
			break;
		tbp = getblk(vp, lbn + i, 0, 0, 0);
		/*
		 * getblk may return some memory in the buffer if there were
		 * no empty buffers to shed it to.  If there is currently
		 * memory in the buffer, we move it down size bytes to make
		 * room for the valid pages that cluster_callback will insert.
		 * We do this now so we don't have to do it at interrupt time
		 * in the callback routine.
		 */
		if (tbp->b_bufsize != 0) {
			caddr_t bdata = (char *)tbp->b_data;

			/*
			 * If there is no room in the buffer to add another
			 * page, terminate the cluster early.
			 */
			if (tbp->b_bufsize + size > MAXBSIZE) {
#ifdef DIAGNOSTIC
				if (tbp->b_bufsize != MAXBSIZE)
					panic("cluster_rbuild: too much memory");
#endif
				brelse(tbp);
				break;
			}
			if (tbp->b_bufsize > size) {
				/*
				 * XXX if the source and destination regions
				 * overlap we have to copy backward to avoid
				 * clobbering any valid pages (i.e. pagemove
				 * implementations typically can't handle
				 * overlap).
				 */
				bdata += tbp->b_bufsize;
				while (bdata > (char *)tbp->b_data) {
					bdata -= CLBYTES;
					pagemove(bdata, bdata + size, CLBYTES);
				}
			} else
				pagemove(bdata, bdata + size, tbp->b_bufsize);
		}
		tbp->b_blkno = bn;
		tbp->b_flags |= flags | B_READ | B_ASYNC;
		++b_save->bs_nchildren;
		b_save->bs_children[i - 1] = tbp;
	}
	/*
	 * The cluster may have been terminated early; adjust the cluster
	 * buffer size accordingly.  If no cluster could be formed,
	 * deallocate the cluster save info.
	 */
	if (i <= run) {
		if (i == 1) {
			bp->b_saveaddr = b_save->bs_saveaddr;
			bp->b_flags &= ~B_CALL;
			bp->b_iodone = NULL;
			free(b_save, M_SEGMENT);
		}
		allocbuf(bp, size * i);
	}
	return(bp);
}

/*
 * Either get a new buffer or grow the existing one.
 */
struct buf *
cluster_newbuf(vp, bp, flags, blkno, lblkno, size, run)
	struct vnode *vp;
	struct buf *bp;
	long flags;
	daddr_t blkno;
	daddr_t lblkno;
	long size;
	int run;
{
	if (!bp) {
		bp = getblk(vp, lblkno, size, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			bp->b_blkno = blkno;
			return(bp);
		}
	}
	allocbuf(bp, run * size);
	bp->b_blkno = blkno;
	bp->b_iodone = cluster_callback;
	bp->b_flags |= flags | B_CALL;
	return(bp);
}

/*
 * Clean up after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
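/*
 * Memory layout sketch (illustrative, not in the original): on entry the
 * cluster buffer bp holds the entire transfer.  bp keeps its first bsize
 * bytes; each pagemove() below peels the next bsize bytes off the cluster
 * buffer and hands that memory to the corresponding component buffer:
 *
 *	bp->b_data: [ bp | child[0] | child[1] | ... | excess ]
 */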
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	long bsize;
	caddr_t cp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *)(bp->b_saveaddr);
	bp->b_saveaddr = b_save->bs_saveaddr;

	bsize = b_save->bs_bufsize;
	cp = (char *)bp->b_data + bsize;
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		pagemove(cp, tbp->b_data, bsize);
		tbp->b_bufsize += bsize;
		tbp->b_bcount = bsize;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
		bp->b_bufsize -= bsize;
		cp += bsize;
	}
	/*
	 * If there was excess memory in the cluster buffer,
	 * slide it up adjacent to the remaining valid data.
	 */
	if (bp->b_bufsize != bsize) {
		if (bp->b_bufsize < bsize)
			panic("cluster_callback: too little memory");
		pagemove(cp, (char *)bp->b_data + bsize, bp->b_bufsize - bsize);
	}
	bp->b_bcount = bsize;
	bp->b_iodone = NULL;
	free(b_save, M_SEGMENT);
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
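/*
 * A hypothetical caller, modeled on the ffs write path (sketch only;
 * ioflag, xfersize, blkoffset, and doclusterwrite are assumptions, not
 * from this file):
 *
 *	if (ioflag & IO_SYNC)
 *		(void) bwrite(bp);
 *	else if (xfersize + blkoffset == fs->fs_bsize && doclusterwrite)
 *		cluster_write(bp, ip->i_size);
 *	else
 *		bawrite(bp);
 */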
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;

	vp = bp->b_vp;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(bp->b_bcount))) {
		maxclen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum
			 * cluster size, then push the previous cluster.
			 * Otherwise try reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if ((lbn + 1) * bp->b_bcount != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				cluster_wbuild(vp, NULL, bp->b_bcount,
				    vp->v_cstart, cursize, lbn);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, NULL, bp->b_bcount,
					    vp->v_cstart, cursize, lbn);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.
		 * If at end of file, make cluster as large as possible,
		 * otherwise find size of existing cluster.
		 */
		if ((lbn + 1) * bp->b_bcount != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {		/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the
		 * I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
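/*
 * Operational sketch (editor's summary, not from the original comments):
 * starting at start_lbn, skip over blocks that are not in core, then
 * absorb up to len contiguous delayed-write buffers into one large buffer
 * and issue a single bawrite().  The redo loop restarts the scan whenever
 * a cluster is cut short, so one call may issue several smaller writes.
 */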
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t	lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	caddr_t	cp;
	int i, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != f_iosize %d\n",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Too few blocks left to build a cluster; just write what we have. */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

	/*
	 * Extra memory in the buffer, punt on this buffer.
	 * XXX we could handle this in most cases, but we would have to
	 * push the extra memory down to after our max possible cluster
	 * size and then potentially pull it back up if the cluster was
	 * terminated prematurely--too much hassle.
	 */
	if (bp->b_bcount != bp->b_bufsize) {
		++start_lbn;
		--len;
		bawrite(bp);
		goto redo;
	}

	--len;
	b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bcount = bp->b_bcount;
	b_save->bs_bufsize = bp->b_bufsize;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

	bp->b_flags |= B_CALL;
	bp->b_iodone = cluster_callback;
	cp = (char *)bp->b_data + size;
	for (++start_lbn, i = 0; i < len; ++i, ++start_lbn) {
		/*
		 * Block is not in core or the non-sequential block
		 * ending our cluster was part of the cluster (in which
		 * case we don't want to write it twice).
		 */
		if (!incore(vp, start_lbn) ||
		    (last_bp == NULL && start_lbn == lbn))
			break;

		/*
		 * Get the desired block buffer (unless it is the final
		 * sequential block whose buffer was passed in explicitly
		 * as last_bp).
		 */
		if (last_bp == NULL || start_lbn != lbn) {
			tbp = getblk(vp, start_lbn, size, 0, 0);
			if (!(tbp->b_flags & B_DELWRI)) {
				brelse(tbp);
				break;
			}
		} else
			tbp = last_bp;

		++b_save->bs_nchildren;

		/* Move memory from children to parent */
		if (tbp->b_blkno != (bp->b_blkno + btodb(bp->b_bufsize))) {
			printf("Clustered Block: %d addr %x bufsize: %d\n",
			    bp->b_lblkno, bp->b_blkno, bp->b_bufsize);
			printf("Child Block: %d addr: %x\n", tbp->b_lblkno,
			    tbp->b_blkno);
			panic("Clustered write to wrong blocks");
		}

		pagemove(tbp->b_data, cp, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_bufsize -= size;
		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= (B_ASYNC | B_AGE);
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);		/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;

		cp += size;
	}

	if (i == 0) {
		/* None to cluster */
		bp->b_saveaddr = b_save->bs_saveaddr;
		bp->b_flags &= ~B_CALL;
		bp->b_iodone = NULL;
		free(b_save, M_SEGMENT);
	}
	bawrite(bp);
	if (i < len) {
		len -= i + 1;
		start_lbn += 1;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
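/*
 * Note (editorial): the bread() calls below should normally be cache
 * hits, since every block in the range was recently delayed-written
 * by cluster_write() above.
 */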
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t	lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void)bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
73065998Smckusick }
731