/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			    int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			    off_t doffset, int blksize, int run,
			    struct buf *fbp, int doasync);
static void cluster_callback (struct bio *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t	bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * cluster_read() replaces bread().  It reads the requested block and,
 * when the access pattern looks sequential, clusters the read and
 * issues read-ahead.
 */
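/*
 * Usage sketch (hypothetical caller; "ip", "uio", "blkoff" and
 * "seqcount" below are illustrative, not defined in this file).  A
 * filesystem read path might issue:
 *
 *	error = cluster_read(vp, ip->i_size, loffset, blksize,
 *			     blkoff + uio->uio_resid, seqcount, &bp);
 *
 * where loffset is the block-aligned logical offset of the read; the
 * returned *bpp is biowait()ed on internally before returning.
 */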
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, int totread, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra, racluster;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vmaxiosize(vp) / blksize;
	maxra = 2 * racluster + (totread / blksize);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;
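	/*
	 * Example (illustrative numbers): with a 64KB device I/O limit
	 * and 8KB blocks, racluster is 8; a 32KB request then yields
	 * maxra = 2 * 8 + 4 = 20 blocks of read-ahead, subject to the
	 * MAXRA and nbuf/8 clamps above.
	 */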

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;

			/*
			 * Set the read-ahead mark only if we can passively
			 * lock the buffer.  Note that with these flags the
			 * bp could very well exist even though NULL is
			 * returned.
			 */
			for (i = 1; i < maxra; i++) {
				tbp = findblk(vp, loffset + i * blksize,
					      FINDBLK_NBLOCK);
				if (tbp == NULL)
					break;
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1))) {
					tbp->b_flags |= B_RAM;
				}
				BUF_UNLOCK(tbp);
			}
			if (i >= maxra)
				return 0;
			loffset += i * blksize;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / blksize;
		if (nblks) {
			int burstbytes;

			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < blksize * 2)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp, 0);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += blksize;
		}
	}

	/*
	 * Handle the synchronous read.  This only occurs if B_CACHE was
	 * not set.  bp (and rbp) could be either a cluster bp or a normal
	 * bp depending on what cluster_rbuild() decided to do.  If it is
	 * a cluster bp, vfs_busy_pages() has already been called.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%lld,%d,%d) ",
			    bp->b_loffset, bp->b_bcount, seqcount);
#endif
		bp->b_cmd = BUF_CMD_READ;
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if ((bp->b_flags & B_ASYNC) || bp->b_bio1.bio_done != NULL)
			BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);
		if (bp->b_flags & B_ERROR) {
			if ((error = bp->b_error) == 0)
				error = EIO;
		} else {
			error = 0;
		}
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	rbp = NULL;
	if (!error &&
	    seqcount &&
	    loffset < origoffset + seqcount * blksize &&
	    loffset + blksize <= filesize
	) {
		int nblksread;
		int ntoread;
		int burstbytes;
		int tmp_error;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			goto no_read_ahead;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		ntoread = burstbytes / blksize;
		nblksread = (totread + blksize - 1) / blksize;
		if (seqcount < nblksread)
			seqcount = nblksread;
		if (ntoread > seqcount)
			ntoread = seqcount;

		rbp->b_flags |= B_RAM/* | B_AGE*/;
		if (burstbytes) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     ntoread, rbp, 1);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp)
				kprintf("A+(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
			else
				kprintf("A(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);
		rbp->b_flags |= B_ASYNC;
		rbp->b_cmd = BUF_CMD_READ;

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);			/* B_ASYNC */
		vn_strategy(vp, &rbp->b_bio1);
	}
no_read_ahead:

	if (reqbp)
		return (biowait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
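/*
 * Sketch (illustrative numbers): a run of four contiguous 8KB blocks
 * can be read with a single 32KB pbuf I/O; cluster_callback() then
 * completes the four underlying 8KB buffers individually.
 */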
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset,
	off_t doffset, int blksize, int run, struct buf *fbp, int doasync)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
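	/*
	 * (Illustration: with 1KB fragments tbp->b_data may begin, say,
	 * 0x400 bytes into a page; OR-ing that page offset into the
	 * pbuf's page-aligned b_data keeps byte addresses consistent
	 * with the pages entered by pmap_qenter() below.)
	 */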
	bp->b_flags |= B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * The first buffer is set up async only if doasync is
		 * specified.  All other buffers in the cluster are always
		 * set up async.  This way the caller can decide how to
		 * deal with the requested buffer.
		 */
		if (i || doasync)
			tbp->b_flags |= B_ASYNC;
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
				(bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'blksize' for both b_bcount and b_bufsize, as
		 * cluster_wbuild() does.
		 */
		if (tbp->b_bcount != blksize)
		    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
		    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		    bp->b_bufsize, bp->b_kvasize);
	}

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
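/*
 * A sketch of the list being dismantled here (it is built by
 * cluster_append()):
 *
 *	bio->bio_caller_info1.cluster_head -> tbp -> tbp -> ... -> NULL
 *
 * with the links kept in each buffer's b_cluster_next field and the
 * tail cached in bio_caller_info2.cluster_tail.
 */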
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */
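/*
 * The mode is selected at run time through the vfs.write_behind
 * sysctl declared near the top of this file, e.g.
 * "sysctl vfs.write_behind=2".
 */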

static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, blksize, start_loffset, len);
		/* fall through */
	default:
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
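/*
 * Per-vnode cluster state used below (roughly): v_cstart is the
 * logical offset of the first block of the cluster being built,
 * v_lastw the offset of the last block delayed-written, v_clen caps
 * how far past v_cstart the cluster may grow (in bytes), and v_lasta
 * is the device offset last assigned.
 */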
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.  Otherwise
			 * try reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize != filesize ||
			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 *
 * Scan the range [start_loffset, start_loffset + bytes) for dirty,
 * clusterable buffers and issue them as one or more clustered writes.
 * Returns the number of bytes queued for write.
 */
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
		if (tbp == NULL ||
		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
			if (tbp)
				BUF_UNLOCK(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i != 0) { /* If not the first buffer */
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != blksize) ||
				  ((bp->b_bio2.bio_offset + i) !=
				    tbp->b_bio2.bio_offset) ||
				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				    (maxiosize / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					  (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_flags |= B_ASYNC;
		bp->b_cmd = BUF_CMD_WRITE;
		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		if (bp->b_runningbufspace) {
			runningbufspace += bp->b_runningbufspace;
			++runningbufcount;
		}
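		/*
		 * (The in-flight bytes charged to runningbufspace above
		 * are what the buffer subsystem uses elsewhere to
		 * throttle heavy writers.)
		 */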
		BUF_KERNPROC(bp);	/* B_ASYNC */
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer (last_bp) passed in by the caller.
 */
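/*
 * Note that the bs_children pointer array lives in the same kmalloc'd
 * block, immediately after the cluster_save structure itself.
 */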
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			 M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

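/*
 * Append tbp to the cluster list hanging off bio.  The list is singly
 * linked through b_cluster_next, with the head and tail kept in the
 * bio's caller_info fields; cluster_callback() later walks the list
 * and dismantles it.
 */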
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}