xref: /openbsd-src/sys/kern/vfs_bio.c (revision 43003dfe3ad45d1698bed8a37f2b0f5b14f20d4f)
1 /*	$OpenBSD: vfs_bio.c,v 1.120 2009/08/08 15:04:43 beck Exp $	*/
2 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
3 
4 /*
5  * Copyright (c) 1994 Christopher G. Demetriou
6  * Copyright (c) 1982, 1986, 1989, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
39  */
40 
41 /*
42  * Some references:
43  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
44  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
45  *		UNIX Operating System (Addison-Wesley, 1989)
46  */
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/proc.h>
51 #include <sys/buf.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/malloc.h>
55 #include <sys/pool.h>
56 #include <sys/resourcevar.h>
57 #include <sys/conf.h>
58 #include <sys/kernel.h>
59 
60 #include <uvm/uvm_extern.h>
61 
62 #include <miscfs/specfs/specdev.h>
63 
64 /*
65  * Definitions for the buffer free lists.
66  */
67 #define	BQUEUES		2		/* number of free buffer queues */
68 
69 #define	BQ_DIRTY	0		/* LRU queue with dirty buffers */
70 #define	BQ_CLEAN	1		/* LRU queue with clean buffers */
71 
72 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
73 int needbuffer;
74 struct bio_ops bioops;
75 
76 /*
77  * Buffer pool for I/O buffers.
78  */
79 struct pool bufpool;
80 struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
81 void buf_put(struct buf *);
82 
83 /*
84  * Insq/Remq for the buffer free lists.
85  */
86 #define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
87 #define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)
88 
89 struct buf *bio_doread(struct vnode *, daddr64_t, int, int);
90 struct buf *buf_get(struct vnode *, daddr64_t, size_t);
91 void bread_cluster_callback(struct buf *);
92 
93 /*
94  * We keep a few counters to monitor the utilization of the buffer cache
95  *
96  *  numbufpages   - total number of pages allocated.
97  *  numdirtypages - number of pages on BQ_DIRTY queue.
98  *  lodirtypages  - low water mark for buffer cleaning daemon.
99  *  hidirtypages  - high water mark for buffer cleaning daemon.
100  *  numcleanpages - number of pages on BQ_CLEAN queue.
101  *		    Used to track the need to speed up the cleaner and
102  *		    as a reserve for special processes like the syncer.
103  *  maxcleanpages - the highest page count on BQ_CLEAN.
104  */
105 
106 struct bcachestats bcstats;
107 long lodirtypages;
108 long hidirtypages;
109 long locleanpages;
110 long hicleanpages;
111 long maxcleanpages;
112 long backoffpages;	/* backoff counter for page allocations */
113 long buflowpages;	/* bufpages low water mark */
114 long bufhighpages; 	/* bufpages high water mark */
115 long bufbackpages; 	/* number of pages we back off when asked to shrink */
116 
117 /* XXX - should be defined here. */
118 extern int bufcachepercent;
119 
120 vsize_t bufkvm;
121 
122 struct proc *cleanerproc;
123 int bd_req;			/* Sleep point for cleaner daemon. */
124 
125 void
126 bremfree(struct buf *bp)
127 {
128 	struct bqueues *dp = NULL;
129 
130 	splassert(IPL_BIO);
131 
132 	/*
133 	 * We only calculate the head of the freelist when removing
134 	 * the last element of the list as that is the only time that
135 	 * it is needed (e.g. to reset the tail pointer).
136 	 *
137 	 * NB: This makes an assumption about how tailq's are implemented.
138 	 */
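	/*
	 * (In <sys/queue.h>, the head's tqh_last points at the tqe_next
	 * field of the last element, which is why comparing it against
	 * &TAILQ_NEXT(bp, b_freelist) identifies the queue bp is last on.)
	 */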
139 	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
140 		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
141 			if (dp->tqh_last == &TAILQ_NEXT(bp, b_freelist))
142 				break;
143 		if (dp == &bufqueues[BQUEUES])
144 			panic("bremfree: lost tail");
145 	}
146 	if (!ISSET(bp->b_flags, B_DELWRI)) {
147 		bcstats.numcleanpages -= atop(bp->b_bufsize);
148 	} else {
149 		bcstats.numdirtypages -= atop(bp->b_bufsize);
150 	}
151 	TAILQ_REMOVE(dp, bp, b_freelist);
152 	bcstats.freebufs--;
153 }
154 
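/*
 * Free a buffer structure: take it off the global buffer list, credit
 * any backoff in progress, release its backing memory and, unless
 * buf_dealloc_mem() defers the final free, return it to bufpool.
 * The buffer must be clean and off all free and vnode lists.
 */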
155 void
156 buf_put(struct buf *bp)
157 {
158 	splassert(IPL_BIO);
159 
160 #ifdef DIAGNOSTIC
161 	if (bp->b_pobj != NULL)
162 		KASSERT(bp->b_bufsize > 0);
163 	if (ISSET(bp->b_flags, B_DELWRI))
164 		panic("buf_put: releasing dirty buffer");
165 	if (bp->b_freelist.tqe_next != NOLIST &&
166 	    bp->b_freelist.tqe_next != (void *)-1)
167 		panic("buf_put: still on the free list");
168 	if (bp->b_vnbufs.le_next != NOLIST &&
169 	    bp->b_vnbufs.le_next != (void *)-1)
170 		panic("buf_put: still on the vnode list");
171 	if (!LIST_EMPTY(&bp->b_dep))
172 		panic("buf_put: b_dep is not empty");
173 #endif
174 
175 	LIST_REMOVE(bp, b_list);
176 	bcstats.numbufs--;
177 	if (backoffpages) {
178 		backoffpages -= atop(bp->b_bufsize);
179 		if (backoffpages < 0)
180 			backoffpages = 0;
181 	}
182 
183 	if (buf_dealloc_mem(bp) != 0)
184 		return;
185 	pool_put(&bufpool, bp);
186 }
187 
188 /*
189  * Initialize buffers and hash links for buffers.
190  */
191 void
192 bufinit(void)
193 {
194 	struct bqueues *dp;
195 
196 	/* XXX - for now */
197 	bufhighpages = buflowpages = bufpages = bufcachepercent = bufkvm = 0;
198 
199 	/*
200 	 * If MD code doesn't say otherwise, use 10% of kvm for mappings and
201 	 * 10% physmem for pages.
202 	 */
203 	if (bufcachepercent == 0)
204 		bufcachepercent = 10;
205 	if (bufpages == 0)
206 		bufpages = physmem * bufcachepercent / 100;
207 
208 	bufhighpages = bufpages;
209 
210 	/*
211 	 * Set the base backoff level for the buffer cache to 10% of
212 	 * physmem; we will not allow uvm to steal back more than this
213 	 * number of pages.
214 	 */
215 	buflowpages = physmem * 10 / 100;
216 
217 	/*
218 	 * Set bufbackpages to 10 percent of the low water mark, capped
219 	 * at 100 pages.
220 	 */
221 
222 	bufbackpages = buflowpages * 10 / 100;
223 	if (bufbackpages > 100)
224 		bufbackpages = 100;
225 
226 	if (bufkvm == 0)
227 		bufkvm = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 10;
228 
229 	/*
230 	 * Don't use more than twice the amount of bufpages for mappings.
231 	 * It's twice since we map things sparsely.
232 	 */
233 	if (bufkvm > bufpages * PAGE_SIZE)
234 		bufkvm = bufpages * PAGE_SIZE;
235 	/*
236 	 * Round bufkvm down to a multiple of MAXPHYS, since we allocate
237 	 * VA space in MAXPHYS-sized chunks.
238 	 */
239 	bufkvm &= ~(MAXPHYS - 1);
240 
241 	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
242 	pool_setipl(&bufpool, IPL_BIO);
243 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
244 		TAILQ_INIT(dp);
245 
246 	/*
247 	 * hmm - bufkvm is an argument because it's static, while
248 	 * bufpages is global because it can change while running.
249  	 */
250 	buf_mem_init(bufkvm);
251 
252 	hidirtypages = (bufpages / 4) * 3;
253 	lodirtypages = bufpages / 2;
254 
255 	/*
256 	 * When we hit 95% of pages being clean, we bring them down to
257 	 * 90% to have some slack.
258 	 */
259 	hicleanpages = bufpages - (bufpages / 20);
260 	locleanpages = bufpages - (bufpages / 10);
261 
262 	maxcleanpages = locleanpages;
263 }
264 
265 /*
266  * Resize the buffer cache to newbufpages and recompute the watermarks.
267  */
268 void
269 bufadjust(int newbufpages)
270 {
271 	/*
272 	 * XXX - note that bufkvm was allocated once, based on 10% of
273 	 * physmem; see above.
274 	 */
275 	struct buf *bp;
276 	int s;
277 
278 	s = splbio();
279 	bufpages = newbufpages;
280 
281 	hidirtypages = (bufpages / 4) * 3;
282 	lodirtypages = bufpages / 2;
283 
284 	/*
285 	 * When we hit 95% of pages being clean, we bring them down to
286 	 * 90% to have some slack.
287 	 */
288 	hicleanpages = bufpages - (bufpages / 20);
289 	locleanpages = bufpages - (bufpages / 10);
290 
291 	maxcleanpages = locleanpages;
292 
293 	/*
294 	 * If we have more buffer pages allocated than bufpages,
295 	 * free buffers until we get back down. This may consume
296 	 * all our clean pages...
297 	 */
298 	while ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) &&
299 	    (bcstats.numbufpages > bufpages)) {
300 		bremfree(bp);
301 		if (bp->b_vp) {
302 			RB_REMOVE(buf_rb_bufs,
303 			    &bp->b_vp->v_bufs_tree, bp);
304 			brelvp(bp);
305 		}
306 		buf_put(bp);
307 	}
308 
309 	/*
310 	 * Wake up the cleaner if we're getting low on pages. We might
311 	 * now have too many dirty pages, or have fallen below our
312 	 * low water mark on clean pages, so we need to free up more
313 	 * buffers.
314 	 */
315 	if (bcstats.numdirtypages >= hidirtypages ||
316 	    bcstats.numcleanpages <= locleanpages)
317 		wakeup(&bd_req);
318 
319 	/*
320 	 * If the immediate action above has not freed up enough pages
321 	 * for us to proceed, tsleep and wait for the cleaner
322 	 * to do its work and bring us back down to sanity.
323 	 */
324 	while (bcstats.numbufpages > bufpages) {
325 		tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
326 	}
327 	splx(s);
328 }
329 
330 /*
331  * Make the buffer cache back off from cachepct.
332  */
333 int
334 bufbackoff()
335 {
336 	/*
337 	 * Back off the amount of buffer cache pages. Called by the page
338 	 * daemon to consume buffer cache pages rather than swapping.
339 	 *
340 	 * On success, it frees N pages from the buffer cache, and sets
341 	 * a flag so that the next N allocations from buf_get will recycle
342 	 * a buffer rather than allocate a new one. It then returns 0 to the
343 	 * caller.
344 	 *
345 	 * On failure, it frees no pages from the buffer cache and
346 	 * returns -1 to the caller.
347 	 */
348 	long d;
349 
350 	if (bufpages <= buflowpages)
351 		return(-1);
352 
353 	if (bufpages - bufbackpages >= buflowpages)
354 		d = bufbackpages;
355 	else
356 		d = bufpages - buflowpages;
357 	backoffpages = bufbackpages;
358 	bufadjust(bufpages - d);
359 	backoffpages = bufbackpages;
360 	return(0);
361 }
362 
363 struct buf *
364 bio_doread(struct vnode *vp, daddr64_t blkno, int size, int async)
365 {
366 	struct buf *bp;
367 	struct mount *mp;
368 
369 	bp = getblk(vp, blkno, size, 0, 0);
370 
371 	/*
372 	 * If the buffer does not have valid data, start a read.
373 	 * Note that if the buffer is B_INVAL, getblk() won't return it.
374 	 * Therefore, it's valid if its I/O has completed or been delayed.
375 	 */
376 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
377 		SET(bp->b_flags, B_READ | async);
378 		bcstats.pendingreads++;
379 		bcstats.numreads++;
380 		VOP_STRATEGY(bp);
381 		/* Pay for the read. */
382 		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
383 	} else if (async) {
384 		brelse(bp);
385 	}
386 
387 	mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
388 
389 	/*
390 	 * Collect statistics on synchronous and asynchronous reads.
391 	 * Reads from block devices are charged to their associated
392 	 * filesystem (if any).
393 	 */
394 	if (mp != NULL) {
395 		if (async == 0)
396 			mp->mnt_stat.f_syncreads++;
397 		else
398 			mp->mnt_stat.f_asyncreads++;
399 	}
400 
401 	return (bp);
402 }
403 
404 /*
405  * Read a disk block.
406  * This algorithm is described in Bach (p. 54).
407  */
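/*
 * A minimal usage sketch (not from any particular caller; "vp", "lbn"
 * and "bsize" are assumed to come from the caller's context):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use bp->b_data...
 *	brelse(bp);
 */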
408 int
409 bread(struct vnode *vp, daddr64_t blkno, int size, struct ucred *cred,
410     struct buf **bpp)
411 {
412 	struct buf *bp;
413 
414 	/* Get buffer for block. */
415 	bp = *bpp = bio_doread(vp, blkno, size, 0);
416 
417 	/* Wait for the read to complete, and return result. */
418 	return (biowait(bp));
419 }
420 
421 /*
422  * Read-ahead multiple disk blocks. The first is sync, the rest async.
423  * A trivial modification to the breada algorithm presented in Bach (p. 55).
424  */
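/*
 * Sketch of a call with a single read-ahead block ("rablkno" and
 * "rabsize" are hypothetical; real callers derive them from the file
 * layout):
 *
 *	daddr64_t rablkno[1] = { lbn + 1 };
 *	int rabsize[1] = { bsize };
 *
 *	error = breadn(vp, lbn, bsize, rablkno, rabsize, 1, NOCRED, &bp);
 */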
425 int
426 breadn(struct vnode *vp, daddr64_t blkno, int size, daddr64_t rablks[],
427     int rasizes[], int nrablks, struct ucred *cred, struct buf **bpp)
428 {
429 	struct buf *bp;
430 	int i;
431 
432 	bp = *bpp = bio_doread(vp, blkno, size, 0);
433 
434 	/*
435 	 * For each of the read-ahead blocks, start a read, if necessary.
436 	 */
437 	for (i = 0; i < nrablks; i++) {
438 		/* If it's in the cache, just go on to next one. */
439 		if (incore(vp, rablks[i]))
440 			continue;
441 
442 		/* Get a buffer for the read-ahead block */
443 		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
444 	}
445 
446 	/* Wait for the initial, synchronous read and return its status. */
447 	return (biowait(bp));
448 }
449 
450 /*
451  * Called from interrupt context.
452  */
453 void
454 bread_cluster_callback(struct buf *bp)
455 {
456 	struct buf **xbpp = bp->b_saveaddr;
457 	int i;
458 
459 	if (xbpp[1] != NULL) {
460 		size_t newsize = xbpp[1]->b_bufsize;
461 
462 		/*
463 		 * Shrink this buffer to only cover its part of the total I/O.
464 		 */
465 		buf_shrink_mem(bp, newsize);
466 		bp->b_bcount = newsize;
467 	}
468 
469 	for (i = 1; xbpp[i] != 0; i++) {
470 		if (ISSET(bp->b_flags, B_ERROR))
471 			SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
472 		biodone(xbpp[i]);
473 	}
474 
475 	free(xbpp, M_TEMP);
476 
477 	if (ISSET(bp->b_flags, B_ASYNC)) {
478 		brelse(bp);
479 	} else {
480 		CLR(bp->b_flags, B_WANTED);
481 		wakeup(bp);
482 	}
483 }
484 
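/*
 * Read the block at blkno synchronously and, when the layout allows it
 * (page-aligned size, blocks contiguous on disk according to VOP_BMAP()),
 * issue one large asynchronous read-ahead covering the following blocks.
 * The read-ahead buffers share the pages of a single cluster buffer and
 * are finished off in bread_cluster_callback() above.
 */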
485 int
486 bread_cluster(struct vnode *vp, daddr64_t blkno, int size, struct buf **rbpp)
487 {
488 	struct buf *bp, **xbpp;
489 	int howmany, maxra, i, inc;
490 	daddr64_t sblkno;
491 
492 	*rbpp = bio_doread(vp, blkno, size, 0);
493 
494 	if (size != round_page(size))
495 		goto out;
496 
497 	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
498 		goto out;
499 
500 	maxra++;
501 	if (sblkno == -1 || maxra < 2)
502 		goto out;
503 
504 	howmany = MAXPHYS / size;
505 	if (howmany > maxra)
506 		howmany = maxra;
507 
508 	xbpp = malloc((howmany + 1) * sizeof(struct buf *), M_TEMP, M_NOWAIT);
509 	if (xbpp == NULL)
510 		goto out;
511 
512 	for (i = howmany - 1; i >= 0; i--) {
513 		size_t sz;
514 
515 		/*
516 		 * The first buffer is allocated large enough to cover what
517 		 * all the other buffers need.
518 		 */
519 		sz = i == 0 ? howmany * size : 0;
520 
521 		xbpp[i] = buf_get(vp, blkno + i + 1, sz);
522 		if (xbpp[i] == NULL) {
523 			for (++i; i < howmany; i++) {
524 				SET(xbpp[i]->b_flags, B_INVAL);
525 				brelse(xbpp[i]);
526 			}
527 			free(xbpp, M_TEMP);
528 			goto out;
529 		}
530 	}
531 
532 	bp = xbpp[0];
533 
534 	xbpp[howmany] = 0;
535 
536 	inc = btodb(size);
537 
538 	for (i = 1; i < howmany; i++) {
539 		bcstats.pendingreads++;
540 		bcstats.numreads++;
541 		SET(xbpp[i]->b_flags, B_READ | B_ASYNC);
542 		xbpp[i]->b_blkno = sblkno + (i * inc);
543 		xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
544 		xbpp[i]->b_data = NULL;
545 		xbpp[i]->b_pobj = bp->b_pobj;
546 		xbpp[i]->b_poffs = bp->b_poffs + (i * size);
547 	}
548 
549 	KASSERT(bp->b_lblkno == blkno + 1);
550 	KASSERT(bp->b_vp == vp);
551 
552 	bp->b_blkno = sblkno;
553 	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
554 
555 	bp->b_saveaddr = (void *)xbpp;
556 	bp->b_iodone = bread_cluster_callback;
557 
558 	bcstats.pendingreads++;
559 	bcstats.numreads++;
560 	VOP_STRATEGY(bp);
561 	curproc->p_stats->p_ru.ru_inblock++;
562 
563 out:
564 	return (biowait(*rbpp));
565 }
566 
567 /*
568  * Block write.  Described in Bach (p.56)
569  */
570 int
571 bwrite(struct buf *bp)
572 {
573 	int rv, async, wasdelayed, s;
574 	struct vnode *vp;
575 	struct mount *mp;
576 
577 	vp = bp->b_vp;
578 	if (vp != NULL)
579 		mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
580 	else
581 		mp = NULL;
582 
583 	/*
584 	 * Remember buffer type, to switch on it later.  If the write was
585 	 * synchronous, but the file system was mounted with MNT_ASYNC,
586 	 * convert it to a delayed write.
587 	 * XXX note that this relies on delayed tape writes being converted
588 	 * to async, not sync writes (which is safe, but ugly).
589 	 */
590 	async = ISSET(bp->b_flags, B_ASYNC);
591 	if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
592 		bdwrite(bp);
593 		return (0);
594 	}
595 
596 	/*
597 	 * Collect statistics on synchronous and asynchronous writes.
598 	 * Writes to block devices are charged to their associated
599 	 * filesystem (if any).
600 	 */
601 	if (mp != NULL) {
602 		if (async)
603 			mp->mnt_stat.f_asyncwrites++;
604 		else
605 			mp->mnt_stat.f_syncwrites++;
606 	}
607 	bcstats.pendingwrites++;
608 	bcstats.numwrites++;
609 
610 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
611 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
612 
613 	s = splbio();
614 
615 	/*
616 	 * If not synchronous, pay for the I/O operation and make
617 	 * sure the buf is on the correct vnode queue.  We have
618 	 * to do this now, because if we don't, the vnode may not
619 	 * be properly notified that its I/O has completed.
620 	 */
621 	if (wasdelayed) {
622 		reassignbuf(bp);
623 	} else
624 		curproc->p_stats->p_ru.ru_oublock++;
625 
626 
627 	/* Initiate disk write.  Make sure the appropriate party is charged. */
628 	bp->b_vp->v_numoutput++;
629 	splx(s);
630 	SET(bp->b_flags, B_WRITEINPROG);
631 	VOP_STRATEGY(bp);
632 
633 	if (async)
634 		return (0);
635 
636 	/*
637 	 * If I/O was synchronous, wait for it to complete.
638 	 */
639 	rv = biowait(bp);
640 
641 	/* Release the buffer. */
642 	brelse(bp);
643 
644 	return (rv);
645 }
646 
647 
648 /*
649  * Delayed write.
650  *
651  * The buffer is marked dirty, but is not queued for I/O.
652  * This routine should be used when the buffer is expected
653  * to be modified again soon, typically a small write that
654  * partially fills a buffer.
655  *
656  * NB: magnetic tapes cannot be delayed; they must be
657  * written in the order that the writes are requested.
658  *
659  * Described in Leffler, et al. (pp. 208-213).
660  */
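/*
 * A typical read-modify-write sketch (assuming "vp", "lbn" and "bsize"
 * from the caller's context):
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...modify a small part of bp->b_data...
 *	bdwrite(bp);
 */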
661 void
662 bdwrite(struct buf *bp)
663 {
664 	int s;
665 
666 	/*
667 	 * If the block hasn't been seen before:
668 	 *	(1) Mark it as having been seen,
669 	 *	(2) Charge for the write,
670 	 *	(3) Make sure it's on its vnode's correct block list, and
671 	 *	(4) If a buffer is rewritten, move it to the end of the dirty list.
672 	 */
673 	if (!ISSET(bp->b_flags, B_DELWRI)) {
674 		SET(bp->b_flags, B_DELWRI);
675 		bp->b_synctime = time_uptime + 35;
676 		s = splbio();
677 		reassignbuf(bp);
678 		splx(s);
679 		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
680 	} else {
681 		/*
682 		 * If this buffer has slipped past the syncer, force an
683 		 * asynchronous write on it now.
684 		 */
685 		if (bp->b_synctime < time_uptime) {
686 			bawrite(bp);
687 			return;
688 		}
689 	}
690 
691 	/* If this is a tape block, write the block now. */
692 	if (major(bp->b_dev) < nblkdev &&
693 	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
694 		bawrite(bp);
695 		return;
696 	}
697 
698 	/* Otherwise, the "write" is done, so mark and release the buffer. */
699 	CLR(bp->b_flags, B_NEEDCOMMIT);
700 	SET(bp->b_flags, B_DONE);
701 	brelse(bp);
702 }
703 
704 /*
705  * Asynchronous block write; just an asynchronous bwrite().
706  */
707 void
708 bawrite(struct buf *bp)
709 {
710 
711 	SET(bp->b_flags, B_ASYNC);
712 	VOP_BWRITE(bp);
713 }
714 
715 /*
716  * Must be called at splbio()
717  */
718 void
719 buf_dirty(struct buf *bp)
720 {
721 	splassert(IPL_BIO);
722 
723 #ifdef DIAGNOSTIC
724 	if (!ISSET(bp->b_flags, B_BUSY))
725 		panic("Trying to dirty buffer on freelist!");
726 #endif
727 
728 	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
729 		SET(bp->b_flags, B_DELWRI);
730 		bp->b_synctime = time_uptime + 35;
731 		reassignbuf(bp);
732 	}
733 }
734 
735 /*
736  * Must be called at splbio()
737  */
738 void
739 buf_undirty(struct buf *bp)
740 {
741 	splassert(IPL_BIO);
742 
743 #ifdef DIAGNOSTIC
744 	if (!ISSET(bp->b_flags, B_BUSY))
745 		panic("Trying to undirty buffer on freelist!");
746 #endif
747 	if (ISSET(bp->b_flags, B_DELWRI)) {
748 		CLR(bp->b_flags, B_DELWRI);
749 		reassignbuf(bp);
750 	}
751 }
752 
753 /*
754  * Release a buffer on to the free lists.
755  * Described in Bach (p. 46).
756  */
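/*
 * In short: B_NOCACHE or B_ERROR buffers are invalidated; invalid buffers
 * are dissociated from their vnode and either freed outright (when they
 * have no pages) or put at the head of the clean queue for quick reuse.
 * Valid buffers go to the tail of the clean or dirty queue, or to the
 * head when B_AGE is set.  The buffer is then unbusied and waiters are
 * woken.
 */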
757 void
758 brelse(struct buf *bp)
759 {
760 	struct bqueues *bufq;
761 	int s;
762 
763 	s = splbio();
764 
765 	if (bp->b_data != NULL)
766 		KASSERT(bp->b_bufsize > 0);
767 
768 	/*
769 	 * Determine which queue the buffer should be on, then put it there.
770 	 */
771 
772 	/* If it's not cacheable, or an error, mark it invalid. */
773 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
774 		SET(bp->b_flags, B_INVAL);
775 
776 	if (ISSET(bp->b_flags, B_INVAL)) {
777 		/*
778 		 * If the buffer is invalid, place it in the clean queue, so it
779 		 * can be reused.
780 		 */
781 		if (LIST_FIRST(&bp->b_dep) != NULL)
782 			buf_deallocate(bp);
783 
784 		if (ISSET(bp->b_flags, B_DELWRI)) {
785 			CLR(bp->b_flags, B_DELWRI);
786 		}
787 
788 		if (bp->b_vp) {
789 			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
790 			    bp);
791 			brelvp(bp);
792 		}
793 		bp->b_vp = NULL;
794 
795 		/*
796 		 * If the buffer has no associated data, place it back in the
797 		 * pool.
798 		 */
799 		if (bp->b_data == NULL && bp->b_pobj == NULL) {
800 			/*
801 			 * Wake up any processes waiting for _this_ buffer to
802 			 * become free. They are not allowed to grab it
803 			 * since it will be freed. But the only sleeper is
804 			 * getblk and it's restarting the operation after
805 			 * sleep.
806 			 */
807 			if (ISSET(bp->b_flags, B_WANTED)) {
808 				CLR(bp->b_flags, B_WANTED);
809 				wakeup(bp);
810 			}
811 			if (bp->b_vp != NULL)
812 				RB_REMOVE(buf_rb_bufs,
813 				    &bp->b_vp->v_bufs_tree, bp);
814 			buf_put(bp);
815 			splx(s);
816 			return;
817 		}
818 
819 		bcstats.numcleanpages += atop(bp->b_bufsize);
820 		if (maxcleanpages < bcstats.numcleanpages)
821 			maxcleanpages = bcstats.numcleanpages;
822 		binsheadfree(bp, &bufqueues[BQ_CLEAN]);
823 	} else {
824 		/*
825 		 * It has valid data.  Put it on the end of the appropriate
826 		 * queue, so that it'll stick around for as long as possible.
827 		 */
828 
829 		if (!ISSET(bp->b_flags, B_DELWRI)) {
830 			bcstats.numcleanpages += atop(bp->b_bufsize);
831 			if (maxcleanpages < bcstats.numcleanpages)
832 				maxcleanpages = bcstats.numcleanpages;
833 			bufq = &bufqueues[BQ_CLEAN];
834 		} else {
835 			bcstats.numdirtypages += atop(bp->b_bufsize);
836 			bufq = &bufqueues[BQ_DIRTY];
837 		}
838 		if (ISSET(bp->b_flags, B_AGE)) {
839 			binsheadfree(bp, bufq);
840 			bp->b_synctime = time_uptime + 30;
841 		} else {
842 			binstailfree(bp, bufq);
843 			bp->b_synctime = time_uptime + 300;
844 		}
845 	}
846 
847 	/* Unlock the buffer. */
848 	bcstats.freebufs++;
849 	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
850 	buf_release(bp);
851 
852 	/* Wake up any processes waiting for any buffer to become free. */
853 	if (needbuffer) {
854 		needbuffer--;
855 		wakeup(&needbuffer);
856 	}
857 
858 	/* Wake up any processes waiting for _this_ buffer to become free. */
859 	if (ISSET(bp->b_flags, B_WANTED)) {
860 		CLR(bp->b_flags, B_WANTED);
861 		wakeup(bp);
862 	}
863 
864 	splx(s);
865 }
866 
867 /*
868  * Determine if a block is in the cache.  Just look it up in the vnode's
869  * buffer tree; if it's there and not marked invalid, return a pointer to it.
870  */
871 struct buf *
872 incore(struct vnode *vp, daddr64_t blkno)
873 {
874 	struct buf *bp;
875 	struct buf b;
876 
877 	/* Search buf lookup tree */
878 	b.b_lblkno = blkno;
879 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
880 	if (bp && !ISSET(bp->b_flags, B_INVAL))
881 		return(bp);
882 	return(NULL);
883 }
884 
885 /*
886  * Get a block of requested size that is associated with
887  * a given vnode and block offset. If it is found in the
888  * block cache, mark it as having been found, make it busy
889  * and return it. Otherwise, return an empty block of the
890  * correct size. It is up to the caller to ensure that
891  * cached blocks are of the correct size.
892  */
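/*
 * A common calling pattern (sketch; getblk() sets B_CACHE only when the
 * block was found already valid in the cache):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if (!ISSET(bp->b_flags, B_CACHE)) {
 *		...fill bp->b_data, e.g. by reading it in or zeroing it...
 *	}
 */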
893 struct buf *
894 getblk(struct vnode *vp, daddr64_t blkno, int size, int slpflag, int slptimeo)
895 {
896 	struct buf *bp;
897 	struct buf b;
898 	int s, error;
899 
900 	/*
901 	 * XXX
902 	 * The following is an inlined version of 'incore()', but with
903 	 * the 'invalid' test moved to after the 'busy' test.  It's
904 	 * necessary because there are some cases in which the NFS
905 	 * code sets B_INVAL prior to writing data to the server, but
906 	 * in which the buffers actually contain valid data.  In this
907 	 * case, we can't allow the system to allocate a new buffer for
908 	 * the block until the write is finished.
909 	 */
910 start:
911 	b.b_lblkno = blkno;
912 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
913 	if (bp != NULL) {
914 
915 		s = splbio();
916 		if (ISSET(bp->b_flags, B_BUSY)) {
917 			SET(bp->b_flags, B_WANTED);
918 			error = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
919 			    slptimeo);
920 			splx(s);
921 			if (error)
922 				return (NULL);
923 			goto start;
924 		}
925 
926 		if (!ISSET(bp->b_flags, B_INVAL)) {
927 			bcstats.cachehits++;
928 			SET(bp->b_flags, B_CACHE);
929 			bremfree(bp);
930 			buf_acquire(bp);
931 			splx(s);
932 			return (bp);
933 		}
934 		splx(s);
935 	}
936 
937 	if ((bp = buf_get(vp, blkno, size)) == NULL)
938 		goto start;
939 
940 	return (bp);
941 }
942 
943 /*
944  * Get an empty, disassociated buffer of given size.
945  */
946 struct buf *
947 geteblk(int size)
948 {
949 	struct buf *bp;
950 
951 	while ((bp = buf_get(NULL, 0, size)) == NULL)
952 		;
953 
954 	return (bp);
955 }
956 
957 /*
958  * Allocate a buffer.
959  */
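/*
 * In more detail: grow the cache back toward bufhighpages once a backoff
 * has expired, wake the cleaner when dirty pages are high or clean pages
 * are low, and recycle buffers off BQ_CLEAN until there is room for the
 * new allocation.  Returns NULL if it had to sleep for space, could not
 * get a buf header, or if someone else created the same block first;
 * callers retry.  Otherwise the buffer is returned busy, entered in the
 * vnode's buffer tree (when vp is not NULL) and backed by freshly
 * allocated, mapped pages when size != 0.
 */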
960 struct buf *
961 buf_get(struct vnode *vp, daddr64_t blkno, size_t size)
962 {
963 	static int gcount = 0;
964 	struct buf *bp;
965 	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
966 	int npages;
967 	int s;
968 
969 	/*
970 	 * If we were previously backed off, slowly climb back up
971 	 * to the high water mark again.
972 	 */
973 	if ((backoffpages == 0) && (bufpages < bufhighpages)) {
974 		if (gcount == 0) {
975 			bufadjust(bufpages + bufbackpages);
976 			gcount += bufbackpages;
977 		} else
978 			gcount--;
979 	}
980 
981 	s = splbio();
982 	if (size) {
983 		/*
984 		 * Wake up cleaner if we're getting low on pages.
985 		 */
986 		if (bcstats.numdirtypages >= hidirtypages ||
987 		    bcstats.numcleanpages <= locleanpages)
988 			wakeup(&bd_req);
989 
990 		/*
991 		 * If we're above the high water mark for clean pages,
992 		 * free down to the low water mark.
993 		 */
994 		if (bcstats.numcleanpages > hicleanpages) {
995 			while (bcstats.numcleanpages > locleanpages) {
996 				bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN]);
997 				bremfree(bp);
998 				if (bp->b_vp) {
999 					RB_REMOVE(buf_rb_bufs,
1000 					    &bp->b_vp->v_bufs_tree, bp);
1001 					brelvp(bp);
1002 				}
1003 				buf_put(bp);
1004 			}
1005 		}
1006 
1007 		npages = atop(round_page(size));
1008 
1009 		/*
1010 		 * Free some buffers until we have enough space.
1011 		 */
1012 		while ((bcstats.numbufpages + npages > bufpages)
1013 		    || backoffpages) {
1014 			int freemax = 5;
1015 			int i = freemax;
1016 			while ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) && i--) {
1017 				bremfree(bp);
1018 				if (bp->b_vp) {
1019 					RB_REMOVE(buf_rb_bufs,
1020 					    &bp->b_vp->v_bufs_tree, bp);
1021 					brelvp(bp);
1022 				}
1023 				buf_put(bp);
1024 			}
1025 			if (freemax == i &&
1026 			    (bcstats.numbufpages + npages > bufpages)) {
1027 				needbuffer++;
1028 				tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
1029 				splx(s);
1030 				return (NULL);
1031 			}
1032 		}
1033 	}
1034 
1035 	bp = pool_get(&bufpool, poolwait|PR_ZERO);
1036 
1037 	if (bp == NULL) {
1038 		splx(s);
1039 		return (NULL);
1040 	}
1041 
1042 	bp->b_freelist.tqe_next = NOLIST;
1043 	bp->b_synctime = time_uptime + 300;
1044 	bp->b_dev = NODEV;
1045 	LIST_INIT(&bp->b_dep);
1046 	bp->b_bcount = size;
1047 
1048 	buf_acquire_unmapped(bp);
1049 
1050 	if (vp != NULL) {
1051 		/*
1052 		 * We insert the buffer into the vnode's buffer tree with
1053 		 * B_BUSY set while we allocate pages for it. This way any
1054 		 * getblk that happens while we allocate pages will wait for
1055 		 * this buffer instead of starting its own buf_get.
1056 		 *
1057 		 * But first, we check if someone beat us to it.
1058 		 */
1059 		if (incore(vp, blkno)) {
1060 			pool_put(&bufpool, bp);
1061 			splx(s);
1062 			return (NULL);
1063 		}
1064 
1065 		bp->b_blkno = bp->b_lblkno = blkno;
1066 		bgetvp(vp, bp);
1067 		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
1068 			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
1069 	} else {
1070 		bp->b_vnbufs.le_next = NOLIST;
1071 		SET(bp->b_flags, B_INVAL);
1072 		bp->b_vp = NULL;
1073 	}
1074 
1075 	LIST_INSERT_HEAD(&bufhead, bp, b_list);
1076 	bcstats.numbufs++;
1077 
1078 	if (size) {
1079 		buf_alloc_pages(bp, round_page(size));
1080 		buf_map(bp);
1081 	}
1082 
1083 	splx(s);
1084 
1085 	return (bp);
1086 }
1087 
1088 /*
1089  * Buffer cleaning daemon.
1090  */
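/*
 * The daemon sleeps on bd_req until woken, then writes buffers from
 * BQ_DIRTY until the dirty page count drops below lodirtypages (or the
 * queue empties), deferring buffers with unresolved dependencies and
 * spending at most about one second per pass.
 */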
1091 void
1092 buf_daemon(struct proc *p)
1093 {
1094 	struct timeval starttime, timediff;
1095 	struct buf *bp;
1096 	int s;
1097 
1098 	cleanerproc = curproc;
1099 
1100 	s = splbio();
1101 	for (;;) {
1102 		if (bcstats.numdirtypages < hidirtypages)
1103 			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
1104 
1105 		getmicrouptime(&starttime);
1106 
1107 		while ((bp = TAILQ_FIRST(&bufqueues[BQ_DIRTY]))) {
1108 			struct timeval tv;
1109 
1110 			if (bcstats.numdirtypages < lodirtypages)
1111 				break;
1112 
1113 			bremfree(bp);
1114 			buf_acquire(bp);
1115 			splx(s);
1116 
1117 			if (ISSET(bp->b_flags, B_INVAL)) {
1118 				brelse(bp);
1119 				s = splbio();
1120 				continue;
1121 			}
1122 #ifdef DIAGNOSTIC
1123 			if (!ISSET(bp->b_flags, B_DELWRI))
1124 				panic("Clean buffer on BQ_DIRTY");
1125 #endif
1126 			if (LIST_FIRST(&bp->b_dep) != NULL &&
1127 			    !ISSET(bp->b_flags, B_DEFERRED) &&
1128 			    buf_countdeps(bp, 0, 0)) {
1129 				SET(bp->b_flags, B_DEFERRED);
1130 				s = splbio();
1131 				bcstats.numdirtypages += atop(bp->b_bufsize);
1132 				binstailfree(bp, &bufqueues[BQ_DIRTY]);
1133 				bcstats.freebufs++;
1134 				buf_release(bp);
1135 				continue;
1136 			}
1137 
1138 			bawrite(bp);
1139 
1140 			/* Never allow processing to run for more than 1 sec */
1141 			getmicrouptime(&tv);
1142 			timersub(&tv, &starttime, &timediff);
1143 			s = splbio();
1144 			if (timediff.tv_sec)
1145 				break;
1146 
1147 		}
1148 	}
1149 }
1150 
1151 /*
1152  * Wait for operations on the buffer to complete.
1153  * When they do, extract and return the I/O's error value.
1154  */
1155 int
1156 biowait(struct buf *bp)
1157 {
1158 	int s;
1159 
1160 	KASSERT(!(bp->b_flags & B_ASYNC));
1161 
1162 	s = splbio();
1163 	while (!ISSET(bp->b_flags, B_DONE))
1164 		tsleep(bp, PRIBIO + 1, "biowait", 0);
1165 	splx(s);
1166 
1167 	/* check for interruption of I/O (e.g. via NFS), then errors. */
1168 	if (ISSET(bp->b_flags, B_EINTR)) {
1169 		CLR(bp->b_flags, B_EINTR);
1170 		return (EINTR);
1171 	}
1172 
1173 	if (ISSET(bp->b_flags, B_ERROR))
1174 		return (bp->b_error ? bp->b_error : EIO);
1175 	else
1176 		return (0);
1177 }
1178 
1179 /*
1180  * Mark I/O complete on a buffer.
1181  *
1182  * If a callback has been requested, e.g. the pageout
1183  * daemon, do so. Otherwise, awaken waiting processes.
1184  *
1185  * [ Leffler, et al., says on p.247:
1186  *	"This routine wakes up the blocked process, frees the buffer
1187  *	for an asynchronous write, or, for a request by the pagedaemon
1188  *	process, invokes a procedure specified in the buffer structure" ]
1189  *
1190  * In real life, the pagedaemon (or other system processes) wants
1191  * to do async stuff too, and doesn't want the buffer brelse()'d.
1192  * (for swap pager, that puts swap buffers on the free lists (!!!),
1193  * for the vn device, that puts malloc'd buffers on the free lists!)
1194  *
1195  * Must be called at splbio().
1196  */
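/*
 * A sketch of the B_CALL/b_iodone hand-off used by asynchronous
 * consumers such as bread_cluster() above ("my_callback" is a
 * hypothetical function):
 *
 *	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
 *	bp->b_iodone = my_callback;
 *	VOP_STRATEGY(bp);
 *
 * When the I/O finishes, biodone() clears B_CALL and invokes
 * my_callback(bp) instead of doing the brelse()/wakeup() itself.
 */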
1197 void
1198 biodone(struct buf *bp)
1199 {
1200 	splassert(IPL_BIO);
1201 
1202 	if (ISSET(bp->b_flags, B_DONE))
1203 		panic("biodone already");
1204 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1205 
1206 	if (LIST_FIRST(&bp->b_dep) != NULL)
1207 		buf_complete(bp);
1208 
1209 	if (!ISSET(bp->b_flags, B_READ)) {
1210 		CLR(bp->b_flags, B_WRITEINPROG);
1211 		vwakeup(bp->b_vp);
1212 	}
1213 	if (bcstats.numbufs &&
1214 	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
1215 		if (!ISSET(bp->b_flags, B_READ))
1216 			bcstats.pendingwrites--;
1217 		else
1218 			bcstats.pendingreads--;
1219 	}
1220 	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
1221 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1222 		(*bp->b_iodone)(bp);
1223 	} else {
1224 		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
1225 			brelse(bp);
1226 		} else {			/* or just wakeup the buffer */
1227 			CLR(bp->b_flags, B_WANTED);
1228 			wakeup(bp);
1229 		}
1230 	}
1231 }
1232 
1233 #ifdef DDB
1234 void	bcstats_print(int (*)(const char *, ...));
1235 /*
1236  * bcstats_print: ddb hook to print interesting buffer cache counters
1237  */
1238 void
1239 bcstats_print(int (*pr)(const char *, ...))
1240 {
1241 	(*pr)("Current Buffer Cache status:\n");
1242 	(*pr)("numbufs %d freebufs %d\n", bcstats.numbufs, bcstats.freebufs);
1243 	(*pr)("bufpages %d freepages %d dirtypages %d\n", bcstats.numbufpages,
1244 	    bcstats.numfreepages, bcstats.numdirtypages);
1245 	(*pr)("pendingreads %d, pendingwrites %d\n",
1246 	    bcstats.pendingreads, bcstats.pendingwrites);
1247 }
1248 #endif
1249