xref: /openbsd-src/sys/kern/vfs_bio.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*	$OpenBSD: vfs_bio.c,v 1.136 2012/05/30 19:32:19 miod Exp $	*/
2 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
3 
4 /*
5  * Copyright (c) 1994 Christopher G. Demetriou
6  * Copyright (c) 1982, 1986, 1989, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
39  */
40 
41 /*
42  * Some references:
43  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
44  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
45  *		UNIX Operating System (Addison-Wesley, 1989)
46  */
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/proc.h>
51 #include <sys/buf.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/malloc.h>
55 #include <sys/pool.h>
56 #include <sys/resourcevar.h>
57 #include <sys/conf.h>
58 #include <sys/kernel.h>
59 #include <sys/specdev.h>
60 
61 #include <uvm/uvm_extern.h>
62 
63 /*
64  * Definitions for the buffer free lists.
65  */
66 #define	BQUEUES		2		/* number of free buffer queues */
67 
68 #define	BQ_DIRTY	0		/* LRU queue with dirty buffers */
69 #define	BQ_CLEAN	1		/* LRU queue with clean buffers */
70 
71 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
72 int needbuffer;
73 struct bio_ops bioops;
74 
75 /*
76  * Buffer pool for I/O buffers.
77  */
78 struct pool bufpool;
79 struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
80 void buf_put(struct buf *);
81 
82 /*
83  * Insq/Remq for the buffer free lists.
84  */
85 #define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
86 #define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)
87 
88 struct buf *bio_doread(struct vnode *, daddr64_t, int, int);
89 struct buf *buf_get(struct vnode *, daddr64_t, size_t);
90 void bread_cluster_callback(struct buf *);
91 
92 /*
93  * We keep a few counters to monitor the utilization of the buffer cache
94  *
95  *  numbufpages   - number of pages totally allocated.
96  *  numdirtypages - number of pages on BQ_DIRTY queue.
97  *  lodirtypages  - low water mark for buffer cleaning daemon.
98  *  hidirtypages  - high water mark for buffer cleaning daemon.
99  *  numcleanpages - number of pages on BQ_CLEAN queue.
100  *		    Used to track the need to speed up the cleaner and
101  *		    as a reserve for special processes like the syncer.
102  *  maxcleanpages - the highest page count on BQ_CLEAN.
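 *  locleanpages  - low water mark for clean pages; when numcleanpages
 *		    exceeds hicleanpages, buffers are freed until it
 *		    drops back down to this mark.
 *  hicleanpages  - high water mark for clean pages.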
103  */
104 
105 struct bcachestats bcstats;
106 long lodirtypages;
107 long hidirtypages;
108 long locleanpages;
109 long hicleanpages;
110 long maxcleanpages;
111 long backoffpages;	/* backoff counter for page allocations */
112 long buflowpages;	/* bufpages low water mark */
113 long bufhighpages; 	/* bufpages high water mark */
114 long bufbackpages; 	/* number of pages we back off when asked to shrink */
115 
116 vsize_t bufkvm;
117 
118 struct proc *cleanerproc;
119 int bd_req;			/* Sleep point for cleaner daemon. */
120 
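/*
 * Remove a buffer from the free list it is currently on and update
 * the buffer cache counters.  Must be called at splbio().
 */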
121 void
122 bremfree(struct buf *bp)
123 {
124 	struct bqueues *dp = NULL;
125 
126 	splassert(IPL_BIO);
127 
128 	/*
129 	 * We only calculate the head of the freelist when removing
130 	 * the last element of the list as that is the only time that
131 	 * it is needed (e.g. to reset the tail pointer).
132 	 *
133 	 * NB: This makes an assumption about how tailq's are implemented.
134 	 */
135 	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
136 		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
137 			if (dp->tqh_last == &TAILQ_NEXT(bp, b_freelist))
138 				break;
139 		if (dp == &bufqueues[BQUEUES])
140 			panic("bremfree: lost tail");
141 	}
142 	if (!ISSET(bp->b_flags, B_DELWRI)) {
143 		bcstats.numcleanpages -= atop(bp->b_bufsize);
144 	} else {
145 		bcstats.numdirtypages -= atop(bp->b_bufsize);
146 		bcstats.delwribufs--;
147 	}
148 	TAILQ_REMOVE(dp, bp, b_freelist);
149 }
150 
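/*
 * Free a buffer header that is no longer in use: release its backing
 * memory and, when possible, return the header to the buffer pool.
 * Must be called at splbio().
 */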
151 void
152 buf_put(struct buf *bp)
153 {
154 	splassert(IPL_BIO);
155 
156 #ifdef DIAGNOSTIC
157 	if (bp->b_pobj != NULL)
158 		KASSERT(bp->b_bufsize > 0);
159 	if (ISSET(bp->b_flags, B_DELWRI))
160 		panic("buf_put: releasing dirty buffer");
161 	if (bp->b_freelist.tqe_next != NOLIST &&
162 	    bp->b_freelist.tqe_next != (void *)-1)
163 		panic("buf_put: still on the free list");
164 	if (bp->b_vnbufs.le_next != NOLIST &&
165 	    bp->b_vnbufs.le_next != (void *)-1)
166 		panic("buf_put: still on the vnode list");
167 	if (!LIST_EMPTY(&bp->b_dep))
168 		panic("buf_put: b_dep is not empty");
169 #endif
170 
171 	LIST_REMOVE(bp, b_list);
172 	bcstats.numbufs--;
173 	if (backoffpages) {
174 		backoffpages -= atop(bp->b_bufsize);
175 		if (backoffpages < 0)
176 			backoffpages = 0;
177 	}
178 
179 	if (buf_dealloc_mem(bp) != 0)
180 		return;
181 	pool_put(&bufpool, bp);
182 }
183 
184 /*
185  * Initialize buffers and hash links for buffers.
186  */
187 void
188 bufinit(void)
189 {
190 	u_int64_t dmapages;
191 	struct bqueues *dp;
192 
193 	dmapages = uvm_pagecount(&dma_constraint);
194 
195 	/*
196 	 * If MD code doesn't say otherwise, use 10% of kvm for mappings and
197 	 * 10% of dmaable pages for cache pages.
198 	 */
199 	if (bufcachepercent == 0)
200 		bufcachepercent = 10;
201 	if (bufpages == 0)
202 		bufpages = dmapages * bufcachepercent / 100;
203 
204 	bufhighpages = bufpages;
205 
206 	/*
207 	 * Set the base backoff level for the buffer cache (buflowpages)
208 	 * to 10% of the dmaable pages.  We will not allow uvm to steal
209 	 * back more than this number of pages.
210 	 */
211 	buflowpages = dmapages * 10 / 100;
212 
213 	/*
214 	 * set bufbackpages to 100 pages, or 10 percent of the low water mark
215 	 * if we don't have that many pages.
216 	 */
217 
218 	bufbackpages = buflowpages * 10 / 100;
219 	if (bufbackpages > 100)
220 		bufbackpages = 100;
221 
222 	if (bufkvm == 0)
223 		bufkvm = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 10;
224 
225 	/*
226 	 * Don't use more than twice the amount of bufpages for mappings.
227 	 * It's twice since we map things sparsely.
228 	 */
229 	if (bufkvm > bufpages * PAGE_SIZE)
230 		bufkvm = bufpages * PAGE_SIZE;
231 	/*
232 	 * Round bufkvm down to a multiple of MAXPHYS because we allocate
233 	 * va space in MAXPHYS-sized chunks.
234 	 */
235 	bufkvm &= ~(MAXPHYS - 1);
236 
237 	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
238 	pool_setipl(&bufpool, IPL_BIO);
239 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
240 		TAILQ_INIT(dp);
241 
242 	/*
243 	 * hmm - bufkvm is an argument because it's static, while
244 	 * bufpages is global because it can change while running.
245  	 */
246 	buf_mem_init(bufkvm);
247 
248 	hidirtypages = (bufpages / 4) * 3;
249 	lodirtypages = bufpages / 2;
250 
251 	/*
252 	 * When we hit 95% of pages being clean, we bring them down to
253 	 * 90% to have some slack.
254 	 */
255 	hicleanpages = bufpages - (bufpages / 20);
256 	locleanpages = bufpages - (bufpages / 10);
257 
258 	maxcleanpages = locleanpages;
259 }
260 
261 /*
262  * Change the size of the buffer cache (in pages) to newbufpages.
263  */
264 void
265 bufadjust(int newbufpages)
266 {
267 	/*
268 	 * XXX - note, bufkvm was allocated once, based on 10% of physmem;
269 	 * see above.
270 	 */
271 	struct buf *bp;
272 	int s;
273 
274 	s = splbio();
275 	bufpages = newbufpages;
276 
277 	hidirtypages = (bufpages / 4) * 3;
278 	lodirtypages = bufpages / 2;
279 
280 	/*
281 	 * When we hit 95% of pages being clean, we bring them down to
282 	 * 90% to have some slack.
283 	 */
284 	hicleanpages = bufpages - (bufpages / 20);
285 	locleanpages = bufpages - (bufpages / 10);
286 
287 	maxcleanpages = locleanpages;
288 
289 	/*
290 	 * If we have more buffer pages allocated than bufpages, free
291 	 * buffers until we get back down.  This may possibly consume
292 	 * all our clean pages...
293 	 */
294 	while ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) &&
295 	    (bcstats.numbufpages > bufpages)) {
296 		bremfree(bp);
297 		if (bp->b_vp) {
298 			RB_REMOVE(buf_rb_bufs,
299 			    &bp->b_vp->v_bufs_tree, bp);
300 			brelvp(bp);
301 		}
302 		buf_put(bp);
303 	}
304 
305 	/*
306 	 * Wake up the cleaner if we're getting low on pages.  We
307 	 * might now have too many dirty pages, or have fallen below
308 	 * our low water mark on clean pages, so we need to free more
309 	 * stuff up.
310 	 */
311 	if (bcstats.numdirtypages >= hidirtypages ||
312 	    bcstats.numcleanpages <= locleanpages)
313 		wakeup(&bd_req);
314 
315 	/*
316 	 * If immediate action has not freed up enough goo for us
317 	 * to proceed, we tsleep and wait for the cleaner above
318 	 * to do its work and get us reduced down to sanity.
319 	 */
320 	while (bcstats.numbufpages > bufpages) {
321 		needbuffer++;
322 		tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
323 	}
324 	splx(s);
325 }
326 
327 /*
328  * Make the buffer cache back off from cachepct.
329  */
330 int
331 bufbackoff(struct uvm_constraint_range *range, long size)
332 {
333 	/*
334 	 * Back off the amount of buffer cache pages. Called by the page
335 	 * daemon to consume buffer cache pages rather than swapping.
336 	 *
337 	 * On success, it frees N pages from the buffer cache, and sets
338 	 * a flag so that the next N allocations from buf_get will recycle
339 	 * a buffer rather than allocate a new one. It then returns 0 to the
340 	 * caller.
341 	 *
342 	 * On failure, it could not free any pages from the buffer cache;
343 	 * it does nothing and returns -1 to the caller.
344 	 */
345 	long d;
346 
347 	if (bufpages <= buflowpages)
348 		return(-1);
349 
350 	if (bufpages - bufbackpages >= buflowpages)
351 		d = bufbackpages;
352 	else
353 		d = bufpages - buflowpages;
354 	backoffpages = bufbackpages;
355 	bufadjust(bufpages - d);
356 	backoffpages = 0;
357 	return(0);
358 }
359 
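/*
 * Common code for the bread*() routines: get a buffer for the given
 * block and, if it does not already contain valid data, start a read.
 */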
360 struct buf *
361 bio_doread(struct vnode *vp, daddr64_t blkno, int size, int async)
362 {
363 	struct buf *bp;
364 	struct mount *mp;
365 
366 	bp = getblk(vp, blkno, size, 0, 0);
367 
368 	/*
369 	 * If buffer does not have valid data, start a read.
370 	 * Note that if buffer is B_INVAL, getblk() won't return it.
371 	 * Therefore, it's valid if its I/O has completed or been delayed.
372 	 */
373 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
374 		SET(bp->b_flags, B_READ | async);
375 		bcstats.pendingreads++;
376 		bcstats.numreads++;
377 		VOP_STRATEGY(bp);
378 		/* Pay for the read. */
379 		curproc->p_ru.ru_inblock++;			/* XXX */
380 	} else if (async) {
381 		brelse(bp);
382 	}
383 
384 	mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
385 
386 	/*
387 	 * Collect statistics on synchronous and asynchronous reads.
388 	 * Reads from block devices are charged to their associated
389 	 * filesystem (if any).
390 	 */
391 	if (mp != NULL) {
392 		if (async == 0)
393 			mp->mnt_stat.f_syncreads++;
394 		else
395 			mp->mnt_stat.f_asyncreads++;
396 	}
397 
398 	return (bp);
399 }
400 
401 /*
402  * Read a disk block.
403  * This algorithm described in Bach (p.54).
404  */
405 int
406 bread(struct vnode *vp, daddr64_t blkno, int size, struct buf **bpp)
407 {
408 	struct buf *bp;
409 
410 	/* Get buffer for block. */
411 	bp = *bpp = bio_doread(vp, blkno, size, 0);
412 
413 	/* Wait for the read to complete, and return result. */
414 	return (biowait(bp));
415 }
416 
417 /*
418  * Read-ahead multiple disk blocks. The first is sync, the rest async.
419  * Trivial modification to the breada algorithm presented in Bach (p.55).
420  */
421 int
422 breadn(struct vnode *vp, daddr64_t blkno, int size, daddr64_t rablks[],
423     int rasizes[], int nrablks, struct buf **bpp)
424 {
425 	struct buf *bp;
426 	int i;
427 
428 	bp = *bpp = bio_doread(vp, blkno, size, 0);
429 
430 	/*
431 	 * For each of the read-ahead blocks, start a read, if necessary.
432 	 */
433 	for (i = 0; i < nrablks; i++) {
434 		/* If it's in the cache, just go on to next one. */
435 		if (incore(vp, rablks[i]))
436 			continue;
437 
438 		/* Get a buffer for the read-ahead block */
439 		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
440 	}
441 
442 	/* Wait for the first block's read to complete, and return result. */
443 	return (biowait(bp));
444 }
445 
446 /*
447  * Called from interrupt context.
448  */
449 void
450 bread_cluster_callback(struct buf *bp)
451 {
452 	struct buf **xbpp = bp->b_saveaddr;
453 	int i;
454 
455 	if (xbpp[1] != NULL) {
456 		size_t newsize = xbpp[1]->b_bufsize;
457 
458 		/*
459 		 * Shrink this buffer's mapping to only cover its part of
460 		 * the total I/O.
461 		 */
462 		buf_fix_mapping(bp, newsize);
463 		bp->b_bcount = newsize;
464 	}
465 
466 	for (i = 1; xbpp[i] != 0; i++) {
467 		if (ISSET(bp->b_flags, B_ERROR))
468 			SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
469 		biodone(xbpp[i]);
470 	}
471 
472 	free(xbpp, M_TEMP);
473 
474 	if (ISSET(bp->b_flags, B_ASYNC)) {
475 		brelse(bp);
476 	} else {
477 		CLR(bp->b_flags, B_WANTED);
478 		wakeup(bp);
479 	}
480 }
481 
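/*
 * Read the requested block as in bread(), and additionally try to start
 * a single asynchronous cluster read of the blocks following it.
 */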
482 int
483 bread_cluster(struct vnode *vp, daddr64_t blkno, int size, struct buf **rbpp)
484 {
485 	struct buf *bp, **xbpp;
486 	int howmany, maxra, i, inc;
487 	daddr64_t sblkno;
488 
489 	*rbpp = bio_doread(vp, blkno, size, 0);
490 
491 	if (size != round_page(size))
492 		goto out;
493 
494 	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
495 		goto out;
496 
497 	maxra++;
498 	if (sblkno == -1 || maxra < 2)
499 		goto out;
500 
501 	howmany = MAXPHYS / size;
502 	if (howmany > maxra)
503 		howmany = maxra;
504 
505 	xbpp = malloc((howmany + 1) * sizeof(struct buf *), M_TEMP, M_NOWAIT);
506 	if (xbpp == NULL)
507 		goto out;
508 
509 	for (i = howmany - 1; i >= 0; i--) {
510 		size_t sz;
511 
512 		/*
513 		 * The first buffer is allocated large enough to cover the
514 		 * space that all the other buffers need.
515 		 */
516 		sz = i == 0 ? howmany * size : 0;
517 
518 		xbpp[i] = buf_get(vp, blkno + i + 1, sz);
519 		if (xbpp[i] == NULL) {
520 			for (++i; i < howmany; i++) {
521 				SET(xbpp[i]->b_flags, B_INVAL);
522 				brelse(xbpp[i]);
523 			}
524 			free(xbpp, M_TEMP);
525 			goto out;
526 		}
527 	}
528 
529 	bp = xbpp[0];
530 
531 	xbpp[howmany] = 0;
532 
533 	inc = btodb(size);
534 
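	/*
	 * The remaining buffers borrow their pages from xbpp[0]: each one
	 * points into that buffer's page object at its own offset and is
	 * marked as an asynchronous read.
	 */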
535 	for (i = 1; i < howmany; i++) {
536 		bcstats.pendingreads++;
537 		bcstats.numreads++;
538 		SET(xbpp[i]->b_flags, B_READ | B_ASYNC);
539 		xbpp[i]->b_blkno = sblkno + (i * inc);
540 		xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
541 		xbpp[i]->b_data = NULL;
542 		xbpp[i]->b_pobj = bp->b_pobj;
543 		xbpp[i]->b_poffs = bp->b_poffs + (i * size);
544 	}
545 
546 	KASSERT(bp->b_lblkno == blkno + 1);
547 	KASSERT(bp->b_vp == vp);
548 
549 	bp->b_blkno = sblkno;
550 	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
551 
552 	bp->b_saveaddr = (void *)xbpp;
553 	bp->b_iodone = bread_cluster_callback;
554 
555 	bcstats.pendingreads++;
556 	bcstats.numreads++;
557 	VOP_STRATEGY(bp);
558 	curproc->p_ru.ru_inblock++;
559 
560 out:
561 	return (biowait(*rbpp));
562 }
563 
564 /*
565  * Block write.  Described in Bach (p.56)
566  */
567 int
568 bwrite(struct buf *bp)
569 {
570 	int rv, async, wasdelayed, s;
571 	struct vnode *vp;
572 	struct mount *mp;
573 
574 	vp = bp->b_vp;
575 	if (vp != NULL)
576 		mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
577 	else
578 		mp = NULL;
579 
580 	/*
581 	 * Remember buffer type, to switch on it later.  If the write was
582 	 * synchronous, but the file system was mounted with MNT_ASYNC,
583 	 * convert it to a delayed write.
584 	 * XXX note that this relies on delayed tape writes being converted
585 	 * to async, not sync writes (which is safe, but ugly).
586 	 */
587 	async = ISSET(bp->b_flags, B_ASYNC);
588 	if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
589 		bdwrite(bp);
590 		return (0);
591 	}
592 
593 	/*
594 	 * Collect statistics on synchronous and asynchronous writes.
595 	 * Writes to block devices are charged to their associated
596 	 * filesystem (if any).
597 	 */
598 	if (mp != NULL) {
599 		if (async)
600 			mp->mnt_stat.f_asyncwrites++;
601 		else
602 			mp->mnt_stat.f_syncwrites++;
603 	}
604 	bcstats.pendingwrites++;
605 	bcstats.numwrites++;
606 
607 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
608 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
609 
610 	s = splbio();
611 
612 	/*
613 	 * If the write was previously delayed, move the buffer onto the
614 	 * correct vnode queue; otherwise pay for the I/O now.  We have
615 	 * to do this now, because if we don't, the vnode may not be
616 	 * properly notified that its I/O has completed.
617 	 */
618 	if (wasdelayed) {
619 		reassignbuf(bp);
620 	} else
621 		curproc->p_ru.ru_oublock++;
622 
623 
624 	/* Initiate disk write.  Make sure the appropriate party is charged. */
625 	bp->b_vp->v_numoutput++;
626 	splx(s);
627 	SET(bp->b_flags, B_WRITEINPROG);
628 	VOP_STRATEGY(bp);
629 
630 	if (async)
631 		return (0);
632 
633 	/*
634 	 * If I/O was synchronous, wait for it to complete.
635 	 */
636 	rv = biowait(bp);
637 
638 	/* Release the buffer. */
639 	brelse(bp);
640 
641 	return (rv);
642 }
643 
644 
645 /*
646  * Delayed write.
647  *
648  * The buffer is marked dirty, but is not queued for I/O.
649  * This routine should be used when the buffer is expected
650  * to be modified again soon, typically a small write that
651  * partially fills a buffer.
652  *
653  * NB: magnetic tapes cannot be delayed; they must be
654  * written in the order that the writes are requested.
655  *
656  * Described in Leffler, et al. (pp. 208-213).
657  */
658 void
659 bdwrite(struct buf *bp)
660 {
661 	int s;
662 
663 	/*
664 	 * If the block hasn't been seen before:
665 	 *	(1) Mark it as having been seen,
666 	 *	(2) Charge for the write,
667 	 *	(3) Make sure it's on its vnode's correct block list, and
668 	 *	(4) If a buffer is rewritten, move it to the end of the dirty list.
669 	 */
670 	if (!ISSET(bp->b_flags, B_DELWRI)) {
671 		SET(bp->b_flags, B_DELWRI);
672 		s = splbio();
673 		reassignbuf(bp);
674 		splx(s);
675 		curproc->p_ru.ru_oublock++;		/* XXX */
676 	}
677 
678 	/* If this is a tape block, write the block now. */
679 	if (major(bp->b_dev) < nblkdev &&
680 	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
681 		bawrite(bp);
682 		return;
683 	}
684 
685 	/* Otherwise, the "write" is done, so mark and release the buffer. */
686 	CLR(bp->b_flags, B_NEEDCOMMIT);
687 	SET(bp->b_flags, B_DONE);
688 	brelse(bp);
689 }
690 
691 /*
692  * Asynchronous block write; just an asynchronous bwrite().
693  */
694 void
695 bawrite(struct buf *bp)
696 {
697 
698 	SET(bp->b_flags, B_ASYNC);
699 	VOP_BWRITE(bp);
700 }
701 
702 /*
703  * Must be called at splbio()
704  */
705 void
706 buf_dirty(struct buf *bp)
707 {
708 	splassert(IPL_BIO);
709 
710 #ifdef DIAGNOSTIC
711 	if (!ISSET(bp->b_flags, B_BUSY))
712 		panic("Trying to dirty buffer on freelist!");
713 #endif
714 
715 	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
716 		SET(bp->b_flags, B_DELWRI);
717 		reassignbuf(bp);
718 	}
719 }
720 
721 /*
722  * Must be called at splbio()
723  */
724 void
725 buf_undirty(struct buf *bp)
726 {
727 	splassert(IPL_BIO);
728 
729 #ifdef DIAGNOSTIC
730 	if (!ISSET(bp->b_flags, B_BUSY))
731 		panic("Trying to undirty buffer on freelist!");
732 #endif
733 	if (ISSET(bp->b_flags, B_DELWRI)) {
734 		CLR(bp->b_flags, B_DELWRI);
735 		reassignbuf(bp);
736 	}
737 }
738 
739 /*
740  * Release a buffer on to the free lists.
741  * Described in Bach (p. 46).
742  */
743 void
744 brelse(struct buf *bp)
745 {
746 	struct bqueues *bufq;
747 	int s;
748 
749 	s = splbio();
750 
751 	if (bp->b_data != NULL)
752 		KASSERT(bp->b_bufsize > 0);
753 
754 	/*
755 	 * Determine which queue the buffer should be on, then put it there.
756 	 */
757 
758 	/* If it's not cacheable, or an error, mark it invalid. */
759 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
760 		SET(bp->b_flags, B_INVAL);
761 
762 	if (ISSET(bp->b_flags, B_INVAL)) {
763 		/*
764 		 * If the buffer is invalid, place it in the clean queue, so it
765 		 * can be reused.
766 		 */
767 		if (LIST_FIRST(&bp->b_dep) != NULL)
768 			buf_deallocate(bp);
769 
770 		if (ISSET(bp->b_flags, B_DELWRI)) {
771 			CLR(bp->b_flags, B_DELWRI);
772 		}
773 
774 		if (bp->b_vp) {
775 			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
776 			    bp);
777 			brelvp(bp);
778 		}
779 		bp->b_vp = NULL;
780 
781 		/*
782 		 * If the buffer has no associated data, place it back in the
783 		 * pool.
784 		 */
785 		if (bp->b_data == NULL && bp->b_pobj == NULL) {
786 			/*
787 			 * Wake up any processes waiting for _this_ buffer to
788 			 * become free. They are not allowed to grab it
789 			 * since it will be freed. But the only sleeper is
790 			 * getblk and it's restarting the operation after
791 			 * sleep.
792 			 */
793 			if (ISSET(bp->b_flags, B_WANTED)) {
794 				CLR(bp->b_flags, B_WANTED);
795 				wakeup(bp);
796 			}
797 			if (bp->b_vp != NULL)
798 				RB_REMOVE(buf_rb_bufs,
799 				    &bp->b_vp->v_bufs_tree, bp);
800 			buf_put(bp);
801 			splx(s);
802 			return;
803 		}
804 
805 		bcstats.numcleanpages += atop(bp->b_bufsize);
806 		if (maxcleanpages < bcstats.numcleanpages)
807 			maxcleanpages = bcstats.numcleanpages;
808 		binsheadfree(bp, &bufqueues[BQ_CLEAN]);
809 	} else {
810 		/*
811 		 * It has valid data.  Put it on the end of the appropriate
812 		 * queue, so that it'll stick around for as long as possible.
813 		 */
814 
815 		if (!ISSET(bp->b_flags, B_DELWRI)) {
816 			bcstats.numcleanpages += atop(bp->b_bufsize);
817 			if (maxcleanpages < bcstats.numcleanpages)
818 				maxcleanpages = bcstats.numcleanpages;
819 			bufq = &bufqueues[BQ_CLEAN];
820 		} else {
821 			bcstats.numdirtypages += atop(bp->b_bufsize);
822 			bcstats.delwribufs++;
823 			bufq = &bufqueues[BQ_DIRTY];
824 		}
825 		if (ISSET(bp->b_flags, B_AGE)) {
826 			binsheadfree(bp, bufq);
827 			bp->b_synctime = time_uptime + 30;
828 		} else {
829 			binstailfree(bp, bufq);
830 			bp->b_synctime = time_uptime + 300;
831 		}
832 	}
833 
834 	/* Unlock the buffer. */
835 	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
836 	buf_release(bp);
837 
838 	/* Wake up any processes waiting for any buffer to become free. */
839 	if (needbuffer) {
840 		needbuffer--;
841 		wakeup(&needbuffer);
842 	}
843 
844 	/* Wake up any processes waiting for _this_ buffer to become free. */
845 	if (ISSET(bp->b_flags, B_WANTED)) {
846 		CLR(bp->b_flags, B_WANTED);
847 		wakeup(bp);
848 	}
849 
850 	splx(s);
851 }
852 
853 /*
854  * Determine if a block is in the cache.  Just look it up in the vnode's
855  * buffer tree; if found, return a pointer to it, unless it's marked invalid.
856  */
857 struct buf *
858 incore(struct vnode *vp, daddr64_t blkno)
859 {
860 	struct buf *bp;
861 	struct buf b;
862 	int s;
863 
864 	s = splbio();
865 
866 	/* Search buf lookup tree */
867 	b.b_lblkno = blkno;
868 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
869 	if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
870 		bp = NULL;
871 
872 	splx(s);
873 	return (bp);
874 }
875 
876 /*
877  * Get a block of requested size that is associated with
878  * a given vnode and block offset. If it is found in the
879  * block cache, mark it as having been found, make it busy
880  * and return it. Otherwise, return an empty block of the
881  * correct size. It is up to the caller to ensure that the
882  * cached blocks are of the correct size.
883  */
884 struct buf *
885 getblk(struct vnode *vp, daddr64_t blkno, int size, int slpflag, int slptimeo)
886 {
887 	struct buf *bp;
888 	struct buf b;
889 	int s, error;
890 
891 	/*
892 	 * XXX
893 	 * The following is an inlined version of 'incore()', but with
894 	 * the 'invalid' test moved to after the 'busy' test.  It's
895 	 * necessary because there are some cases in which the NFS
896 	 * code sets B_INVAL prior to writing data to the server, but
897 	 * in which the buffers actually contain valid data.  In this
898 	 * case, we can't allow the system to allocate a new buffer for
899 	 * the block until the write is finished.
900 	 */
901 start:
902 	s = splbio();
903 	b.b_lblkno = blkno;
904 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
905 	if (bp != NULL) {
906 		if (ISSET(bp->b_flags, B_BUSY)) {
907 			SET(bp->b_flags, B_WANTED);
908 			error = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
909 			    slptimeo);
910 			splx(s);
911 			if (error)
912 				return (NULL);
913 			goto start;
914 		}
915 
916 		if (!ISSET(bp->b_flags, B_INVAL)) {
917 			bcstats.cachehits++;
918 			SET(bp->b_flags, B_CACHE);
919 			bremfree(bp);
920 			buf_acquire(bp);
921 			splx(s);
922 			return (bp);
923 		}
924 	}
925 	splx(s);
926 
927 	if ((bp = buf_get(vp, blkno, size)) == NULL)
928 		goto start;
929 
930 	return (bp);
931 }
932 
933 /*
934  * Get an empty, disassociated buffer of given size.
935  */
936 struct buf *
937 geteblk(int size)
938 {
939 	struct buf *bp;
940 
941 	while ((bp = buf_get(NULL, 0, size)) == NULL)
942 		;
943 
944 	return (bp);
945 }
946 
947 /*
948  * Allocate a buffer, optionally associated with a vnode and block number.
949  */
950 struct buf *
951 buf_get(struct vnode *vp, daddr64_t blkno, size_t size)
952 {
953 	static int gcount = 0;
954 	struct buf *bp;
955 	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
956 	int npages;
957 	int s;
958 
959 	/*
960 	 * if we were previously backed off, slowly climb back up
961 	 * to the high water mark again.
962 	 */
963 	if (backoffpages == 0 && bufpages < bufhighpages) {
964 		if (gcount == 0)  {
965 			bufadjust(bufpages + bufbackpages);
966 			gcount += bufbackpages;
967 		} else
968 			gcount--;
969 	}
970 
971 	s = splbio();
972 	if (size) {
973 		/*
974 		 * Wake up cleaner if we're getting low on pages.
975 		 */
976 		if (bcstats.numdirtypages >= hidirtypages ||
977 		    bcstats.numcleanpages <= locleanpages)
978 			wakeup(&bd_req);
979 
980 		/*
981 		 * If we're above the high water mark for clean pages,
982 		 * free down to the low water mark.
983 		 */
984 		if (bcstats.numcleanpages > hicleanpages) {
985 			while (bcstats.numcleanpages > locleanpages) {
986 				bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN]);
987 				bremfree(bp);
988 				if (bp->b_vp) {
989 					RB_REMOVE(buf_rb_bufs,
990 					    &bp->b_vp->v_bufs_tree, bp);
991 					brelvp(bp);
992 				}
993 				buf_put(bp);
994 			}
995 		}
996 
997 		npages = atop(round_page(size));
998 
999 		/*
1000 		 * Free some buffers until we have enough space.
1001 		 */
1002 		while ((bcstats.numbufpages + npages > bufpages)
1003 		    || backoffpages) {
1004 			int freemax = 5;
1005 			int i = freemax;
1006 			while ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) && i--) {
1007 				bremfree(bp);
1008 				if (bp->b_vp) {
1009 					RB_REMOVE(buf_rb_bufs,
1010 					    &bp->b_vp->v_bufs_tree, bp);
1011 					brelvp(bp);
1012 				}
1013 				buf_put(bp);
1014 			}
1015 			if (freemax == i &&
1016 			    (bcstats.numbufpages + npages > bufpages ||
1017 			     backoffpages)) {
1018 				needbuffer++;
1019 				tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
1020 				splx(s);
1021 				return (NULL);
1022 			}
1023 		}
1024 	}
1025 
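	/*
	 * Allocate a zeroed buffer header; this may sleep, unless size is
	 * zero, in which case we fail rather than wait.
	 */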
1026 	bp = pool_get(&bufpool, poolwait|PR_ZERO);
1027 
1028 	if (bp == NULL) {
1029 		splx(s);
1030 		return (NULL);
1031 	}
1032 
1033 	bp->b_freelist.tqe_next = NOLIST;
1034 	bp->b_synctime = time_uptime + 300;
1035 	bp->b_dev = NODEV;
1036 	LIST_INIT(&bp->b_dep);
1037 	bp->b_bcount = size;
1038 
1039 	buf_acquire_unmapped(bp);
1040 
1041 	if (vp != NULL) {
1042 		/*
1043 		 * We insert the buffer into the hash with B_BUSY set
1044 		 * while we allocate pages for it. This way any getblk
1045 		 * that happens while we allocate pages will wait for
1046 		 * this buffer instead of starting its own buf_get.
1047 		 *
1048 		 * But first, we check if someone beat us to it.
1049 		 */
1050 		if (incore(vp, blkno)) {
1051 			pool_put(&bufpool, bp);
1052 			splx(s);
1053 			return (NULL);
1054 		}
1055 
1056 		bp->b_blkno = bp->b_lblkno = blkno;
1057 		bgetvp(vp, bp);
1058 		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
1059 			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
1060 	} else {
1061 		bp->b_vnbufs.le_next = NOLIST;
1062 		SET(bp->b_flags, B_INVAL);
1063 		bp->b_vp = NULL;
1064 	}
1065 
1066 	LIST_INSERT_HEAD(&bufhead, bp, b_list);
1067 	bcstats.numbufs++;
1068 
1069 	if (size) {
1070 		buf_alloc_pages(bp, round_page(size));
1071 		buf_map(bp);
1072 	}
1073 
1074 	splx(s);
1075 
1076 	return (bp);
1077 }
1078 
1079 /*
1080  * Buffer cleaning daemon.
1081  */
1082 void
1083 buf_daemon(struct proc *p)
1084 {
1085 	struct timeval starttime, timediff;
1086 	struct buf *bp;
1087 	int s;
1088 
1089 	cleanerproc = curproc;
1090 
1091 	s = splbio();
1092 	for (;;) {
1093 		if (bcstats.numdirtypages < hidirtypages)
1094 			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
1095 
1096 		getmicrouptime(&starttime);
1097 
1098 		while ((bp = TAILQ_FIRST(&bufqueues[BQ_DIRTY]))) {
1099 			struct timeval tv;
1100 
1101 			if (bcstats.numdirtypages < lodirtypages)
1102 				break;
1103 
1104 			bremfree(bp);
1105 			buf_acquire(bp);
1106 			splx(s);
1107 
1108 			if (ISSET(bp->b_flags, B_INVAL)) {
1109 				brelse(bp);
1110 				s = splbio();
1111 				continue;
1112 			}
1113 #ifdef DIAGNOSTIC
1114 			if (!ISSET(bp->b_flags, B_DELWRI))
1115 				panic("Clean buffer on BQ_DIRTY");
1116 #endif
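			/*
			 * If the buffer still has dependencies that are not
			 * ready to be flushed, mark it B_DEFERRED and put it
			 * back on the tail of the dirty queue to retry later.
			 */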
1117 			if (LIST_FIRST(&bp->b_dep) != NULL &&
1118 			    !ISSET(bp->b_flags, B_DEFERRED) &&
1119 			    buf_countdeps(bp, 0, 0)) {
1120 				SET(bp->b_flags, B_DEFERRED);
1121 				s = splbio();
1122 				bcstats.numdirtypages += atop(bp->b_bufsize);
1123 				bcstats.delwribufs++;
1124 				binstailfree(bp, &bufqueues[BQ_DIRTY]);
1125 				buf_release(bp);
1126 				continue;
1127 			}
1128 
1129 			bawrite(bp);
1130 
1131 			/* Never allow processing to run for more than 1 sec */
1132 			getmicrouptime(&tv);
1133 			timersub(&tv, &starttime, &timediff);
1134 			s = splbio();
1135 			if (timediff.tv_sec)
1136 				break;
1137 
1138 		}
1139 	}
1140 }
1141 
1142 /*
1143  * Wait for operations on the buffer to complete.
1144  * When they do, extract and return the I/O's error value.
1145  */
1146 int
1147 biowait(struct buf *bp)
1148 {
1149 	int s;
1150 
1151 	KASSERT(!(bp->b_flags & B_ASYNC));
1152 
1153 	s = splbio();
1154 	while (!ISSET(bp->b_flags, B_DONE))
1155 		tsleep(bp, PRIBIO + 1, "biowait", 0);
1156 	splx(s);
1157 
1158 	/* check for interruption of I/O (e.g. via NFS), then errors. */
1159 	if (ISSET(bp->b_flags, B_EINTR)) {
1160 		CLR(bp->b_flags, B_EINTR);
1161 		return (EINTR);
1162 	}
1163 
1164 	if (ISSET(bp->b_flags, B_ERROR))
1165 		return (bp->b_error ? bp->b_error : EIO);
1166 	else
1167 		return (0);
1168 }
1169 
1170 /*
1171  * Mark I/O complete on a buffer.
1172  *
1173  * If a callback has been requested, e.g. the pageout
1174  * daemon, do so. Otherwise, awaken waiting processes.
1175  *
1176  * [ Leffler, et al., says on p.247:
1177  *	"This routine wakes up the blocked process, frees the buffer
1178  *	for an asynchronous write, or, for a request by the pagedaemon
1179  *	process, invokes a procedure specified in the buffer structure" ]
1180  *
1181  * In real life, the pagedaemon (or other system processes) wants
1182  * to do async stuff too, and doesn't want the buffer brelse()'d.
1183  * (for swap pager, that puts swap buffers on the free lists (!!!),
1184  * for the vn device, that puts malloc'd buffers on the free lists!)
1185  *
1186  * Must be called at splbio().
1187  */
1188 void
1189 biodone(struct buf *bp)
1190 {
1191 	splassert(IPL_BIO);
1192 
1193 	if (ISSET(bp->b_flags, B_DONE))
1194 		panic("biodone already");
1195 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1196 
1197 	if (bp->b_bq)
1198 		bufq_done(bp->b_bq, bp);
1199 
1200 	if (LIST_FIRST(&bp->b_dep) != NULL)
1201 		buf_complete(bp);
1202 
1203 	if (!ISSET(bp->b_flags, B_READ)) {
1204 		CLR(bp->b_flags, B_WRITEINPROG);
1205 		vwakeup(bp->b_vp);
1206 	}
1207 	if (bcstats.numbufs &&
1208 	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
1209 		if (!ISSET(bp->b_flags, B_READ))
1210 			bcstats.pendingwrites--;
1211 		else
1212 			bcstats.pendingreads--;
1213 	}
1214 	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
1215 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1216 		(*bp->b_iodone)(bp);
1217 	} else {
1218 		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
1219 			brelse(bp);
1220 		} else {			/* or just wakeup the buffer */
1221 			CLR(bp->b_flags, B_WANTED);
1222 			wakeup(bp);
1223 		}
1224 	}
1225 }
1226 
1227 #ifdef DDB
1228 void	bcstats_print(int (*)(const char *, ...));
1229 /*
1230  * bcstats_print: ddb hook to print interesting buffer cache counters
1231  */
1232 void
1233 bcstats_print(int (*pr)(const char *, ...))
1234 {
1235 	(*pr)("Current Buffer Cache status:\n");
1236 	(*pr)("numbufs %lld busymapped %lld, delwri %lld\n",
1237 	    bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs);
1238 	(*pr)("kvaslots %lld avail kva slots %lld\n",
1239 	    bcstats.kvaslots, bcstats.kvaslots_avail);
1240 	(*pr)("bufpages %lld, dirtypages %lld\n",
1241 	    bcstats.numbufpages, bcstats.numdirtypages);
1242 	(*pr)("pendingreads %lld, pendingwrites %lld\n",
1243 	    bcstats.pendingreads, bcstats.pendingwrites);
1244 }
1245 #endif
1246