xref: /openbsd-src/sys/kern/vfs_bio.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: vfs_bio.c,v 1.160 2014/07/13 15:48:41 tedu Exp $	*/
2 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
3 
4 /*
5  * Copyright (c) 1994 Christopher G. Demetriou
6  * Copyright (c) 1982, 1986, 1989, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
39  */
40 
41 /*
42  * Some references:
43  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
44  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
45  *		UNIX Operating System (Addison-Wesley, 1989)
46  */
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/proc.h>
51 #include <sys/buf.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/malloc.h>
55 #include <sys/pool.h>
56 #include <sys/resourcevar.h>
57 #include <sys/conf.h>
58 #include <sys/kernel.h>
59 #include <sys/specdev.h>
60 
61 #ifdef HIBERNATE
62 #include <sys/hibernate.h>
63 #endif /* HIBERNATE */
64 
65 int nobuffers;
66 int needbuffer;
67 struct bio_ops bioops;
68 
69 /*
70  * Buffer pool for I/O buffers.
71  */
72 struct pool bufpool;
73 struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
74 void buf_put(struct buf *);
75 
76 struct buf *bio_doread(struct vnode *, daddr_t, int, int);
77 struct buf *buf_get(struct vnode *, daddr_t, size_t);
78 void bread_cluster_callback(struct buf *);
79 
80 struct bcachestats bcstats;  /* counters */
81 long lodirtypages;      /* dirty page count low water mark */
82 long hidirtypages;      /* dirty page count high water mark */
83 long targetpages;   	/* target number of pages for cache size */
84 long buflowpages;	/* smallest size cache allowed */
85 long bufhighpages; 	/* largest size cache allowed */
86 long bufbackpages; 	/* minimum number of pages we shrink when asked to */
87 
88 vsize_t bufkvm;
89 
90 struct proc *cleanerproc;
91 int bd_req;			/* Sleep point for cleaner daemon. */
92 
93 void
94 buf_put(struct buf *bp)
95 {
96 	splassert(IPL_BIO);
97 
98 #ifdef DIAGNOSTIC
99 	if (bp->b_pobj != NULL)
100 		KASSERT(bp->b_bufsize > 0);
101 	if (ISSET(bp->b_flags, B_DELWRI))
102 		panic("buf_put: releasing dirty buffer");
103 	if (bp->b_freelist.tqe_next != NOLIST &&
104 	    bp->b_freelist.tqe_next != (void *)-1)
105 		panic("buf_put: still on the free list");
106 	if (bp->b_vnbufs.le_next != NOLIST &&
107 	    bp->b_vnbufs.le_next != (void *)-1)
108 		panic("buf_put: still on the vnode list");
109 	if (!LIST_EMPTY(&bp->b_dep))
110 		panic("buf_put: b_dep is not empty");
111 #endif
112 
113 	LIST_REMOVE(bp, b_list);
114 	bcstats.numbufs--;
115 
116 	if (buf_dealloc_mem(bp) != 0)
117 		return;
118 	pool_put(&bufpool, bp);
119 }
120 
121 /*
122  * Initialize buffers and hash links for buffers.
123  */
124 void
125 bufinit(void)
126 {
127 	u_int64_t dmapages;
128 
129 	dmapages = uvm_pagecount(&dma_constraint);
130 	/* take away a guess at how much of this the kernel will consume */
131 	dmapages -= (atop(physmem) - atop(uvmexp.free));
132 
133 	/*
134 	 * If MD code doesn't say otherwise, use up to 10% of DMA'able
135 	 * memory for buffers.
136 	 */
137 	if (bufcachepercent == 0)
138 		bufcachepercent = 10;
139 
140 	/*
141 	 * XXX these values and their same use in kern_sysctl
142 	 * need to move into buf.h
143 	 */
144 	KASSERT(bufcachepercent <= 90);
145 	KASSERT(bufcachepercent >= 5);
146 	if (bufpages == 0)
147 		bufpages = dmapages * bufcachepercent / 100;
148 	if (bufpages < BCACHE_MIN)
149 		bufpages = BCACHE_MIN;
150 	KASSERT(bufpages < dmapages);
151 
152 	bufhighpages = bufpages;
153 
154 	/*
155 	 * Set the base backoff level for the buffer cache.  We will
156 	 * not allow uvm to steal back more than this number of pages.
157 	 */
158 	buflowpages = dmapages * 5 / 100;
159 	if (buflowpages < BCACHE_MIN)
160 		buflowpages = BCACHE_MIN;
161 
162 	/*
163 	 * set bufbackpages to 100 pages, or 10 percent of the low water mark
164 	 * if we don't have that many pages.
165 	 */
166 
167 	bufbackpages = buflowpages * 10 / 100;
168 	if (bufbackpages > 100)
169 		bufbackpages = 100;
170 
171 	/*
172 	 * If the MD code does not say otherwise, reserve 10% of kva
173 	 * space for mapping buffers.
174 	 */
175 	if (bufkvm == 0)
176 		bufkvm = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 10;
177 
178 	/*
179 	 * Don't use more kva for buffer mappings than the amount of
180 	 * memory allowed in bufpages.
181 	 */
182 	if (bufkvm > bufpages * PAGE_SIZE)
183 		bufkvm = bufpages * PAGE_SIZE;
184 	/*
185 	 * Round bufkvm down to a multiple of MAXPHYS because we
186 	 * allocate va space in MAXPHYS-sized chunks.
187 	 */
188 	bufkvm &= ~(MAXPHYS - 1);
189 
190 	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
191 	pool_setipl(&bufpool, IPL_BIO);
192 
193 	bufcache_init();
194 
195 	/*
196 	 * bufkvm is passed as an argument because it is fixed once set,
197 	 * while bufpages is global because it can change while running.
198 	 */
199 	buf_mem_init(bufkvm);
200 
201 	/*
202 	 * Set the dirty page high water mark to be less than the low
203 	 * water mark for pages in the buffer cache. This ensures we
204 	 * can always back off by throwing away clean pages, and give
205 	 * ourselves a chance to write out the dirty pages eventually.
206 	 */
207 	hidirtypages = (buflowpages / 4) * 3;
208 	lodirtypages = buflowpages / 2;
209 
210 	/*
211 	 * We are allowed to use up to the reserve.
212 	 */
213 	targetpages = bufpages - RESERVE_PAGES;
214 }
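
/*
 * Editor's note (illustrative, not from the original source): a rough
 * worked example of the sizing above, assuming 4 KB pages and about
 * 262144 DMA'able pages (~1 GB) left over after the kernel's own use:
 *
 *	bufpages     = 262144 * 10 / 100 = 26214 pages  (~100 MB cache)
 *	buflowpages  = 262144 *  5 / 100 = 13107 pages  (~50 MB floor)
 *	bufbackpages = min(100, 13107 * 10 / 100) = 100 pages per backoff
 *	hidirtypages = (13107 / 4) * 3 = 9828, lodirtypages = 13107 / 2 = 6553
 *	targetpages  = 26214 - RESERVE_PAGES
 *
 * The exact values depend on the platform page size, bufcachepercent and
 * how much memory the MD code reserves; treat these as an example only.
 */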
215 
216 /*
217  * Resize the buffer cache to use newbufpages pages.
218  */
219 void
220 bufadjust(int newbufpages)
221 {
222 	struct buf *bp;
223 	int s;
224 
225 	if (newbufpages < buflowpages)
226 		newbufpages = buflowpages;
227 
228 	s = splbio();
229 	bufpages = newbufpages;
230 
231 	/*
232 	 * We are allowed to use up to the reserve
233 	 */
234 	targetpages = bufpages - RESERVE_PAGES;
235 
236 	/*
237 	 * Shrinking the cache happens here only if someone has manually
238 	 * adjusted bufcachepercent - or the pagedaemon has told us
239 	 * to give back memory *now* - so we give it all back.
240 	 */
241 	while ((bp = bufcache_getcleanbuf()) &&
242 	    (bcstats.numbufpages > targetpages)) {
243 		bufcache_take(bp);
244 		if (bp->b_vp) {
245 			RB_REMOVE(buf_rb_bufs,
246 			    &bp->b_vp->v_bufs_tree, bp);
247 			brelvp(bp);
248 		}
249 		buf_put(bp);
250 	}
251 
252 	/*
253 	 * Wake up the cleaner if we have lots of dirty pages,
254 	 * or if we are getting low on buffer cache kva.
255 	 */
256 	if ((UNCLEAN_PAGES >= hidirtypages) ||
257 	    bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
258 		wakeup(&bd_req);
259 
260 	splx(s);
261 }
262 
263 /*
264  * Make the buffer cache back off from cachepct.
265  */
266 int
267 bufbackoff(struct uvm_constraint_range *range, long size)
268 {
269 	/*
270 	 * Back off "size" buffer cache pages. Called by the page
271 	 * daemon to consume buffer cache pages rather than scanning.
272 	 *
273 	 * It returns 0 to the pagedaemon to indicate that it has
274 	 * succeeded in freeing enough pages. It returns -1 to
275 	 * indicate that it could not and the pagedaemon should take
276 	 * other measures.
277 	 *
278 	 */
279 	long pdelta, oldbufpages;
280 
281 	/*
282 	 * Back off by at least bufbackpages. If the page daemon gave us
283 	 * a larger size, back off by that much.
284 	 */
285 	pdelta = (size > bufbackpages) ? size : bufbackpages;
286 
287 	if (bufpages <= buflowpages)
288 		return(-1);
289 	if (bufpages - pdelta < buflowpages)
290 		pdelta = bufpages - buflowpages;
291 	oldbufpages = bufpages;
292 	bufadjust(bufpages - pdelta);
293 	if (oldbufpages - bufpages < size)
294 		return (-1); /* we did not free what we were asked */
295 	else
296 		return(0);
297 }
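
/*
 * Editor's sketch (not part of the original source): how a caller such
 * as the page daemon might use the contract described above.  The
 * "shortage" variable is hypothetical.
 */
#if 0
	long shortage = 256;	/* pages the pagedaemon wants back */

	if (bufbackoff(&dma_constraint, shortage) == 0) {
		/* The buffer cache freed at least "shortage" pages. */
	} else {
		/* It could not; fall back to scanning and paging out. */
	}
#endif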
298 
299 struct buf *
300 bio_doread(struct vnode *vp, daddr_t blkno, int size, int async)
301 {
302 	struct buf *bp;
303 	struct mount *mp;
304 
305 	bp = getblk(vp, blkno, size, 0, 0);
306 
307 	/*
308 	 * If buffer does not have valid data, start a read.
309 	 * Note that if buffer is B_INVAL, getblk() won't return it.
310 	 * Therefore, it's valid if its I/O has completed or been delayed.
311 	 */
312 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
313 		SET(bp->b_flags, B_READ | async);
314 		bcstats.pendingreads++;
315 		bcstats.numreads++;
316 		VOP_STRATEGY(bp);
317 		/* Pay for the read. */
318 		curproc->p_ru.ru_inblock++;			/* XXX */
319 	} else if (async) {
320 		brelse(bp);
321 	}
322 
323 	mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
324 
325 	/*
326 	 * Collect statistics on synchronous and asynchronous reads.
327 	 * Reads from block devices are charged to their associated
328 	 * filesystem (if any).
329 	 */
330 	if (mp != NULL) {
331 		if (async == 0)
332 			mp->mnt_stat.f_syncreads++;
333 		else
334 			mp->mnt_stat.f_asyncreads++;
335 	}
336 
337 	return (bp);
338 }
339 
340 /*
341  * Read a disk block.
342  * This algorithm described in Bach (p.54).
343  */
344 int
345 bread(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
346 {
347 	struct buf *bp;
348 
349 	/* Get buffer for block. */
350 	bp = *bpp = bio_doread(vp, blkno, size, 0);
351 
352 	/* Wait for the read to complete, and return result. */
353 	return (biowait(bp));
354 }
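
/*
 * Editor's sketch (not part of the original source): the usual calling
 * pattern for bread() in filesystem code.  The function and variable
 * names here are hypothetical.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	if ((error = bread(vp, lbn, bsize, &bp)) != 0) {
		/* A buffer is still returned on error; give it back. */
		brelse(bp);
		return (error);
	}
	/* ... examine or copy out bp->b_data here ... */
	brelse(bp);		/* release the buffer back to the cache */
	return (0);
}
#endif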
355 
356 /*
357  * Read-ahead multiple disk blocks. The first is sync, the rest async.
358  * Trivial modification to the breada algorithm presented in Bach (p.55).
359  */
360 int
361 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[],
362     int rasizes[], int nrablks, struct buf **bpp)
363 {
364 	struct buf *bp;
365 	int i;
366 
367 	bp = *bpp = bio_doread(vp, blkno, size, 0);
368 
369 	/*
370 	 * For each of the read-ahead blocks, start a read, if necessary.
371 	 */
372 	for (i = 0; i < nrablks; i++) {
373 		/* If it's in the cache, just go on to next one. */
374 		if (incore(vp, rablks[i]))
375 			continue;
376 
377 		/* Get a buffer for the read-ahead block */
378 		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
379 	}
380 
381 	/* Wait for the first (synchronous) read to complete, and return result. */
382 	return (biowait(bp));
383 }
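
/*
 * Editor's sketch (not part of the original source): requesting one
 * block of read-ahead along with the synchronous read, as a filesystem
 * read path might.  vp, lbn, bsize, bp and error are assumed to be
 * declared as in the bread() sketch above.
 */
#if 0
	daddr_t ra_lbn = lbn + 1;	/* next logical block */
	int ra_size = bsize;

	error = breadn(vp, lbn, bsize, &ra_lbn, &ra_size, 1, &bp);
#endif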
384 
385 /*
386  * Called from interrupt context.
387  */
388 void
389 bread_cluster_callback(struct buf *bp)
390 {
391 	struct buf **xbpp = bp->b_saveaddr;
392 	int i;
393 
394 	if (xbpp[1] != NULL) {
395 		size_t newsize = xbpp[1]->b_bufsize;
396 
397 		/*
398 		 * Shrink this buffer's mapping to only cover its part of
399 		 * the total I/O.
400 		 */
401 		buf_fix_mapping(bp, newsize);
402 		bp->b_bcount = newsize;
403 	}
404 
405 	for (i = 1; xbpp[i] != 0; i++) {
406 		if (ISSET(bp->b_flags, B_ERROR))
407 			SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
408 		biodone(xbpp[i]);
409 	}
410 
411 	free(xbpp, M_TEMP, 0);
412 
413 	if (ISSET(bp->b_flags, B_ASYNC)) {
414 		brelse(bp);
415 	} else {
416 		CLR(bp->b_flags, B_WANTED);
417 		wakeup(bp);
418 	}
419 }
420 
421 int
422 bread_cluster(struct vnode *vp, daddr_t blkno, int size, struct buf **rbpp)
423 {
424 	struct buf *bp, **xbpp;
425 	int howmany, maxra, i, inc;
426 	daddr_t sblkno;
427 
428 	*rbpp = bio_doread(vp, blkno, size, 0);
429 
430 	if (size != round_page(size))
431 		goto out;
432 
433 	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
434 		goto out;
435 
436 	maxra++;
437 	if (sblkno == -1 || maxra < 2)
438 		goto out;
439 
440 	howmany = MAXPHYS / size;
441 	if (howmany > maxra)
442 		howmany = maxra;
443 
444 	xbpp = mallocarray(howmany + 1, sizeof(struct buf *), M_TEMP, M_NOWAIT);
445 	if (xbpp == NULL)
446 		goto out;
447 
448 	for (i = howmany - 1; i >= 0; i--) {
449 		size_t sz;
450 
451 		/*
452 		 * The first buffer is allocated large enough to cover
453 		 * what all of the other buffers need.
454 		 */
455 		sz = i == 0 ? howmany * size : 0;
456 
457 		xbpp[i] = buf_get(vp, blkno + i + 1, sz);
458 		if (xbpp[i] == NULL) {
459 			for (++i; i < howmany; i++) {
460 				SET(xbpp[i]->b_flags, B_INVAL);
461 				brelse(xbpp[i]);
462 			}
463 			free(xbpp, M_TEMP, 0);
464 			goto out;
465 		}
466 	}
467 
468 	bp = xbpp[0];
469 
470 	xbpp[howmany] = 0;
471 
472 	inc = btodb(size);
473 
474 	for (i = 1; i < howmany; i++) {
475 		bcstats.pendingreads++;
476 		bcstats.numreads++;
477 		SET(xbpp[i]->b_flags, B_READ | B_ASYNC);
478 		xbpp[i]->b_blkno = sblkno + (i * inc);
479 		xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
480 		xbpp[i]->b_data = NULL;
481 		xbpp[i]->b_pobj = bp->b_pobj;
482 		xbpp[i]->b_poffs = bp->b_poffs + (i * size);
483 	}
484 
485 	KASSERT(bp->b_lblkno == blkno + 1);
486 	KASSERT(bp->b_vp == vp);
487 
488 	bp->b_blkno = sblkno;
489 	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
490 
491 	bp->b_saveaddr = (void *)xbpp;
492 	bp->b_iodone = bread_cluster_callback;
493 
494 	bcstats.pendingreads++;
495 	bcstats.numreads++;
496 	VOP_STRATEGY(bp);
497 	curproc->p_ru.ru_inblock++;
498 
499 out:
500 	return (biowait(*rbpp));
501 }
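
/*
 * Editor's sketch (not part of the original source): bread_cluster()
 * takes the same arguments as bread() and can be used by callers that
 * expect sequential access; the extra blocks are read asynchronously
 * into separate buffers.  Hypothetical usage:
 */
#if 0
	error = bread_cluster(vp, lbn, bsize, &bp);
#endif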
502 
503 /*
504  * Block write.  Described in Bach (p.56)
505  */
506 int
507 bwrite(struct buf *bp)
508 {
509 	int rv, async, wasdelayed, s;
510 	struct vnode *vp;
511 	struct mount *mp;
512 
513 	vp = bp->b_vp;
514 	if (vp != NULL)
515 		mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
516 	else
517 		mp = NULL;
518 
519 	/*
520 	 * Remember buffer type, to switch on it later.  If the write was
521 	 * synchronous, but the file system was mounted with MNT_ASYNC,
522 	 * convert it to a delayed write.
523 	 * XXX note that this relies on delayed tape writes being converted
524 	 * to async, not sync writes (which is safe, but ugly).
525 	 */
526 	async = ISSET(bp->b_flags, B_ASYNC);
527 	if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
528 		bdwrite(bp);
529 		return (0);
530 	}
531 
532 	/*
533 	 * Collect statistics on synchronous and asynchronous writes.
534 	 * Writes to block devices are charged to their associated
535 	 * filesystem (if any).
536 	 */
537 	if (mp != NULL) {
538 		if (async)
539 			mp->mnt_stat.f_asyncwrites++;
540 		else
541 			mp->mnt_stat.f_syncwrites++;
542 	}
543 	bcstats.pendingwrites++;
544 	bcstats.numwrites++;
545 
546 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
547 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
548 
549 	s = splbio();
550 
551 	/*
552 	 * Pay for the I/O operation, unless it was a delayed write
553 	 * already paid for in bdwrite(), and make sure the buf is on
554 	 * the correct vnode queue.  We have to do this now, or the
555 	 * vnode may not be properly notified that its I/O has completed.
556 	 */
557 	if (wasdelayed) {
558 		reassignbuf(bp);
559 	} else
560 		curproc->p_ru.ru_oublock++;
561 
562 
563 	/* Initiate disk write.  Make sure the appropriate party is charged. */
564 	bp->b_vp->v_numoutput++;
565 	splx(s);
566 	SET(bp->b_flags, B_WRITEINPROG);
567 	VOP_STRATEGY(bp);
568 
569 	/*
570 	 * If the queue is above the high water mark, wait till
571 	 * the number of outstanding write bufs drops below the low
572 	 * water mark.
573 	 */
574 	if (bp->b_bq)
575 		bufq_wait(bp->b_bq, bp);
576 
577 	if (async)
578 		return (0);
579 
580 	/*
581 	 * If I/O was synchronous, wait for it to complete.
582 	 */
583 	rv = biowait(bp);
584 
585 	/* Release the buffer. */
586 	brelse(bp);
587 
588 	return (rv);
589 }
590 
591 
592 /*
593  * Delayed write.
594  *
595  * The buffer is marked dirty, but is not queued for I/O.
596  * This routine should be used when the buffer is expected
597  * to be modified again soon, typically a small write that
598  * partially fills a buffer.
599  *
600  * NB: magnetic tapes cannot be delayed; they must be
601  * written in the order that the writes are requested.
602  *
603  * Described in Leffler, et al. (pp. 208-213).
604  */
605 void
606 bdwrite(struct buf *bp)
607 {
608 	int s;
609 
610 	/*
611 	 * If the block hasn't been seen before:
612 	 *	(1) Mark it as having been seen,
613 	 *	(2) Charge for the write.
614 	 *	(3) Make sure it's on its vnode's correct block list,
615 	 *	(4) If a buffer is rewritten, move it to end of dirty list
616 	 */
617 	if (!ISSET(bp->b_flags, B_DELWRI)) {
618 		SET(bp->b_flags, B_DELWRI);
619 		s = splbio();
620 		reassignbuf(bp);
621 		splx(s);
622 		curproc->p_ru.ru_oublock++;		/* XXX */
623 	}
624 
625 	/* If this is a tape block, write the block now. */
626 	if (major(bp->b_dev) < nblkdev &&
627 	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
628 		bawrite(bp);
629 		return;
630 	}
631 
632 	/* Otherwise, the "write" is done, so mark and release the buffer. */
633 	CLR(bp->b_flags, B_NEEDCOMMIT);
634 	SET(bp->b_flags, B_DONE);
635 	brelse(bp);
636 }
637 
638 /*
639  * Asynchronous block write; just an asynchronous bwrite().
640  */
641 void
642 bawrite(struct buf *bp)
643 {
644 
645 	SET(bp->b_flags, B_ASYNC);
646 	VOP_BWRITE(bp);
647 }
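
/*
 * Editor's note (illustrative, not from the original source): the three
 * write interfaces above trade durability against latency.  One of the
 * following is chosen depending on the situation; the buffer handling
 * shown here is a hypothetical sketch.
 */
#if 0
	int error;

	/* Must be on disk before we proceed (e.g. critical metadata). */
	error = bwrite(bp);

	/* Likely to be modified again soon (e.g. a partially filled block). */
	bdwrite(bp);

	/* Done with it, but no need to wait for completion. */
	bawrite(bp);
#endif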
648 
649 /*
650  * Must be called at splbio()
651  */
652 void
653 buf_dirty(struct buf *bp)
654 {
655 	splassert(IPL_BIO);
656 
657 #ifdef DIAGNOSTIC
658 	if (!ISSET(bp->b_flags, B_BUSY))
659 		panic("Trying to dirty buffer on freelist!");
660 #endif
661 
662 	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
663 		SET(bp->b_flags, B_DELWRI);
664 		reassignbuf(bp);
665 	}
666 }
667 
668 /*
669  * Must be called at splbio()
670  */
671 void
672 buf_undirty(struct buf *bp)
673 {
674 	splassert(IPL_BIO);
675 
676 #ifdef DIAGNOSTIC
677 	if (!ISSET(bp->b_flags, B_BUSY))
678 		panic("Trying to undirty buffer on freelist!");
679 #endif
680 	if (ISSET(bp->b_flags, B_DELWRI)) {
681 		CLR(bp->b_flags, B_DELWRI);
682 		reassignbuf(bp);
683 	}
684 }
685 
686 /*
687  * Release a buffer on to the free lists.
688  * Described in Bach (p. 46).
689  */
690 void
691 brelse(struct buf *bp)
692 {
693 	int s;
694 
695 	s = splbio();
696 
697 	if (bp->b_data != NULL)
698 		KASSERT(bp->b_bufsize > 0);
699 
700 	/*
701 	 * Determine which queue the buffer should be on, then put it there.
702 	 */
703 
704 	/* If it's not cacheable, or an error, mark it invalid. */
705 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
706 		SET(bp->b_flags, B_INVAL);
707 
708 	if (ISSET(bp->b_flags, B_INVAL)) {
709 		/*
710 		 * If the buffer is invalid, free it now rather than leaving
711 		 * it in a queue and wasting memory.
712 		 */
713 		if (LIST_FIRST(&bp->b_dep) != NULL)
714 			buf_deallocate(bp);
715 
716 		if (ISSET(bp->b_flags, B_DELWRI)) {
717 			CLR(bp->b_flags, B_DELWRI);
718 		}
719 
720 		if (bp->b_vp) {
721 			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
722 			    bp);
723 			brelvp(bp);
724 		}
725 		bp->b_vp = NULL;
726 
727 		/*
728 		 * Wake up any processes waiting for _this_ buffer to
729 		 * become free. They are not allowed to grab it
730 		 * since it will be freed. But the only sleeper is
731 		 * getblk and it will restart the operation after
732 		 * sleep.
733 		 */
734 		if (ISSET(bp->b_flags, B_WANTED)) {
735 			CLR(bp->b_flags, B_WANTED);
736 			wakeup(bp);
737 		}
738 		buf_put(bp);
739 	} else {
740 		/*
741 		 * It has valid data.  Put it on the end of the appropriate
742 		 * queue, so that it'll stick around for as long as possible.
743 		 */
744 		bufcache_release(bp);
745 
746 		/* Unlock the buffer. */
747 		CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
748 		buf_release(bp);
749 
750 		/* Wake up any processes waiting for _this_ buffer to
751 		 * become free. */
752 		if (ISSET(bp->b_flags, B_WANTED)) {
753 			CLR(bp->b_flags, B_WANTED);
754 			wakeup(bp);
755 		}
756 	}
757 
758 	/* Wake up syncer and cleaner processes waiting for buffers. */
759 	if (nobuffers) {
760 		nobuffers = 0;
761 		wakeup(&nobuffers);
762 	}
763 
764 	/* Wake up any processes waiting for any buffer to become free. */
765 	if (needbuffer && bcstats.numbufpages < targetpages &&
766 	    bcstats.kvaslots_avail > RESERVE_SLOTS) {
767 		needbuffer = 0;
768 		wakeup(&needbuffer);
769 	}
770 
771 	splx(s);
772 }
773 
774 /*
775  * Determine if a block is in the cache. Look it up in the vnode's buffer
776  * tree; if it's there, return a pointer to it, unless it's marked invalid.
777  */
778 struct buf *
779 incore(struct vnode *vp, daddr_t blkno)
780 {
781 	struct buf *bp;
782 	struct buf b;
783 	int s;
784 
785 	s = splbio();
786 
787 	/* Search buf lookup tree */
788 	b.b_lblkno = blkno;
789 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
790 	if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
791 		bp = NULL;
792 
793 	splx(s);
794 	return (bp);
795 }
796 
797 /*
798  * Get a block of requested size that is associated with
799  * a given vnode and block offset. If it is found in the
800  * block cache, mark it as having been found, make it busy
801  * and return it. Otherwise, return an empty block of the
802  * correct size. It is up to the caller to ensure that the
803  * cached blocks are of the correct size.
804  */
805 struct buf *
806 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
807 {
808 	struct buf *bp;
809 	struct buf b;
810 	int s, error;
811 
812 	/*
813 	 * XXX
814 	 * The following is an inlined version of 'incore()', but with
815 	 * the 'invalid' test moved to after the 'busy' test.  It's
816 	 * necessary because there are some cases in which the NFS
817 	 * code sets B_INVAL prior to writing data to the server, but
818 	 * in which the buffers actually contain valid data.  In this
819 	 * case, we can't allow the system to allocate a new buffer for
820 	 * the block until the write is finished.
821 	 */
822 start:
823 	s = splbio();
824 	b.b_lblkno = blkno;
825 	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
826 	if (bp != NULL) {
827 		if (ISSET(bp->b_flags, B_BUSY)) {
828 			SET(bp->b_flags, B_WANTED);
829 			error = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
830 			    slptimeo);
831 			splx(s);
832 			if (error)
833 				return (NULL);
834 			goto start;
835 		}
836 
837 		if (!ISSET(bp->b_flags, B_INVAL)) {
838 			bcstats.cachehits++;
839 			SET(bp->b_flags, B_CACHE);
840 			bufcache_take(bp);
841 			buf_acquire(bp);
842 			splx(s);
843 			return (bp);
844 		}
845 	}
846 	splx(s);
847 
848 	if ((bp = buf_get(vp, blkno, size)) == NULL)
849 		goto start;
850 
851 	return (bp);
852 }
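
/*
 * Editor's sketch (not part of the original source): allocating a block
 * that will be completely overwritten, so nothing needs to be read from
 * disk first.  The function name and contents are hypothetical.
 */
#if 0
static void
example_new_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;

	/* Get a busy buffer for the block without starting a read. */
	bp = getblk(vp, lbn, bsize, 0, 0);

	/* Fill in the new contents... */
	memset(bp->b_data, 0, bsize);

	/* ...and let the cleaner write it out later. */
	bdwrite(bp);
}
#endif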
853 
854 /*
855  * Get an empty, disassociated buffer of given size.
856  */
857 struct buf *
858 geteblk(int size)
859 {
860 	struct buf *bp;
861 
862 	while ((bp = buf_get(NULL, 0, size)) == NULL)
863 		;
864 
865 	return (bp);
866 }
867 
868 /*
869  * Allocate a buffer.
870  */
871 struct buf *
872 buf_get(struct vnode *vp, daddr_t blkno, size_t size)
873 {
874 	struct buf *bp;
875 	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
876 	int npages;
877 	int s;
878 
879 	s = splbio();
880 	if (size) {
881 		/*
882 		 * Wake up the cleaner if we have lots of dirty pages,
883 		 * or if we are getting low on buffer cache kva.
884 		 */
885 		if (UNCLEAN_PAGES >= hidirtypages ||
886 			bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
887 			wakeup(&bd_req);
888 
889 		npages = atop(round_page(size));
890 
891 		/*
892 		 * If our cache has been previously shrunk,
893 		 * allow it to grow again with use, up to
894 		 * bufhighpages (bufcachepercent).
895 		 */
896 		if (bufpages < bufhighpages)
897 			bufadjust(bufhighpages);
898 
899 		/*
900 		 * If we would go over the page target with our
901 		 * new allocation, free enough buffers first
902 		 * to stay at the target with our new allocation.
903 		 */
904 		while ((bcstats.numbufpages + npages > targetpages) &&
905 		    (bp = bufcache_getcleanbuf())) {
906 			bufcache_take(bp);
907 			if (bp->b_vp) {
908 				RB_REMOVE(buf_rb_bufs,
909 				    &bp->b_vp->v_bufs_tree, bp);
910 				brelvp(bp);
911 			}
912 			buf_put(bp);
913 		}
914 
915 		/*
916 		 * If we get here, we tried to free buffers above and
917 		 * still could not get below the target.  Wake the cleaner
918 		 * and wait for it to push some buffers out.
919 		 */
920 		if ((bcstats.numbufpages + npages > targetpages ||
921 		    bcstats.kvaslots_avail <= RESERVE_SLOTS) &&
922 		    curproc != syncerproc && curproc != cleanerproc) {
923 			wakeup(&bd_req);
924 			needbuffer++;
925 			tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
926 			splx(s);
927 			return (NULL);
928 		}
929 		if (bcstats.numbufpages + npages > bufpages) {
930 			/* cleaner or syncer */
931 			nobuffers = 1;
932 			tsleep(&nobuffers, PRIBIO, "nobuffers", 0);
933 			splx(s);
934 			return (NULL);
935 		}
936 	}
937 
938 	bp = pool_get(&bufpool, poolwait|PR_ZERO);
939 
940 	if (bp == NULL) {
941 		splx(s);
942 		return (NULL);
943 	}
944 
945 	bp->b_freelist.tqe_next = NOLIST;
946 	bp->b_dev = NODEV;
947 	LIST_INIT(&bp->b_dep);
948 	bp->b_bcount = size;
949 
950 	buf_acquire_nomap(bp);
951 
952 	if (vp != NULL) {
953 		/*
954 		 * We insert the buffer into the lookup tree with B_BUSY
955 		 * set while we allocate pages for it.  This way any getblk
956 		 * that happens while we allocate pages will wait for
957 		 * this buffer instead of starting its own buf_get.
958 		 *
959 		 * But first, we check if someone beat us to it.
960 		 */
961 		if (incore(vp, blkno)) {
962 			pool_put(&bufpool, bp);
963 			splx(s);
964 			return (NULL);
965 		}
966 
967 		bp->b_blkno = bp->b_lblkno = blkno;
968 		bgetvp(vp, bp);
969 		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
970 			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
971 	} else {
972 		bp->b_vnbufs.le_next = NOLIST;
973 		SET(bp->b_flags, B_INVAL);
974 		bp->b_vp = NULL;
975 	}
976 
977 	LIST_INSERT_HEAD(&bufhead, bp, b_list);
978 	bcstats.numbufs++;
979 
980 	if (size) {
981 		buf_alloc_pages(bp, round_page(size));
982 		buf_map(bp);
983 	}
984 
985 	splx(s);
986 
987 	return (bp);
988 }
989 
990 /*
991  * Buffer cleaning daemon.
992  */
993 void
994 buf_daemon(struct proc *p)
995 {
996 	struct timeval starttime, timediff;
997 	struct buf *bp = NULL;
998 	int s, pushed = 0;
999 
1000 	cleanerproc = curproc;
1001 
1002 	s = splbio();
1003 	for (;;) {
1004 		if (bp == NULL || (pushed >= 16 &&
1005 		    UNCLEAN_PAGES < hidirtypages &&
1006 		    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS)){
1007 			pushed = 0;
1008 			/*
1009 			 * Wake up anyone who was waiting for buffers
1010 			 * to be released.
1011 			 */
1012 			if (needbuffer) {
1013 				needbuffer = 0;
1014 				wakeup(&needbuffer);
1015 			}
1016 			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
1017 		}
1018 
1019 		getmicrouptime(&starttime);
1020 
1021 		while ((bp = bufcache_getdirtybuf())) {
1022 			struct timeval tv;
1023 
1024 			if (UNCLEAN_PAGES < lodirtypages &&
1025 			    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS &&
1026 			    pushed >= 16)
1027 				break;
1028 
1029 			bufcache_take(bp);
1030 			buf_acquire(bp);
1031 			splx(s);
1032 
1033 			if (ISSET(bp->b_flags, B_INVAL)) {
1034 				brelse(bp);
1035 				s = splbio();
1036 				continue;
1037 			}
1038 #ifdef DIAGNOSTIC
1039 			if (!ISSET(bp->b_flags, B_DELWRI))
1040 				panic("Clean buffer on dirty queue");
1041 #endif
1042 			if (LIST_FIRST(&bp->b_dep) != NULL &&
1043 			    !ISSET(bp->b_flags, B_DEFERRED) &&
1044 			    buf_countdeps(bp, 0, 0)) {
1045 				SET(bp->b_flags, B_DEFERRED);
1046 				s = splbio();
1047 				bufcache_release(bp);
1048 				buf_release(bp);
1049 				continue;
1050 			}
1051 
1052 			bawrite(bp);
1053 			pushed++;
1054 
1055 			/* Never allow processing to run for more than 1 sec */
1056 			getmicrouptime(&tv);
1057 			timersub(&tv, &starttime, &timediff);
1058 			s = splbio();
1059 			if (timediff.tv_sec)
1060 				break;
1061 
1062 		}
1063 	}
1064 }
1065 
1066 /*
1067  * Wait for operations on the buffer to complete.
1068  * When they do, extract and return the I/O's error value.
1069  */
1070 int
1071 biowait(struct buf *bp)
1072 {
1073 	int s;
1074 
1075 	KASSERT(!(bp->b_flags & B_ASYNC));
1076 
1077 	s = splbio();
1078 	while (!ISSET(bp->b_flags, B_DONE))
1079 		tsleep(bp, PRIBIO + 1, "biowait", 0);
1080 	splx(s);
1081 
1082 	/* check for interruption of I/O (e.g. via NFS), then errors. */
1083 	if (ISSET(bp->b_flags, B_EINTR)) {
1084 		CLR(bp->b_flags, B_EINTR);
1085 		return (EINTR);
1086 	}
1087 
1088 	if (ISSET(bp->b_flags, B_ERROR))
1089 		return (bp->b_error ? bp->b_error : EIO);
1090 	else
1091 		return (0);
1092 }
1093 
1094 /*
1095  * Mark I/O complete on a buffer.
1096  *
1097  * If a callback has been requested, e.g. the pageout
1098  * daemon, do so. Otherwise, awaken waiting processes.
1099  *
1100  * [ Leffler, et al., says on p.247:
1101  *	"This routine wakes up the blocked process, frees the buffer
1102  *	for an asynchronous write, or, for a request by the pagedaemon
1103  *	process, invokes a procedure specified in the buffer structure" ]
1104  *
1105  * In real life, the pagedaemon (or other system processes) wants
1106  * to do async stuff too, and doesn't want the buffer brelse()'d.
1107  * (for swap pager, that puts swap buffers on the free lists (!!!),
1108  * for the vn device, that puts malloc'd buffers on the free lists!)
1109  *
1110  * Must be called at splbio().
1111  */
1112 void
1113 biodone(struct buf *bp)
1114 {
1115 	splassert(IPL_BIO);
1116 
1117 	if (ISSET(bp->b_flags, B_DONE))
1118 		panic("biodone already");
1119 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1120 
1121 	if (bp->b_bq)
1122 		bufq_done(bp->b_bq, bp);
1123 
1124 	if (LIST_FIRST(&bp->b_dep) != NULL)
1125 		buf_complete(bp);
1126 
1127 	if (!ISSET(bp->b_flags, B_READ)) {
1128 		CLR(bp->b_flags, B_WRITEINPROG);
1129 		vwakeup(bp->b_vp);
1130 	}
1131 	if (bcstats.numbufs &&
1132 	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
1133 		if (!ISSET(bp->b_flags, B_READ))
1134 			bcstats.pendingwrites--;
1135 		else
1136 			bcstats.pendingreads--;
1137 	}
1138 	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
1139 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1140 		(*bp->b_iodone)(bp);
1141 	} else {
1142 		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
1143 			brelse(bp);
1144 		} else {			/* or just wakeup the buffer */
1145 			CLR(bp->b_flags, B_WANTED);
1146 			wakeup(bp);
1147 		}
1148 	}
1149 }
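
/*
 * Editor's sketch (not part of the original source): a typical I/O
 * completion path in a disk driver, showing how biodone() is reached.
 * The function and its error handling are hypothetical.
 */
#if 0
static void
example_complete_io(struct buf *bp, int hw_error)
{
	int s;

	if (hw_error) {
		bp->b_error = hw_error;
		SET(bp->b_flags, B_ERROR);
	}
	bp->b_resid = 0;	/* the whole transfer completed */

	s = splbio();
	biodone(bp);		/* wakes biowait() or calls b_iodone */
	splx(s);
}
#endif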
1150 
1151 #ifdef DDB
1152 void	bcstats_print(int (*)(const char *, ...)
1153     __attribute__((__format__(__kprintf__,1,2))));
1154 /*
1155  * bcstats_print: ddb hook to print interesting buffer cache counters
1156  */
1157 void
1158 bcstats_print(
1159     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
1160 {
1161 	(*pr)("Current Buffer Cache status:\n");
1162 	(*pr)("numbufs %lld busymapped %lld, delwri %lld\n",
1163 	    bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs);
1164 	(*pr)("kvaslots %lld avail kva slots %lld\n",
1165 	    bcstats.kvaslots, bcstats.kvaslots_avail);
1166 	(*pr)("bufpages %lld, dirtypages %lld\n",
1167 	    bcstats.numbufpages, bcstats.numdirtypages);
1168 	(*pr)("pendingreads %lld, pendingwrites %lld\n",
1169 	    bcstats.pendingreads, bcstats.pendingwrites);
1170 }
1171 #endif
1172 
1173 /* bufcache freelist code below */
1174 
1175 /*
1176  * simple LRU queues, one clean and one dirty
1177  */
1178 TAILQ_HEAD(bufqueue, buf);
1179 struct bufqueue cleanqueue;
1180 struct bufqueue dirtyqueue;
1181 
1182 void
1183 bufcache_init(void)
1184 {
1185 
1186 	TAILQ_INIT(&cleanqueue);
1187 	TAILQ_INIT(&dirtyqueue);
1188 }
1189 
1190 struct buf *
1191 bufcache_getcleanbuf(void)
1192 {
1193 	return TAILQ_FIRST(&cleanqueue);
1194 }
1195 
1196 struct buf *
1197 bufcache_getdirtybuf(void)
1198 {
1199 	return TAILQ_FIRST(&dirtyqueue);
1200 }
1201 
1202 void
1203 bufcache_take(struct buf *bp)
1204 {
1205 	struct bufqueue *queue;
1206 
1207 	splassert(IPL_BIO);
1208 
1209 	if (!ISSET(bp->b_flags, B_DELWRI)) {
1210 		queue = &cleanqueue;
1211 		bcstats.numcleanpages -= atop(bp->b_bufsize);
1212 	} else {
1213 		queue = &dirtyqueue;
1214 		bcstats.numdirtypages -= atop(bp->b_bufsize);
1215 		bcstats.delwribufs--;
1216 	}
1217 	TAILQ_REMOVE(queue, bp, b_freelist);
1218 }
1219 
1220 void
1221 bufcache_release(struct buf *bp)
1222 {
1223 	struct bufqueue *queue;
1224 
1225 	if (!ISSET(bp->b_flags, B_DELWRI)) {
1226 		queue = &cleanqueue;
1227 		bcstats.numcleanpages += atop(bp->b_bufsize);
1228 	} else {
1229 		queue = &dirtyqueue;
1230 		bcstats.numdirtypages += atop(bp->b_bufsize);
1231 		bcstats.delwribufs++;
1232 	}
1233 	TAILQ_INSERT_TAIL(queue, bp, b_freelist);
1234 }
1235 
1236 #ifdef HIBERNATE
1237 /*
1238  * Flush buffercache to lowest value on hibernate suspend
1239  */
1240 void
1241 hibernate_suspend_bufcache(void)
1242 {
1243 	long save_buflowpages = buflowpages;
1244 
1245 	/* Shrink buffercache to 16MB (4096 pages) */
1246 	buflowpages = 4096;
1247 	bufadjust(buflowpages);
1248 	buflowpages = save_buflowpages;
1249 	bufhighpages = bufpages;
1250 }
1251 
1252 void
1253 hibernate_resume_bufcache(void)
1254 {
1255 	uint64_t dmapages, pgs;
1256 
1257 	dmapages = uvm_pagecount(&dma_constraint);
1258 	pgs = bufcachepercent * dmapages / 100;
1259 	bufadjust(pgs);
1260 	bufhighpages = bufpages;
1261 }
1262 #endif /* HIBERNATE */
1263