/*	$OpenBSD: vfs_bio.c,v 1.111 2009/03/23 15:10:44 beck Exp $	*/
/*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <miscfs/specfs/specdev.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
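
/*
 * Illustrative sketch (under #if 0, never compiled): how a buffer is
 * located by hand with BUFHASH.  The vnode pointer and logical block
 * number are folded into an index into bufhashtbl, and the chain is
 * walked via the b_hash linkage.  incore() below does exactly this,
 * plus a B_INVAL check.
 */
#if 0
static struct buf *
example_bufhash_lookup(struct vnode *vp, daddr64_t lblkno)
{
	struct buf *bp;

	LIST_FOREACH(bp, BUFHASH(vp, lblkno), b_hash) {
		if (bp->b_vp == vp && bp->b_lblkno == lblkno)
			return (bp);	/* may still be B_BUSY or B_INVAL */
	}
	return (NULL);
}
#endif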

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		2		/* number of free buffer queues */

#define	BQ_DIRTY	0		/* LRU queue with dirty buffers */
#define	BQ_CLEAN	1		/* LRU queue with clean buffers */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;
struct bio_ops bioops;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;
struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
struct buf *buf_get(size_t);
struct buf *buf_stub(struct vnode *, daddr64_t);
void buf_put(struct buf *);

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

struct buf *bio_doread(struct vnode *, daddr64_t, int, int);
struct buf *getnewbuf(size_t, int, int, int *);
void buf_init(struct buf *);
void bread_cluster_callback(struct buf *);

/*
 * We keep a few counters to monitor the utilization of the buffer cache.
 *
 *  numbufpages   - total number of pages allocated.
 *  numdirtypages - number of pages on the BQ_DIRTY queue.
 *  lodirtypages  - low water mark for the buffer cleaning daemon.
 *  hidirtypages  - high water mark for the buffer cleaning daemon.
 *  numcleanpages - number of pages on the BQ_CLEAN queue.
 *		    Used to track the need to speed up the cleaner and
 *		    as a reserve for special processes like the syncer.
 *  hicleanpages  - high water mark for clean pages; when exceeded,
 *		    clean buffers are freed down to locleanpages.
 *  locleanpages  - low water mark for clean pages.
 *  maxcleanpages - the highest page count seen on BQ_CLEAN.
 */

struct bcachestats bcstats;
long lodirtypages;
long hidirtypages;
long locleanpages;
long hicleanpages;
long maxcleanpages;

/* XXX - should be defined here. */
extern int bufcachepercent;

vsize_t bufkvm;

struct proc *cleanerproc;
int bd_req;			/* Sleep point for cleaner daemon. */

void
bremfree(struct buf *bp)
{
	struct bqueues *dp = NULL;

	splassert(IPL_BIO);

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &TAILQ_NEXT(bp, b_freelist))
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		bcstats.numcleanpages -= atop(bp->b_bufsize);
	} else {
		bcstats.numdirtypages -= atop(bp->b_bufsize);
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
	bcstats.freebufs--;
}
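
/*
 * Sketch (under #if 0, never compiled) of the TAILQ layout assumption
 * bremfree() relies on above: when a buffer is the last element of a
 * queue, the head's tqh_last points at that buffer's tqe_next slot,
 * so comparing the two identifies the queue without keeping a back
 * pointer in the buffer itself.
 */
#if 0
static int
example_is_tail_of(struct bqueues *dp, struct buf *bp)
{
	/* Only meaningful when bp is the tail of some free queue. */
	return (TAILQ_NEXT(bp, b_freelist) == NULL &&
	    dp->tqh_last == &TAILQ_NEXT(bp, b_freelist));
}
#endif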

void
buf_init(struct buf *bp)
{
	splassert(IPL_BIO);

	bzero((char *)bp, sizeof *bp);
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_freelist.tqe_next = NOLIST;
	bp->b_synctime = time_uptime + 300;
	bp->b_dev = NODEV;
	LIST_INIT(&bp->b_dep);
}

/*
 * This is a non-sleeping expanded equivalent of getblk() that allocates only
 * the buffer structure, and not its contents.
 */
struct buf *
buf_stub(struct vnode *vp, daddr64_t lblkno)
{
	struct buf *bp;
	int s;

	s = splbio();
	bp = pool_get(&bufpool, PR_NOWAIT);
	splx(s);

	if (bp == NULL)
		return (NULL);

	bzero((char *)bp, sizeof *bp);
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_freelist.tqe_next = NOLIST;
	bp->b_synctime = time_uptime + 300;
	bp->b_dev = NODEV;
	bp->b_bufsize = 0;
	bp->b_data = NULL;
	bp->b_flags = 0;
	bp->b_blkno = bp->b_lblkno = lblkno;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	LIST_INIT(&bp->b_dep);

	buf_acquire_unmapped(bp);

	s = splbio();
	LIST_INSERT_HEAD(&bufhead, bp, b_list);
	bcstats.numbufs++;
	bgetvp(vp, bp);
	splx(s);

	return (bp);
}

struct buf *
buf_get(size_t size)
{
	struct buf *bp;
	int npages;

	splassert(IPL_BIO);

	KASSERT(size > 0);

	size = round_page(size);
	npages = atop(size);

	if (bcstats.numbufpages + npages > bufpages)
		return (NULL);

	bp = pool_get(&bufpool, PR_WAITOK);

	buf_init(bp);
	bp->b_flags = B_INVAL;
	buf_alloc_pages(bp, size);
	bp->b_data = NULL;
	binsheadfree(bp, &bufqueues[BQ_CLEAN]);
	binshash(bp, &invalhash);
	LIST_INSERT_HEAD(&bufhead, bp, b_list);
	bcstats.numbufs++;
	bcstats.freebufs++;
	bcstats.numcleanpages += atop(bp->b_bufsize);

	return (bp);
}

void
buf_put(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (bp->b_pobj != NULL)
		KASSERT(bp->b_bufsize > 0);
	if (ISSET(bp->b_flags, B_DELWRI))
		panic("buf_put: releasing dirty buffer");
	if (bp->b_freelist.tqe_next != NOLIST &&
	    bp->b_freelist.tqe_next != (void *)-1)
		panic("buf_put: still on the free list");
	if (bp->b_vnbufs.le_next != NOLIST &&
	    bp->b_vnbufs.le_next != (void *)-1)
		panic("buf_put: still on the vnode list");
	if (!LIST_EMPTY(&bp->b_dep))
		panic("buf_put: b_dep is not empty");
#endif

	LIST_REMOVE(bp, b_list);
	bcstats.numbufs--;

	if (buf_dealloc_mem(bp) != 0)
		return;
	pool_put(&bufpool, bp);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueues *dp;

	/* XXX - for now */
	bufpages = bufcachepercent = bufkvm = 0;

	/*
	 * If MD code doesn't say otherwise, use 10% of kvm for mappings and
	 * 10% of physmem for pages.
	 */
	if (bufcachepercent == 0)
		bufcachepercent = 10;
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	if (bufkvm == 0)
		bufkvm = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 10;

	/*
	 * Don't use more than twice the amount of bufpages for mappings.
	 * It's twice since we map things sparsely.
	 */
	if (bufkvm > bufpages * PAGE_SIZE)
		bufkvm = bufpages * PAGE_SIZE;
	/*
	 * Round bufkvm down to a multiple of MAXPHYS, since va space
	 * is allocated in MAXPHYS-sized chunks.
	 */
	bufkvm &= ~(MAXPHYS - 1);

	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
	pool_setipl(&bufpool, IPL_BIO);
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);

	/*
	 * bufkvm is passed as an argument because it is fixed once set,
	 * while bufpages is global because it can change while running.
	 */
	buf_mem_init(bufkvm);

	bufhashtbl = hashinit(bufpages / 4, M_CACHE, M_WAITOK, &bufhash);
	hidirtypages = (bufpages / 4) * 3;
	lodirtypages = bufpages / 2;

	/*
	 * When we hit 95% of pages being clean, we bring them down to
	 * 90% to have some slack.
	 */
	hicleanpages = bufpages - (bufpages / 20);
	locleanpages = bufpages - (bufpages / 10);

	maxcleanpages = locleanpages;
}
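
/*
 * Worked example of the sizing above, with illustrative numbers only:
 * on a machine with 512MB of RAM and 4KB pages, physmem is 131072
 * pages, so bufpages = 131072 * 10 / 100 = 13107 pages (about 51MB).
 * The watermarks then become hidirtypages = 9828, lodirtypages = 6553,
 * hicleanpages = 12452 (95%) and locleanpages = maxcleanpages = 11797
 * (90%).
 */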

struct buf *
bio_doread(struct vnode *vp, daddr64_t blkno, int size, int async)
{
	struct buf *bp;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If buffer does not have valid data, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		SET(bp->b_flags, B_READ | async);
		bcstats.pendingreads++;
		bcstats.numreads++;
		VOP_STRATEGY(bp);
		/* Pay for the read. */
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	} else if (async) {
		brelse(bp);
	}

	mp = vp->v_type == VBLK ? vp->v_specmountpoint : vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr64_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr64_t blkno, int size, daddr64_t rablks[],
    int rasizes[], int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block. */
		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
	}

	/* Wait for the first (synchronous) read to complete. */
	return (biowait(bp));
}
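
/*
 * Usage sketch (under #if 0, never compiled; the caller is
 * hypothetical): a typical filesystem read of one block plus a single
 * read-ahead block.  breadn() sleeps only for the first block; the
 * read-ahead I/O completes in the background and is found in the
 * cache by a later getblk().
 */
#if 0
static int
example_read_with_readahead(struct vnode *vp, daddr64_t blkno, int bsize)
{
	struct buf *bp;
	daddr64_t rablk = blkno + 1;
	int rasize = bsize, error;

	error = breadn(vp, blkno, bsize, &rablk, &rasize, 1, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... consume bp->b_data ... */
	brelse(bp);
	return (0);
}
#endif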

/*
 * Called from interrupt context.
 */
void
bread_cluster_callback(struct buf *bp)
{
	int i;
	struct buf **xbpp;

	xbpp = (struct buf **)bp->b_saveaddr;

	for (i = 0; xbpp[i] != 0; i++) {
		if (ISSET(bp->b_flags, B_ERROR))
			SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
		biodone(xbpp[i]);
	}

	free(xbpp, M_TEMP);
	bp->b_pobj = NULL;
	buf_put(bp);
}

int
bread_cluster(struct vnode *vp, daddr64_t blkno, int size, struct buf **rbpp)
{
	struct buf *bp, **xbpp;
	int howmany, maxra, i, inc;
	daddr64_t sblkno;

	*rbpp = bio_doread(vp, blkno, size, 0);

	if (size != round_page(size))
		return (biowait(*rbpp));

	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
		return (biowait(*rbpp));

	maxra++;
	if (sblkno == -1 || maxra < 2)
		return (biowait(*rbpp));

	howmany = MAXPHYS / size;
	if (howmany > maxra)
		howmany = maxra;

	xbpp = malloc((howmany + 1) * sizeof(struct buf *), M_TEMP, M_NOWAIT);
	if (xbpp == NULL)
		return (biowait(*rbpp));

	for (i = 0; i < howmany; i++) {
		if (incore(vp, blkno + i + 1)) {
			for (--i; i >= 0; i--) {
				SET(xbpp[i]->b_flags, B_INVAL);
				brelse(xbpp[i]);
			}
			free(xbpp, M_TEMP);
			return (biowait(*rbpp));
		}
		xbpp[i] = buf_stub(vp, blkno + i + 1);
		if (xbpp[i] == NULL) {
			for (--i; i >= 0; i--) {
				SET(xbpp[i]->b_flags, B_INVAL);
				brelse(xbpp[i]);
			}
			free(xbpp, M_TEMP);
			return (biowait(*rbpp));
		}
	}

	xbpp[howmany] = 0;

	bp = getnewbuf(howmany * size, 0, 0, NULL);
	if (bp == NULL) {
		for (i = 0; i < howmany; i++) {
			SET(xbpp[i]->b_flags, B_INVAL);
			brelse(xbpp[i]);
		}
		free(xbpp, M_TEMP);
		return (biowait(*rbpp));
	}

	inc = btodb(size);

	for (i = 0; i < howmany; i++) {
		bcstats.pendingreads++;
		bcstats.numreads++;
		SET(xbpp[i]->b_flags, B_READ | B_ASYNC);
		binshash(xbpp[i], BUFHASH(vp, xbpp[i]->b_lblkno));
		xbpp[i]->b_blkno = sblkno + (i * inc);
		xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
		xbpp[i]->b_data = NULL;
		xbpp[i]->b_pobj = bp->b_pobj;
		xbpp[i]->b_poffs = bp->b_poffs + (i * size);
		buf_acquire_unmapped(xbpp[i]);
	}

	bp->b_blkno = sblkno;
	bp->b_lblkno = blkno + 1;
	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
	bp->b_saveaddr = (void *)xbpp;
	bp->b_iodone = bread_cluster_callback;
	bp->b_vp = vp;
	bcstats.pendingreads++;
	bcstats.numreads++;
	VOP_STRATEGY(bp);
	curproc->p_stats->p_ru.ru_inblock++;

	return (biowait(*rbpp));
}
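
/*
 * Layout sketch for bread_cluster() above: a single backing buffer bp
 * owns the pages for the whole transfer, while each stub xbpp[i]
 * points into those pages at its own offset:
 *
 *	bp pages:	[ blk+1 | blk+2 | ... | blk+howmany ]
 *	xbpp[i]->b_poffs = bp->b_poffs + i * size
 *	xbpp[i]->b_blkno = sblkno + i * btodb(size)
 *
 * One physical read is issued for bp; when it completes,
 * bread_cluster_callback() runs biodone() on every stub, so waiters
 * on the individual logical blocks are woken as if each block had
 * been read separately.
 */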

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(struct buf *bp)
{
	int rv, async, wasdelayed, s;
	struct vnode *vp;
	struct mount *mp;

	vp = bp->b_vp;
	if (vp != NULL)
		mp = vp->v_type == VBLK ? vp->v_specmountpoint : vp->v_mount;
	else
		mp = NULL;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	async = ISSET(bp->b_flags, B_ASYNC);
	if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async)
			mp->mnt_stat.f_asyncwrites++;
		else
			mp->mnt_stat.f_syncwrites++;
	}
	bcstats.pendingwrites++;
	bcstats.numwrites++;

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * If the write was delayed, the charge was paid when the buffer
	 * was dirtied; just make sure the buf is on the correct vnode
	 * queue.  We have to do this now, because if we don't, the vnode
	 * may not be properly notified that its I/O has completed.
	 */
	if (wasdelayed)
		reassignbuf(bp);
	else
		curproc->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);
	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (async)
		return (0);

	/*
	 * If I/O was synchronous, wait for it to complete.
	 */
	rv = biowait(bp);

	/* Release the buffer. */
	brelse(bp);

	return (rv);
}
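
/*
 * Usage sketch (under #if 0, never compiled; the caller is
 * hypothetical): a synchronous update, for a caller that must know
 * the data is on disk before proceeding.  On the synchronous path
 * bwrite() sleeps in biowait() and releases the buffer itself.
 */
#if 0
static int
example_sync_update(struct vnode *vp, daddr64_t blkno, int bsize)
{
	struct buf *bp;
	int error;

	if ((error = bread(vp, blkno, bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	/* ... modify bp->b_data ... */
	return (bwrite(bp));
}
#endif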

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	int s;

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list,
	 *	(4) If a buffer is rewritten, move it to end of dirty list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		bp->b_synctime = time_uptime + 35;
		s = splbio();
		reassignbuf(bp);
		splx(s);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
	} else {
		/*
		 * See if this buffer has slacked through the syncer
		 * and enforce an async write upon it.
		 */
		if (bp->b_synctime < time_uptime) {
			bawrite(bp);
			return;
		}
	}

	/* If this is a tape block, write the block now. */
	if (major(bp->b_dev) < nblkdev &&
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
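
/*
 * Usage sketch (under #if 0, never compiled; the caller is
 * hypothetical): the delayed-write pattern bdwrite() is meant for.
 * Several small updates to the same block are batched in memory;
 * only the final contents are pushed to disk later by the cleaner
 * or the syncer.
 */
#if 0
static int
example_delayed_update(struct vnode *vp, daddr64_t blkno, int bsize,
    int off, char byte)
{
	struct buf *bp;
	int error;

	if ((error = bread(vp, blkno, bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	((char *)bp->b_data)[off] = byte;	/* small partial update */
	bdwrite(bp);	/* marks B_DELWRI and releases; no I/O yet */
	return (0);
}
#endif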

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Must be called at splbio().
 */
void
buf_dirty(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (!ISSET(bp->b_flags, B_BUSY))
		panic("Trying to dirty buffer on freelist!");
#endif

	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
		SET(bp->b_flags, B_DELWRI);
		bp->b_synctime = time_uptime + 35;
		reassignbuf(bp);
	}
}

/*
 * Must be called at splbio().
 */
void
buf_undirty(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (!ISSET(bp->b_flags, B_BUSY))
		panic("Trying to undirty buffer on freelist!");
#endif
	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	if (bp->b_data != NULL)
		KASSERT(bp->b_bufsize > 0);

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If the buffer is invalid, place it in the clean queue, so it
		 * can be reused.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp)
			brelvp(bp);

		/*
		 * If the buffer has no associated data, place it back in the
		 * pool.
		 */
		if (bp->b_data == NULL && bp->b_pobj == NULL) {
			/*
			 * Wake up any processes waiting for _this_ buffer to
			 * become free. They are not allowed to grab it
			 * since it will be freed. But the only sleeper is
			 * getblk and it's restarting the operation after
			 * sleep.
			 */
			if (ISSET(bp->b_flags, B_WANTED)) {
				CLR(bp->b_flags, B_WANTED);
				wakeup(bp);
			}
			buf_put(bp);
			splx(s);
			return;
		}

		bcstats.numcleanpages += atop(bp->b_bufsize);
		if (maxcleanpages < bcstats.numcleanpages)
			maxcleanpages = bcstats.numcleanpages;
		binsheadfree(bp, &bufqueues[BQ_CLEAN]);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */

		if (!ISSET(bp->b_flags, B_DELWRI)) {
			bcstats.numcleanpages += atop(bp->b_bufsize);
			if (maxcleanpages < bcstats.numcleanpages)
				maxcleanpages = bcstats.numcleanpages;
			bufq = &bufqueues[BQ_CLEAN];
		} else {
			bcstats.numdirtypages += atop(bp->b_bufsize);
			bufq = &bufqueues[BQ_DIRTY];
		}
		if (ISSET(bp->b_flags, B_AGE)) {
			binsheadfree(bp, bufq);
			bp->b_synctime = time_uptime + 30;
		} else {
			binstailfree(bp, bufq);
			bp->b_synctime = time_uptime + 300;
		}
	}

	/* Unlock the buffer. */
	bcstats.freebufs++;
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
	buf_release(bp);

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	splx(s);
}

/*
 * Determine if a block is in the cache. Just look on what would be its hash
 * chain. If it's there, return a pointer to it, unless it's marked invalid.
 */
struct buf *
incore(struct vnode *vp, daddr64_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of the requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr64_t blkno, int size, int slpflag, int slptimeo)
{
	struct bufhashhdr *bh;
	struct buf *bp, *nb = NULL;
	int s, error;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	LIST_FOREACH(bp, bh, b_hash) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (nb != NULL) {
				SET(nb->b_flags, B_INVAL);
				binshash(nb, &invalhash);
				brelse(nb);
				nb = NULL;
			}
			SET(bp->b_flags, B_WANTED);
			error = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			bcstats.cachehits++;
			bremfree(bp);
			SET(bp->b_flags, B_CACHE);
			buf_acquire(bp);
			splx(s);
			break;
		}
		splx(s);
	}
	if (nb && bp) {
		SET(nb->b_flags, B_INVAL);
		binshash(nb, &invalhash);
		brelse(nb);
		nb = NULL;
	}
	if (bp == NULL && nb == NULL) {
		nb = getnewbuf(size, slpflag, slptimeo, &error);
		if (nb == NULL) {
			if (error == ERESTART || error == EINTR)
				return (NULL);
		}
		goto start;
	}
	if (nb) {
		bp = nb;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
#ifdef DIAGNOSTIC
	if (!ISSET(bp->b_flags, B_BUSY))
		panic("getblk buffer not B_BUSY");
#endif
	return (bp);
}
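
/*
 * Usage sketch (under #if 0, never compiled; the caller is
 * hypothetical): the classic getblk() pattern for a block that will
 * be completely overwritten, so its old contents need not be read
 * from disk.
 */
#if 0
static int
example_overwrite_block(struct vnode *vp, daddr64_t blkno, int bsize)
{
	struct buf *bp;

	bp = getblk(vp, blkno, bsize, 0, 0);
	if (bp == NULL)
		return (EINTR);		/* interrupted while sleeping */
	/* B_CACHE says whether the old contents were found in core. */
	bzero(bp->b_data, bsize);
	/* ... fill bp->b_data with the new contents ... */
	return (bwrite(bp));
}
#endif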

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(size, 0, 0, NULL)) == NULL)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);

	return (bp);
}

/*
 * Find a buffer which is available for use.
 */
struct buf *
getnewbuf(size_t size, int slpflag, int slptimeo, int *ep)
{
	struct buf *bp;
	int s;

#if 0		/* we would really like this but sblock update kills it */
	KASSERT(curproc != syncerproc && curproc != cleanerproc);
#endif

	s = splbio();
	/*
	 * Wake up cleaner if we're getting low on pages.
	 */
	if (bcstats.numdirtypages >= hidirtypages ||
	    bcstats.numcleanpages <= locleanpages)
		wakeup(&bd_req);

	/*
	 * If we're above the high water mark for clean pages,
	 * free down to the low water mark.
	 */
	if (bcstats.numcleanpages > hicleanpages) {
		while (bcstats.numcleanpages > locleanpages) {
			bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN]);
			bremfree(bp);
			if (bp->b_vp)
				brelvp(bp);
			bremhash(bp);
			buf_put(bp);
		}
	}

	/* we just ask. it can say no.. */
getsome:
	bp = buf_get(size);
	if (bp == NULL) {
		int freemax = 5;
		int i = freemax;
		while ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) && i--) {
			bremfree(bp);
			if (bp->b_vp)
				brelvp(bp);
			bremhash(bp);
			buf_put(bp);
		}
		if (freemax != i)
			goto getsome;
		splx(s);
		return (NULL);
	}

	bremfree(bp);
	/* Buffer is no longer on free lists. */
	bp->b_flags = 0;
	buf_acquire(bp);

	splx(s);

	/* clear out various other fields */
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = size;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Buffer cleaning daemon.
 */
void
buf_daemon(struct proc *p)
{
	struct timeval starttime, timediff;
	struct buf *bp;
	int s;

	cleanerproc = curproc;

	s = splbio();
	for (;;) {
		if (bcstats.numdirtypages < hidirtypages)
			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);

		getmicrouptime(&starttime);

		while ((bp = TAILQ_FIRST(&bufqueues[BQ_DIRTY]))) {
			struct timeval tv;

			if (bcstats.numdirtypages < lodirtypages)
				break;

			bremfree(bp);
			buf_acquire(bp);
			splx(s);

			if (ISSET(bp->b_flags, B_INVAL)) {
				brelse(bp);
				s = splbio();
				continue;
			}
#ifdef DIAGNOSTIC
			if (!ISSET(bp->b_flags, B_DELWRI))
				panic("Clean buffer on BQ_DIRTY");
#endif
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    !ISSET(bp->b_flags, B_DEFERRED) &&
			    buf_countdeps(bp, 0, 0)) {
				SET(bp->b_flags, B_DEFERRED);
				s = splbio();
				bcstats.numdirtypages += atop(bp->b_bufsize);
				binstailfree(bp, &bufqueues[BQ_DIRTY]);
				bcstats.freebufs++;
				buf_release(bp);
				continue;
			}

			bawrite(bp);

			/* Never allow processing to run for more than 1 sec */
			getmicrouptime(&tv);
			timersub(&tv, &starttime, &timediff);
			s = splbio();
			if (timediff.tv_sec)
				break;
		}
	}
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s;

	KASSERT(!(bp->b_flags & B_ASYNC));

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	}

	if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 *
 * Must be called at splbio().
 */
void
biodone(struct buf *bp)
{
	splassert(IPL_BIO);

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);

	if (!ISSET(bp->b_flags, B_READ)) {
		CLR(bp->b_flags, B_WRITEINPROG);
		vwakeup(bp->b_vp);
	}
	if (bcstats.numbufs &&
	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
		if (!ISSET(bp->b_flags, B_READ))
			bcstats.pendingwrites--;
		else
			bcstats.pendingreads--;
	}
	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}
}
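
/*
 * Usage sketch (under #if 0, never compiled; the driver is
 * hypothetical): biodone() is the completion half of the
 * VOP_STRATEGY() contract.  A disk driver finishes a transfer
 * roughly like this, at (or raised to) IPL_BIO.
 */
#if 0
static void
example_driver_complete(struct buf *bp, int hw_error)
{
	int s;

	s = splbio();
	if (hw_error) {
		bp->b_error = EIO;
		SET(bp->b_flags, B_ERROR);
	}
	bp->b_resid = 0;	/* whole transfer completed */
	biodone(bp);		/* wakes biowait() or calls b_iodone */
	splx(s);
}
#endif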