/*	$OpenBSD: vfs_bio.c,v 1.38 2001/05/05 20:57:01 art Exp $	*/
/*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/kernel.h>

#include <vm/vm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))
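
/*
 * An illustrative sketch (not part of the kernel proper): these macros
 * are the standard idiom used throughout this file for manipulating a
 * flag word such as bp->b_flags, e.g.:
 *
 *	SET(bp->b_flags, B_BUSY | B_READ);	mark busy for a read
 *	if (ISSET(bp->b_flags, B_BUSY))
 *		CLR(bp->b_flags, B_READ);	drop the read bit again
 *
 * Multiple flags may be combined with bitwise-or in a single call.
 */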

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;
int syncer_needbuffer;
struct bio_ops bioops;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

/*
 * We keep a few counters to monitor the utilization of the buffer cache.
 *
 *  numdirtybufs - number of dirty (B_DELWRI) buffers. unused.
 *  lodirtybufs  - ? unused.
 *  hidirtybufs  - ? unused.
 *  numfreebufs  - number of buffers on BQ_LRU and BQ_AGE. unused.
 *  numcleanbufs - number of clean (!B_DELWRI) buffers on BQ_LRU and BQ_AGE.
 *    Used to track the need to speed up the syncer and for the syncer reserve.
 *  numemptybufs - number of buffers on BQ_EMPTY. unused.
 *  mincleanbufs - the lowest number of clean buffers seen thus far.
 */
int numdirtybufs;	/* number of all dirty buffers */
int lodirtybufs, hidirtybufs;
int numfreebufs;	/* number of buffers on LRU+AGE free lists */
int numcleanbufs;	/* number of clean buffers on LRU+AGE free lists */
int numemptybufs;	/* number of buffers on EMPTY list */
int locleanbufs;
#ifdef DEBUG
int mincleanbufs;
#endif

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	if (bp->b_bufsize <= 0) {
		numemptybufs--;
	} else if (!ISSET(bp->b_flags, B_LOCKED)) {
		numfreebufs--;
		if (!ISSET(bp->b_flags, B_DELWRI)) {
			numcleanbufs--;
#ifdef DEBUG
			if (mincleanbufs > numcleanbufs)
				mincleanbufs = numcleanbufs;
#endif
		}
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
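
/*
 * A sketch of the tailq layout assumption bremfree() relies on (based on
 * the <sys/queue.h> implementation of this era): the queue head's tqh_last
 * points at the tqe_next field of the last element, so a buffer is the
 * last element of its queue exactly when some head dp satisfies
 *
 *	dp->tqh_last == &bp->b_freelist.tqe_next
 *
 * For any other element, TAILQ_REMOVE() never dereferences its head
 * argument, which is why passing dp == NULL is safe in that case.
 */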

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	numfreebufs = 0;
	numcleanbufs = 0;
	numemptybufs = 0;
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		LIST_INIT(&bp->b_dep);
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		if (bp->b_bufsize) {
			dp = &bufqueues[BQ_AGE];
			numfreebufs++;
			numcleanbufs++;
		} else {
			dp = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		}
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}

	hidirtybufs = nbuf / 4 + 20;
	numdirtybufs = 0;
	lodirtybufs = hidirtybufs / 2;

	/*
	 * Reserve 5% of bufs for the syncer's needs,
	 * but not more than 25% and if possible
	 * not less than 16 bufs.  locleanbufs must
	 * not be too small, but there is probably no
	 * reason to set it higher than 32.
	 */
	locleanbufs = nbuf / 20;
	if (locleanbufs < 16)
		locleanbufs = 16;
	if (locleanbufs > nbuf/4)
		locleanbufs = nbuf / 4;
#ifdef DEBUG
	mincleanbufs = locleanbufs;
#endif
}
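
/*
 * A worked example of the arithmetic above (numbers hypothetical): with
 * bufpages = 100 and nbuf = 32, base = 100 / 32 = 3 and
 * residual = 100 % 32 = 4, so the first 4 buffers get
 * (3 + 1) * PAGE_SIZE of memory and the remaining 28 get 3 * PAGE_SIZE,
 * accounting for exactly 100 pages.  The locleanbufs reserve is then
 * clamped: nbuf / 20 = 1 is raised to the floor of 16, which in turn
 * exceeds nbuf / 4 = 8, so on such a small nbuf the 25% cap wins and
 * locleanbufs ends up as 8.
 */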

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
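
/*
 * A minimal usage sketch (hypothetical caller; names lbn/size are
 * placeholders): a synchronous read through bread() must be balanced by
 * a release once the caller is done with the data.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);		release the buffer even on error
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	brelse(bp);			or bdwrite()/bwrite() if modified
 */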

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, async, wasdelayed, s;
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	async = ISSET(bp->b_flags, B_ASYNC);
	if (!async && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (async)
				mp->mnt_stat.f_asyncwrites++;
			else
				mp->mnt_stat.f_syncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * If not synchronous, pay for the I/O operation and make
	 * sure the buf is on the correct vnode queue.  We have
	 * to do this now, because if we don't, the vnode may not
	 * be properly notified that its I/O has completed.
	 */
	if (wasdelayed) {
		--numdirtybufs;
		reassignbuf(bp);
	} else
		curproc->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);
	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (async)
		return (0);

	/*
	 * If I/O was synchronous, wait for it to complete.
	 */
	rv = biowait(bp);

	/* Release the buffer. */
	brelse(bp);

	return (rv);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	int s;

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write.
	 *	(3) Make sure it's on its vnode's correct block list,
	 *	(4) If a buffer is rewritten, move it to end of dirty list
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		s = splbio();
		reassignbuf(bp);
		++numdirtybufs;
		splx(s);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
	}

	/* If this is a tape block, write the block now. */
	if (major(bp->b_dev) < nblkdev &&
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
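
/*
 * A sketch of the intended use (hypothetical caller; src/off/len are
 * placeholders): read a block, modify part of it, and let the update
 * linger in the cache for the syncer to write out later.
 *
 *	if ((error = bread(vp, lbn, size, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(src, bp->b_data + off, len);
 *	bdwrite(bp);		marks B_DELWRI and releases the buffer
 *
 * If the data must reach stable storage before returning (e.g. for
 * fsync(2) semantics), bwrite() should be used instead.
 */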

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Must be called at splbio()
 */
void
buf_dirty(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
		SET(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
		++numdirtybufs;
#ifdef DIAGNOSTIC
		if (numdirtybufs > nbuf)
			panic("buf_dirty: incorrect number of dirty bufs");
#endif
	}
}

/*
 * Must be called at splbio()
 */
void
buf_undirty(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
		--numdirtybufs;
#ifdef DIAGNOSTIC
		if (numdirtybufs < 0)
			panic("buf_undirty: incorrect number of dirty bufs");
#endif
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			--numdirtybufs;
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
			numfreebufs++;
			numcleanbufs++;
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreebufs++;
			if (!ISSET(bp->b_flags, B_DELWRI))
				numcleanbufs++;
			if (ISSET(bp->b_flags, B_AGE))
				/* stale but valid data */
				bufq = buf_countdeps(bp, 0, 1) ?
				    &bufqueues[BQ_LRU] : &bufqueues[BQ_AGE];
			else
				/* valid data */
				bufq = &bufqueues[BQ_LRU];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);

	/* Wake up syncer process waiting for buffers */
	if (syncer_needbuffer) {
		wakeup(&syncer_needbuffer);
		syncer_needbuffer = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanbufs > locleanbufs)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
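
/*
 * The queue selection above, summarized (informational only):
 *
 *	no memory (invalid or empty)	-> head of BQ_EMPTY
 *	invalid, has memory		-> head of BQ_AGE
 *	valid and B_LOCKED		-> tail of BQ_LOCKED
 *	valid, B_AGE, has dependencies	-> tail of BQ_LRU (avoids livelock)
 *	valid and B_AGE			-> tail of BQ_AGE
 *	valid				-> tail of BQ_LRU
 *
 * Head insertions make a buffer the next reclaim candidate; tail
 * insertions let it survive in the cache as long as possible.
 */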

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);

	return (bp);
}
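
/*
 * A behavioral sketch (hypothetical caller): getblk() returns the buffer
 * marked B_BUSY, so the caller owns it until it is released.
 *
 *	bp = getblk(vp, lbn, size, 0, 0);
 *	if (ISSET(bp->b_flags, B_CACHE)) {
 *		... contents are valid, use them ...
 *	} else {
 *		... fill bp->b_data, e.g. via VOP_STRATEGY() ...
 *	}
 *	brelse(bp);
 *
 * With a nonzero slpflag/slptimeo the internal tsleep() may fail, in
 * which case getblk() returns NULL and the caller must cope.
 */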

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf	*nbp;
	vsize_t		desired_size;
	int		s;

	desired_size = round_page(size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = MIN(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
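
/*
 * A worked example of the grow path above (numbers hypothetical): suppose
 * bp has b_bufsize = 1 page and desired_size = 3 pages.  getnewbuf()
 * yields some nbp with, say, 4 pages; amt = MIN(4, 3 - 1) = 2 pages, so
 * pagemove() shifts the last 2 pages of nbp's mapping onto the end of
 * bp's.  After the transfer bp->b_bufsize = 3 pages and the loop exits;
 * nbp keeps its remaining 2 pages and is released as an invalid buffer
 * for later reuse.
 */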

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	/*
	 * If we're getting low on buffers, kick the syncer to work harder.
	 */
	if (numcleanbufs < locleanbufs + min(locleanbufs, 4))
		speedup_syncer();

	if ((numcleanbufs <= locleanbufs) && curproc != syncerproc) {
		/* wait for a free buffer of any kind */
		needbuffer++;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}
	if ((bp = bufqueues[BQ_AGE].tqh_first) == NULL &&
	    (bp = bufqueues[BQ_LRU].tqh_first) == NULL) {
		/* wait for a free buffer of any kind */
		syncer_needbuffer = 1;
		tsleep(&syncer_needbuffer, slpflag|(PRIBIO-3), "getnewbuf",
			slptimeo);
		splx(s);
		return (0);
	}

	bremfree(bp);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		return (0);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_deallocate(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	}

	if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);

	if (!ISSET(bp->b_flags, B_READ)) {
		CLR(bp->b_flags, B_WRITEINPROG);
		vwakeup(bp->b_vp);
	}

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}
}

#ifdef DEBUG
/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}
#endif /* DEBUG */

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/PAGE_SIZE+1];
	int totals[BQUEUES];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	s = splbio();
	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		totals[i] = count;
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
	if (totals[BQ_EMPTY] != numemptybufs)
		printf("numemptybufs counter wrong: %d != %d\n",
			totals[BQ_EMPTY], numemptybufs);
	if ((totals[BQ_LRU] + totals[BQ_AGE]) != numfreebufs)
		printf("numfreebufs counter wrong: %d != %d\n",
			totals[BQ_LRU] + totals[BQ_AGE], numfreebufs);
	if ((totals[BQ_LRU] + totals[BQ_AGE]) < numcleanbufs ||
	    (numcleanbufs < 0))
		printf("numcleanbufs counter wrong: %d < %d\n",
			totals[BQ_LRU] + totals[BQ_AGE], numcleanbufs);
	printf("numcleanbufs: %d\n", numcleanbufs);
	printf("syncer eating up to %d bufs from %d reserved\n",
			locleanbufs - mincleanbufs, locleanbufs);
	printf("numdirtybufs: %d\n", numdirtybufs);
	splx(s);
}
#endif /* DEBUG */