/*	$NetBSD: vfs_bio.c,v 1.67 2000/04/12 11:33:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
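/*
 * The hash key combines the vnode pointer (divided by the vnode size
 * so that distinct vnodes yield distinct small integers) with the
 * logical block number; bufhash is the power-of-two-minus-one mask
 * set up by hashinit() in bufinit() below.
 */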
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();

	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
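	 * Specifically, a TAILQ's tqh_last points at the tqe_next field
	 * of the final element, so comparing each queue's tqh_last
	 * against &bp->b_freelist.tqe_next finds the queue whose tail
	 * is bp.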
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);

	splx(s);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
	    NULL, NULL, M_DEVBUF);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
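
	/*
	 * Illustrative example of the sizing arithmetic (numbers
	 * assumed): with bufpages == 10 and nbuf == 4, base == 2 and
	 * residual == 2, so the first two buffers get three pages of
	 * memory each and the remaining two get two pages each.
	 */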
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * NBPG;
		else
			bp->b_bufsize = base * NBPG;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
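
/*
 * Illustrative sketch (not part of this file): the usual caller-side
 * pattern for bread().  vp, lbn and bsize are assumed to come from
 * the calling file system.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine or modify bp->b_data...
 *	brelse(bp);
 */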

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
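
/*
 * Illustrative sketch (names assumed): read logical block lbn
 * synchronously while prefetching the following block asynchronously.
 *
 *	daddr_t rablk = lbn + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 */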

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (sync)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
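
/*
 * Illustrative sketch (names assumed): a synchronous metadata update
 * reads the block, modifies it in core, and writes it back before
 * proceeding; for sync writes, bwrite() releases the buffer once the
 * I/O completes.
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...modify bp->b_data...
 *	error = bwrite(bp);
 */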

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX       number 255, which is a bad idea.		*/
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}
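
/*
 * Illustrative sketch (names assumed): a caller expecting to touch the
 * same block again soon marks it dirty and lets it sit in the cache
 * rather than forcing the I/O immediately.
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...apply a small update to bp->b_data...
 *	bdwrite(bp);
 */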

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do
 * not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buffer is AGE but has dependencies, it must go on
		 * the last bufqueue to be scanned, i.e. LRU.  This protects
		 * against a livelock where BQ_AGE holds only buffers with
		 * dependencies and we thus never get to the dependent
		 * buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid; buffers marked
 * B_INVAL are never returned.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
#ifdef DIAGNOSTIC
			if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
			    bp->b_bcount < size)
				panic("getblk: block size invariant failed");
#endif
			SET(bp->b_flags, B_BUSY);
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
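
/*
 * Illustrative sketch (names assumed): creating and initializing a
 * brand-new block through the cache rather than reading it from disk.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	clrbuf(bp);
 *	...fill in bp->b_data...
 *	bwrite(bp);
 */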

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it is the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = roundup(size, NBPG);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
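
/*
 * Illustrative example (numbers assumed): with NBPG == 4096, growing a
 * buffer from one page to three loops stealing pages of KVA from donor
 * buffers via pagemove() until 8192 more bytes are attached; shrinking
 * from three pages to one moves the trailing 8192 bytes onto an empty
 * buffer header, which is invalidated and released to the front of the
 * AGE queue.
 */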

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is given to the AGE list, then the LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up writer */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
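
/*
 * Illustrative sketch of the B_CALL path (callback name assumed): a
 * caller that cannot sleep in biowait() arranges a completion routine
 * instead.
 *
 *	SET(bp->b_flags, B_CALL);
 *	bp->b_iodone = my_iodone;
 *	VOP_STRATEGY(bp);
 *
 * When the I/O finishes, biodone() clears B_CALL and invokes
 * my_iodone(bp) at splbio(); the callback is then responsible for
 * releasing the buffer.
 */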

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[MAXBSIZE/NBPG+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/NBPG]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * NBPG, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */