/*	$NetBSD: vfs_bio.c,v 1.77 2001/11/12 15:25:35 lukem Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.77 2001/11/12 15:25:35 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */
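
/*
 * Illustrative sketch (not compiled): how BUFHASH spreads lookups over
 * the chains.  The vnode pointer is shifted right by 8 because kernel
 * pointers are aligned, so the low bits carry little entropy; adding
 * the logical block number separates blocks of the same file.  The
 * vnode address and chain count below are hypothetical.
 */
#if 0
void
bufhash_example(void)
{
	long vaddr = 0xc0a4f200L;	/* hypothetical vnode address */
	u_long mask = 63;		/* hashinit() mask for 64 chains */
	int lbn = 12;			/* logical block 12 of that file */
	u_long chain = ((vaddr >> 8) + lbn) & mask;

	printf("block hashes to chain %lu of 64\n", chain);
}
#endif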

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;
	int s;

	s = splbio();

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
	splx(s);
}
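
/*
 * Illustrative sketch (not compiled): the tail-pointer trick bremfree()
 * relies on.  In a TAILQ, the head's tqh_last points at the tqe_next
 * field of the final element, so a queue whose last element is `bp'
 * satisfies dp->tqh_last == &bp->b_freelist.tqe_next.  The three-buffer
 * queue below is hypothetical.
 */
#if 0
void
tailq_tail_example(void)
{
	struct bqueues q;
	struct buf a, b, c;

	TAILQ_INIT(&q);
	TAILQ_INSERT_TAIL(&q, &a, b_freelist);
	TAILQ_INSERT_TAIL(&q, &b, b_freelist);
	TAILQ_INSERT_TAIL(&q, &c, b_freelist);

	/* Only the last element's tqe_next is NULL... */
	KASSERT(c.b_freelist.tqe_next == NULL);
	/* ...and the head's tqh_last points into that last element. */
	KASSERT(q.tqh_last == &c.b_freelist.tqe_next);
}
#endif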

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
	    NULL, NULL, M_DEVBUF);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
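
/*
 * Illustrative sketch (not compiled): the base/residual arithmetic above
 * hands every buffer `base' pages and gives the first `residual' buffers
 * one extra page, so the pages sum back to bufpages exactly.  The values
 * below (100 pages over 32 buffers) are hypothetical.
 */
#if 0
void
bufpages_split_example(void)
{
	int bufpages_ex = 100, nbuf_ex = 32;
	int base = bufpages_ex / nbuf_ex;	/* 3 pages each */
	int residual = bufpages_ex % nbuf_ex;	/* 4 buffers get a 4th */
	int total = residual * (base + 1) + (nbuf_ex - residual) * base;

	KASSERT(total == bufpages_ex);		/* 4*4 + 28*3 == 100 */
}
#endif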

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid only if its I/O has completed or been
	 * delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
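
/*
 * Illustrative sketch (not compiled): the usual consumer pattern for
 * bread().  The buffer comes back busy whether or not the read worked,
 * so the caller must brelse() it on both paths.  The vnode, block
 * number and size below are hypothetical.
 */
#if 0
int
bread_example(struct vnode *vp)
{
	struct buf *bp;
	int error;

	if ((error = bread(vp, (daddr_t)0, MAXBSIZE, NOCRED, &bp)) != 0) {
		brelse(bp);		/* release even on error */
		return (error);
	}
	/* ... inspect bp->b_data here ... */
	brelse(bp);			/* done; back onto the free lists */
	return (0);
}
#endif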

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
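
/*
 * Illustrative sketch (not compiled): asking breadn() to fetch block 0
 * synchronously while blocks 1 and 2 are started asynchronously behind
 * it.  Only the first block is waited for; the others land in the cache
 * for later bread() calls.  All values are hypothetical.
 */
#if 0
int
breadn_example(struct vnode *vp, struct buf **bpp)
{
	daddr_t rablks[2] = { 1, 2 };
	int rasizes[2] = { MAXBSIZE, MAXBSIZE };

	return (breadn(vp, (daddr_t)0, MAXBSIZE, rablks, rasizes, 2,
	    NOCRED, bpp));
}
#endif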

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
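
/*
 * Illustrative sketch (not compiled): the three-way fate bwrite()
 * chooses above, written out as a table.  A synchronous write on an
 * MNT_ASYNC mount is quietly downgraded to a delayed write; everything
 * else goes to the driver, with only the synchronous case waiting.
 */
#if 0
enum bwrite_fate { FATE_DELAY, FATE_ASYNC, FATE_SYNC };

enum bwrite_fate
bwrite_fate_example(int b_flags, int mnt_flag)
{
	int sync = !ISSET(b_flags, B_ASYNC);

	if (sync && ISSET(mnt_flag, MNT_ASYNC))
		return (FATE_DELAY);	/* becomes bdwrite() */
	return (sync ? FATE_SYNC : FATE_ASYNC);
}
#endif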

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX       number 255, which is a bad idea.		*/
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}
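
/*
 * Illustrative sketch (not compiled): the read-modify-delayed-write
 * cycle bdwrite() is meant for.  A small update that will likely be
 * followed by more writes to the same block is left dirty in the cache
 * instead of being pushed to disk.  Offsets and values are hypothetical.
 */
#if 0
int
bdwrite_example(struct vnode *vp)
{
	struct buf *bp;
	int error;

	if ((error = bread(vp, (daddr_t)0, MAXBSIZE, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	((char *)bp->b_data)[0] = 1;	/* small partial update */
	bdwrite(bp);			/* mark dirty, keep in cache */
	return (0);
}
#endif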

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but
 * do not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buf is AGE, but has dependencies, we must put it on
		 * the last bufqueue to be scanned, i.e. LRU.  This protects
		 * against the livelock where BQ_AGE only has buffers with
		 * dependencies, and we thus never get to the dependent
		 * buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	splx(s);
}
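
/*
 * Illustrative sketch (not compiled): the queue-selection policy of
 * brelse() condensed into one function.  `has_deps' stands in for the
 * bioops.io_countdeps check; buffer size and flags are hypothetical.
 */
#if 0
int
brelse_queue_example(struct buf *bp, int has_deps)
{
	if (bp->b_bufsize <= 0)
		return (BQ_EMPTY);	/* header only, no pages */
	if (ISSET(bp->b_flags, B_INVAL))
		return (BQ_AGE);	/* pages, but stale contents */
	if (ISSET(bp->b_flags, B_LOCKED))
		return (BQ_LOCKED);	/* pinned in core */
	if (!ISSET(bp->b_flags, B_AGE))
		return (BQ_LRU);	/* valid, recently used */
	/* aged: reuse soon, unless dependencies would livelock BQ_AGE */
	return (has_deps ? BQ_LRU : BQ_AGE);
}
#endif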

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of the requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (curproc == uvm.pagedaemon_proc) {
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
				     slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) && bp->b_bcount < size)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		splx(s);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
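
/*
 * Illustrative sketch (not compiled): using getblk() when the caller
 * intends to overwrite the whole block, so no read is needed.  clrbuf()
 * zeroes the fresh buffer before it is filled and written.  The block
 * number and size are hypothetical.
 */
#if 0
int
getblk_example(struct vnode *vp)
{
	struct buf *bp;

	bp = getblk(vp, (daddr_t)0, MAXBSIZE, 0, 0);
	if (bp == NULL)
		return (EAGAIN);	/* pagedaemon contention case */
	clrbuf(bp);			/* contents undefined until filled */
	/* ... fill bp->b_data ... */
	return (bwrite(bp));
}
#endif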

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
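
/*
 * Illustrative sketch (not compiled): allocbuf() sizing on a machine
 * with 4KB pages (hypothetical).  A 3000-byte request rounds up to one
 * whole page of backing store, while b_bcount records the exact byte
 * count the caller asked for.
 */
#if 0
void
allocbuf_size_example(void)
{
	vsize_t desired = round_page((vsize_t)3000);

	KASSERT(desired == 4096);	/* with PAGE_SIZE == 4096 */
	/* after allocbuf(bp, 3000): b_bufsize == 4096, b_bcount == 3000 */
}
#endif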

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* on write, note completion */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
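
/*
 * Illustrative sketch (not compiled): the B_CALL hand-off biodone()
 * honours above.  A system process such as the pagedaemon sets b_iodone
 * before issuing the I/O, and biodone() invokes it instead of waking a
 * sleeper or calling brelse().  example_iodone is hypothetical.
 */
#if 0
void
example_iodone(struct buf *bp)
{
	/* reclaim the buffer ourselves; brelse() is not called for us */
}

void
biodone_callback_example(struct buf *bp)
{
	bp->b_iodone = example_iodone;
	SET(bp->b_flags, B_CALL | B_ASYNC);
	VOP_STRATEGY(bp);		/* example_iodone runs at completion */
}
#endif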

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */