/*	$NetBSD: vfs_bio.c,v 1.49 1996/10/15 23:06:27 cgd Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))
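
/*
 * For example, a caller marks a buffer busy, tests for the flag, and
 * clears it again with:
 *
 *	SET(bp->b_flags, B_BUSY);
 *	if (ISSET(bp->b_flags, B_BUSY))
 *		...
 *	CLR(bp->b_flags, B_BUSY);
 */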

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */
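
/*
 * getnewbuf() below reclaims from BQ_AGE first, then from BQ_LRU;
 * buffers on BQ_LOCKED are never reclaimed, and BQ_EMPTY holds headers
 * whose pages have been stolen by allocbuf().
 */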

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
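
/*
 * Illustrative caller (hypothetical; vp, lbn, and bsize are made up):
 * a file system reads one logical block and releases it when done:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_data...
 *	brelse(bp);
 */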

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait for the (synchronous) read of the first block to complete. */
	return (biowait(bp));
}
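
/*
 * Illustrative caller (hypothetical block numbers): read block lbn and
 * request asynchronous read-ahead of the two blocks that follow it:
 *
 *	daddr_t rablks[2];
 *	int rasizes[2];
 *
 *	rablks[0] = lbn + 1; rablks[1] = lbn + 2;
 *	rasizes[0] = rasizes[1] = bsize;
 *	error = breadn(vp, lbn, bsize, rablks, rasizes, 2, NOCRED, &bp);
 */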

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	int s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/* If this is a tape block, write the block now. */
	if (bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
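
/*
 * Illustrative caller (hypothetical): modify a few bytes of a block
 * that will likely be written again before it must reach the disk:
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(data, (char *)bp->b_data + off, len);
 *	bdwrite(bp);
 */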

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put it on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  Invalid buffers are
 * never returned from here; getblk() does its own inlined search so
 * that it can treat them specially.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
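
/*
 * Illustrative sketch (hypothetical caller): obtain a buffer and start
 * a read only if it did not come from the cache with valid contents.
 * (bio_doread() above is the canonical version of this pattern; it
 * keys off B_DONE/B_DELWRI rather than B_CACHE.)
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
 *		SET(bp->b_flags, B_READ);
 *		VOP_STRATEGY(bp);
 *		error = biowait(bp);
 *	}
 */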

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vm_size_t desired_size;
	int s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
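
/*
 * Sizing example (assuming a machine where CLBYTES is 4096): a request
 * for size 6000 rounds up to a desired_size of 8192; pages are stolen
 * from other buffers until b_bufsize reaches 8192, after which b_bcount
 * is set back to the caller's 6000.
 */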

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}
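
/*
 * Note that getnewbuf() returns NULL both when its sleep is interrupted
 * and when it simply found no free buffer, so callers that cannot fail
 * retry in a loop, as geteblk() and allocbuf() above do:
 *
 *	while ((bp = getnewbuf(0, 0)) == NULL)
 *		;
 */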

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* wake up writer */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
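
/*
 * Illustrative sketch (hypothetical driver): a disk driver's interrupt
 * handler finishes a transfer by recording any error state and calling
 * biodone(), which releases async buffers or wakes a biowait()er:
 *
 *	if (hard_error) {
 *		bp->b_error = EIO;
 *		SET(bp->b_flags, B_ERROR);
 *	}
 *	bp->b_resid = 0;
 *	biodone(bp);
 */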

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */