/*-
 * Copyright (c) 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * Berkeley Software Design Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_bio.c	8.8 (Berkeley) 01/24/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
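
/*
 * For reference, the TAILQ_REMOVE above expands to roughly the
 * following (a sketch of the 4.4BSD <sys/queue.h> macro, quoted
 * from memory, so treat the details as an assumption):
 *
 *	if (bp->b_freelist.tqe_next != NULL)
 *		bp->b_freelist.tqe_next->b_freelist.tqe_prev =
 *		    bp->b_freelist.tqe_prev;
 *	else
 *		dp->tqh_last = bp->b_freelist.tqe_prev;
 *	*bp->b_freelist.tqe_prev = bp->b_freelist.tqe_next;
 *
 * The queue head dp is dereferenced only when the element being
 * removed is last, which is why bremfree() searches for the head
 * only in that case.
 */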

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
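	/*
	 * Worked example (illustrative numbers, not from the source):
	 * with bufpages = 205 and nbuf = 100, base = 2 and residual = 5,
	 * so the first 5 buffers below get (base + 1) * CLBYTES of memory
	 * and the remaining 95 get base * CLBYTES, consuming exactly
	 * bufpages clusters in total.
	 */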
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
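
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * a filesystem typically reads a logical block through the cache
 * and releases the buffer once it is done with the contents:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, size, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use the data at bp->b_data...
 *	brelse(bp);
 */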

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size, 0, 0);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else {
			trace(TR_BREADHIT, pack(vp, size), blkno);
		}
	}

	/*
	 * If there are read-ahead blocks, start I/O
	 * on them as well (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
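
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * read a block and request one block of read-ahead behind it:
 *
 *	struct buf *bp;
 *	daddr_t ralbn = lbn + 1;
 *	int rasize = size, error;
 *
 *	error = breadn(vp, lbn, size, &ralbn, &rasize, 1, NOCRED, &bp);
 */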

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	if ((bp->b_flags & B_ASYNC) == 0 &&
	    bp->b_vp && (bp->b_vp->v_mount->mnt_flag & MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}
	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	bp->b_flags |= B_WRITEINPROG;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		if (bp->b_flags & B_EINTR) {
			bp->b_flags &= ~B_EINTR;
			error = EINTR;
		}
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}
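
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * allocate a buffer for a block, fill it, and write it through
 * synchronously:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	bp = getblk(vp, lbn, size, 0, 0);
 *	...fill bp->b_data...
 *	error = bwrite(bp);
 */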

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}


/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}
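
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * the classic read-modify-write of part of a block, deferring the
 * actual write:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, size, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...modify part of the data at bp->b_data...
 *	bdwrite(bp);
 */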

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct bqueues *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (needbuffer) {
		needbuffer = 0;
		wakeup((caddr_t)&needbuffer);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bufqueues[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bufqueues[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bufqueues[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bufqueues[BQ_AGE];
		else
			flist = &bufqueues[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;

	for (bp = BUFHASH(vp, blkno)->lh_first; bp; bp = bp->b_hash.le_next)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (bp);
	return (NULL);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	register struct buf *bp;
	struct bufhashhdr *dp;
	int s, error;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently busy, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"getblk", slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto loop;
		}
		/*
		 * The test for B_INVAL is moved down here, since there
		 * are cases where B_INVAL is set before VOP_BWRITE() is
		 * called and, for NFS, the process cannot be allowed to
		 * allocate a new buffer for the same block until the
		 * write back to the server has completed (i.e. B_BUSY
		 * has cleared).
		 */
		if (bp->b_flags & B_INVAL) {
			splx(s);
			continue;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			VOP_BWRITE(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/*
	 * The loop back to the top when getnewbuf() fails is because
	 * stateless filesystems like NFS have no node locks. Thus,
	 * there is a slight chance that more than one process will
	 * try to getnewbuf() for the same block concurrently when
	 * the first sleeps in getnewbuf(). So after a sleep, go back
	 * up to the top to check the hash lists again.
	 */
	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
		goto loop;
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	while ((bp = getnewbuf(0, 0)) == NULL)
		/* void */;
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	binshash(bp, &invalhash);
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	allocbuf(bp, size);
	return (bp);
}
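
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * obtain a buffer that is not associated with any block, use its
 * memory as scratch space, and release it:
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(MAXBSIZE);
 *	...use bp->b_data...
 *	brelse(bp);
 */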

/*
 * Expand or contract the actual memory allocated to a buffer.
 * Memory needed to grow a buffer is taken from other buffers,
 * sleeping in getnewbuf() if necessary until enough is found.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change.
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		if ((ep = bufqueues[BQ_EMPTY].tqh_first) == NULL)
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		while ((bp = getnewbuf(0, 0)) == NULL)
			/* void */;
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
		    &((char *)tp->b_data)[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize -= take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &invalhash);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
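
/*
 * Worked example (illustrative numbers, not from the source):
 * shrinking a buffer from b_bufsize 8192 to size 4096 with CLBYTES
 * of 1024 gives sizealloc = 4096; the upper 4096 bytes are moved by
 * pagemove() into an empty header ep, which is marked B_INVAL and
 * released to the AGE list, while tp keeps the lower 4096 bytes and
 * its b_bcount is set to 4096.
 */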

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	register struct bqueues *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
		if (dp->tqh_first)
			break;
	if (dp == bufqueues) {		/* no free blocks */
		needbuffer = 1;
		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
			"getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}
	bp = dp->tqh_first;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. by the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
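
/*
 * Example usage (a hypothetical sketch, not part of this file):
 * a caller that wants notification instead of sleeping in biowait()
 * sets B_CALL and b_iodone before starting the I/O; biodone() then
 * runs the callback at I/O-completion time:
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 *
 * where mydone() is a hypothetical completion routine of the form
 * void mydone(struct buf *bp).
 */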

/*
 * Return the number of buffers on the locked free list.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int ret;

	for (ret = 0, bp = bufqueues[BQ_LOCKED].tqh_first;
	    bp; bp = bp->b_freelist.tqe_next)
		++ret;
	return (ret);
}

#ifdef DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */