1 /*	$NetBSD: vfs_bio.c,v 1.115 2004/02/11 17:36:31 tls Exp $	*/
2 
3 /*-
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
37  */
38 
39 /*-
40  * Copyright (c) 1994 Christopher G. Demetriou
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
71  */
72 
73 /*
74  * Some references:
75  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
77  *		UNIX Operating System (Addison Wesley, 1989)
78  */
79 
80 #include "opt_bufcache.h"
81 #include "opt_softdep.h"
82 
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.115 2004/02/11 17:36:31 tls Exp $");
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/buf.h>
91 #include <sys/vnode.h>
92 #include <sys/mount.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sysctl.h>
96 #include <sys/conf.h>
97 
98 #include <uvm/uvm.h>
99 
100 #include <miscfs/specfs/specdev.h>
101 
102 #ifndef	BUFPAGES
103 # define BUFPAGES 0
104 #endif
105 
106 #ifdef BUFCACHE
107 # if (BUFCACHE < 5) || (BUFCACHE > 95)
108 #  error BUFCACHE is not between 5 and 95
109 # endif
110 #else
111 # define BUFCACHE 15
112 #endif
113 
114 u_int	nbuf;			/* XXX - for softdep_lockedbufs */
115 u_int	bufpages = BUFPAGES;	/* optional hardwired count */
116 u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */
117 
118 
119 /* Macros to clear/set/test flags. */
120 #define	SET(t, f)	(t) |= (f)
121 #define	CLR(t, f)	(t) &= ~(f)
122 #define	ISSET(t, f)	((t) & (f))
123 
124 /*
125  * Definitions for the buffer hash lists.
126  */
127 #define	BUFHASH(dvp, lbn)	\
128 	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
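/*
 * Note: BUFHASH() hashes on the vnode pointer (shifted to discard the
 * low, always-zero bits) plus the logical block number; bufhash is the
 * table size minus one, so it doubles as the mask.
 */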
129 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
130 u_long	bufhash;
131 #ifndef SOFTDEP
132 struct bio_ops bioops;	/* I/O operation notification */
133 #endif
134 
135 /*
136  * Insq/Remq for the buffer hash lists.
137  */
138 #define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
139 #define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
140 
141 /*
142  * Definitions for the buffer free lists.
143  */
144 #define	BQUEUES		3		/* number of free buffer queues */
145 
146 #define	BQ_LOCKED	0		/* super-blocks &c */
147 #define	BQ_LRU		1		/* lru, useful buffers */
148 #define	BQ_AGE		2		/* rubbish */
149 
150 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
151 int needbuffer;
152 
153 /*
154  * Buffer queue lock.
155  * Take this lock first if also taking some buffer's b_interlock.
156  */
157 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
158 
159 /*
160  * Buffer pool for I/O buffers.
161  */
162 struct pool bufpool;
163 
164 /* XXX - somewhat gross.. */
165 #if MAXBSIZE == 0x2000
166 #define NMEMPOOLS 4
167 #elif MAXBSIZE == 0x4000
168 #define NMEMPOOLS 5
169 #elif MAXBSIZE == 0x8000
170 #define NMEMPOOLS 6
171 #else
172 #define NMEMPOOLS 7
173 #endif
174 
175 #define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
176 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
177 #error update vfs_bio buffer memory parameters
178 #endif
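/*
 * Illustrative note: buffer data is carved from NMEMPOOLS pools whose
 * sizes are powers of two, from 1 << MEMPOOL_INDEX_OFFSET (1k) up to
 * MAXBSIZE.  For example, with MAXBSIZE == 0x10000 the pools are
 * 1k, 2k, 4k, 8k, 16k, 32k and 64k (NMEMPOOLS == 7).
 */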
179 
180 /* Buffer memory pools */
181 static struct pool bmempools[NMEMPOOLS];
182 
183 struct vm_map *buf_map;
184 
185 /*
186  * Buffer memory pool allocator.
187  */
188 static void *
189 bufpool_page_alloc(struct pool *pp, int flags)
190 {
191 
192 	return (void *)uvm_km_kmemalloc1(buf_map,
193 	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
194 	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
195 }
196 
197 static void
198 bufpool_page_free(struct pool *pp, void *v)
199 {
200 	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
201 }
202 
203 static struct pool_allocator bufmempool_allocator = {
204 	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
205 };
206 
207 /* Buffer memory management variables */
208 u_long bufmem_valimit;
209 u_long bufmem_hiwater;
210 u_long bufmem_lowater;
211 u_long bufmem;
212 
213 /*
214  * MD code can call this to set a hard limit on the amount
215  * of virtual memory used by the buffer cache.
216  */
217 int
218 buf_setvalimit(vsize_t sz)
219 {
220 
221 	/* We need room for at least NMEMPOOLS pools of MAXBSIZE bytes each */
222 	if (sz < NMEMPOOLS * MAXBSIZE)
223 		return EINVAL;
224 
225 	bufmem_valimit = sz;
226 	return 0;
227 }
228 
229 static int buf_trim(void);
230 
231 /*
232  * bread()/breadn() helper.
233  */
234 static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
235 					struct ucred *, int);
236 int count_lock_queue(void);
237 
238 /*
239  * Insq/Remq for the buffer free lists.
240  * Call with buffer queue locked.
241  */
242 #define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
243 #define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)
244 
245 #ifdef DEBUG
246 int debug_verify_freelist = 0;
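/* Return 1 iff bp is found on the free list dp (debug-only sanity check). */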
247 static int checkfreelist(struct buf *bp, struct bqueues *dp)
248 {
249 	struct buf *b;
250 	TAILQ_FOREACH(b, dp, b_freelist) {
251 		if (b == bp)
252 			return 1;
253 	}
254 	return 0;
255 }
256 #endif
257 
258 void
259 bremfree(struct buf *bp)
260 {
261 	struct bqueues *dp = NULL;
262 
263 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
264 
265 	KDASSERT(!debug_verify_freelist ||
266 		checkfreelist(bp, &bufqueues[BQ_AGE]) ||
267 		checkfreelist(bp, &bufqueues[BQ_LRU]) ||
268 		checkfreelist(bp, &bufqueues[BQ_LOCKED]) );
269 
270 	/*
271 	 * We only calculate the head of the freelist when removing
272 	 * the last element of the list as that is the only time that
273 	 * it is needed (e.g. to reset the tail pointer).
274 	 *
275 	 * NB: This makes an assumption about how tailq's are implemented.
276 	 *
277 	 * We break the TAILQ abstraction in order to efficiently remove a
278 	 * buffer from its freelist without having to know exactly which
279 	 * freelist it is on.
280 	 */
281 	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
282 		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
283 			if (dp->tqh_last == &bp->b_freelist.tqe_next)
284 				break;
285 		if (dp == &bufqueues[BQUEUES])
286 			panic("bremfree: lost tail");
287 	}
288 	TAILQ_REMOVE(dp, bp, b_freelist);
289 }
290 
291 u_long
292 buf_memcalc(void)
293 {
294 	u_long n;
295 
296 	/*
297 	 * Determine the upper bound of memory to use for buffers.
298 	 *
299 	 *	- If bufpages is specified, use that as the number
300  *	  of pages.
301 	 *
302 	 *	- Otherwise, use bufcache as the percentage of
303 	 *	  physical memory.
304 	 */
305 	if (bufpages != 0) {
306 		n = bufpages;
307 	} else {
308 		if (bufcache < 5) {
309 			printf("forcing bufcache %d -> 5\n", bufcache);
310 			bufcache = 5;
311 		}
312 		if (bufcache > 95) {
313 			printf("forcing bufcache %d -> 95\n", bufcache);
314 			bufcache = 95;
315 		}
316 		n = physmem / 100 * bufcache;
317 	}
318 
319 	n <<= PAGE_SHIFT;
320 	if (bufmem_valimit != 0 && n > bufmem_valimit)
321 		n = bufmem_valimit;
322 
323 	return (n);
324 }
325 
326 /*
327  * Initialize buffers and hash links for buffers.
328  */
329 void
330 bufinit(void)
331 {
332 	struct bqueues *dp;
333 	int smallmem;
334 	u_int i;
335 
336 	/*
337 	 * Initialize buffer cache memory parameters.
338 	 */
339 	bufmem = 0;
340 	bufmem_hiwater = buf_memcalc();
341 	/* lowater is approx. 2% of memory (with bufcache=15) */
342 	bufmem_lowater = (bufmem_hiwater >> 3);
343 	if (bufmem_lowater < 64 * 1024)
344 		/* Ensure a reasonable minimum value */
345 		bufmem_lowater = 64 * 1024;
346 
347 	if (bufmem_valimit != 0) {
348 		vaddr_t minaddr = 0, maxaddr;
349 		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
350 					  bufmem_valimit, VM_MAP_PAGEABLE,
351 					  FALSE, 0);
352 		if (buf_map == NULL)
353 			panic("bufinit: cannot allocate submap");
354 	} else
355 		buf_map = kernel_map;
356 
357 	/*
358 	 * Initialize the buffer pools.
359 	 */
360 	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
361 
362 	/* On "small" machines use small pool page sizes where possible */
363 	smallmem = (physmem < atop(16*1024*1024));
364 
365 	for (i = 0; i < NMEMPOOLS; i++) {
366 		struct pool_allocator *pa;
367 		struct pool *pp = &bmempools[i];
368 		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
369 		char *name = malloc(8, M_TEMP, M_WAITOK);
370 		snprintf(name, 8, "buf%dk", 1 << i);
371 		pa = (size <= PAGE_SIZE && smallmem)
372 			? &pool_allocator_nointr
373 			: &bufmempool_allocator;
374 		pool_init(pp, size, 0, 0, PR_IMMEDRELEASE, name, pa);
375 		pool_setlowat(pp, 1);
376 	}
377 
378 	/* Initialize the buffer queues */
379 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
380 		TAILQ_INIT(dp);
381 
382 	/*
383 	 * Estimate hash table size based on the amount of memory we
384 	 * intend to use for the buffer cache. The average buffer
385 	 * size is dependent on our clients (i.e. filesystems).
386 	 *
387 	 * For now, use an empirical 3K per buffer.
388 	 */
389 	nbuf = (bufmem_hiwater / 1024) / 3;
390 	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
391 }
392 
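/*
 * Decide whether the buffer cache may grow (return 1) or whether an
 * existing buffer should be recycled instead (return 0).  Growth is
 * always allowed below bufmem_lowater, never while the AGE list still
 * holds reclaimable buffers, and otherwise only probabilistically, with
 * the odds shrinking as bufmem approaches bufmem_hiwater and only while
 * the system has free pages to spare.
 */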
393 static int
394 buf_lotsfree(void)
395 {
396 	int try, thresh;
397 
398 	if (bufmem < bufmem_lowater) {
399 		return 1;
400 	}
401 
402 	/* If there's anything on the AGE list, it should be eaten. */
403 
404 	if (TAILQ_FIRST(&bufqueues[BQ_AGE]) != NULL)
405 		return 0;
406 
407 	try = random() & 0x0000000fL;
408 
409 	thresh = (16 * bufmem) / bufmem_hiwater;
410 
411 	if ((try > thresh) && (uvmexp.free > (2 * uvmexp.freetarg))) {
412 		return 1;
413 	}
414 
415 	return 0;
416 }
417 
418 /*
419  * Return an estimate of the amount of buffer memory (in bytes)
420  * that should be released to help resolve low memory conditions.
421  */
422 static int
423 buf_canrelease(void)
424 {
425 	int pagedemand, ninvalid = 0;
426 	struct buf *bp;
427 
428 	TAILQ_FOREACH(bp, &bufqueues[BQ_AGE], b_freelist)
429 		ninvalid += bp->b_bufsize;
430 
431 	if (bufmem < bufmem_lowater)
432 		return 0;
433 
434 	pagedemand = uvmexp.freetarg - uvmexp.free;
435 	if (pagedemand < 0)
436 		return ninvalid;
437 	return MAX(ninvalid, MIN(2 * MAXBSIZE,
438 	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
439 }
440 
441 /*
442  * Buffer memory allocation helper functions
443  */
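/*
 * buf_mempoolidx() maps an allocation size to the index of the smallest
 * pool that can hold it, roughly ceil(log2(size)) - MEMPOOL_INDEX_OFFSET.
 * For instance, a 3000-byte request maps to index 2, the 4k pool.
 */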
444 static __inline u_long
445 buf_mempoolidx(u_long size)
446 {
447 	u_int n = 0;
448 
449 	size -= 1;
450 	size >>= MEMPOOL_INDEX_OFFSET;
451 	while (size) {
452 		size >>= 1;
453 		n += 1;
454 	}
455 	if (n >= NMEMPOOLS)
456 		panic("buf mem pool index %d", n);
457 	return n;
458 }
459 
460 static __inline u_long
461 buf_roundsize(u_long size)
462 {
463 	/* Round up to nearest power of 2 */
464 	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
465 }
466 
467 static __inline caddr_t
468 buf_malloc(size_t size)
469 {
470 	u_int n = buf_mempoolidx(size);
471 	caddr_t addr;
472 	int s;
473 
474 	while (1) {
475 		addr = pool_get(&bmempools[n], PR_NOWAIT);
476 		if (addr != NULL)
477 			break;
478 
479 		/* No memory, see if we can free some. If so, try again */
480 		if (buf_drain(1) > 0)
481 			continue;
482 
483 		/* Wait for buffers to arrive on the LRU queue */
484 		s = splbio();
485 		simple_lock(&bqueue_slock);
486 		needbuffer = 1;
487 		ltsleep(&needbuffer, PNORELOCK | (PRIBIO+1),
488 			"buf_malloc", 0, &bqueue_slock);
489 		splx(s);
490 	}
491 
492 	return addr;
493 }
494 
495 static void
496 buf_mrelease(caddr_t addr, size_t size)
497 {
498 
499 	pool_put(&bmempools[buf_mempoolidx(size)], addr);
500 }
501 
502 
503 static __inline struct buf *
504 bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
505     int async)
506 {
507 	struct buf *bp;
508 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
509 	struct proc *p = l->l_proc;
510 
511 	bp = getblk(vp, blkno, size, 0, 0);
512 
513 #ifdef DIAGNOSTIC
514 	if (bp == NULL) {
515 		panic("bio_doread: no such buf");
516 	}
517 #endif
518 
519 	/*
520 	 * If buffer does not have data valid, start a read.
521 	 * Note that if buffer is B_INVAL, getblk() won't return it.
522 	 * Therefore, it's valid if its I/O has completed or been delayed.
523 	 */
524 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
525 		/* Start I/O for the buffer. */
526 		SET(bp->b_flags, B_READ | async);
527 		if (async)
528 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
529 		else
530 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
531 		VOP_STRATEGY(vp, bp);
532 
533 		/* Pay for the read. */
534 		p->p_stats->p_ru.ru_inblock++;
535 	} else if (async) {
536 		brelse(bp);
537 	}
538 
539 	return (bp);
540 }
541 
542 /*
543  * Read a disk block.
544  * This algorithm described in Bach (p.54).
545  */
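/*
 * Illustrative caller pattern (not from this file), assuming a vnode
 * "vp", a logical block number "lbn" and a block size "bsize" supplied
 * by the file system:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use bp->b_data...
 *	brelse(bp);
 */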
546 int
547 bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
548     struct buf **bpp)
549 {
550 	struct buf *bp;
551 
552 	/* Get buffer for block. */
553 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
554 
555 	/* Wait for the read to complete, and return result. */
556 	return (biowait(bp));
557 }
558 
559 /*
560  * Read-ahead multiple disk blocks. The first is sync, the rest async.
561  * Trivial modification to the breada algorithm presented in Bach (p.55).
562  */
563 int
564 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
565     int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
566 {
567 	struct buf *bp;
568 	int i;
569 
570 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
571 
572 	/*
573 	 * For each of the read-ahead blocks, start a read, if necessary.
574 	 */
575 	for (i = 0; i < nrablks; i++) {
576 		/* If it's in the cache, just go on to next one. */
577 		if (incore(vp, rablks[i]))
578 			continue;
579 
580 		/* Get a buffer for the read-ahead block */
581 		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
582 	}
583 
584 	/* Wait for the synchronous read of the first block to complete. */
585 	return (biowait(bp));
586 }
587 
588 /*
589  * Read with single-block read-ahead.  Defined in Bach (p.55), but
590  * implemented as a call to breadn().
591  * XXX for compatibility with old file systems.
592  */
593 int
594 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
595     int rabsize, struct ucred *cred, struct buf **bpp)
596 {
597 
598 	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
599 }
600 
601 /*
602  * Block write.  Described in Bach (p.56)
603  */
604 int
605 bwrite(struct buf *bp)
606 {
607 	int rv, sync, wasdelayed, s;
608 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
609 	struct proc *p = l->l_proc;
610 	struct vnode *vp;
611 	struct mount *mp;
612 
613 	KASSERT(ISSET(bp->b_flags, B_BUSY));
614 
615 	vp = bp->b_vp;
616 	if (vp != NULL) {
617 		if (vp->v_type == VBLK)
618 			mp = vp->v_specmountpoint;
619 		else
620 			mp = vp->v_mount;
621 	} else {
622 		mp = NULL;
623 	}
624 
625 	/*
626 	 * Remember buffer type, to switch on it later.  If the write was
627 	 * synchronous, but the file system was mounted with MNT_ASYNC,
628 	 * convert it to a delayed write.
629 	 * XXX note that this relies on delayed tape writes being converted
630 	 * to async, not sync writes (which is safe, but ugly).
631 	 */
632 	sync = !ISSET(bp->b_flags, B_ASYNC);
633 	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
634 		bdwrite(bp);
635 		return (0);
636 	}
637 
638 	/*
639 	 * Collect statistics on synchronous and asynchronous writes.
640 	 * Writes to block devices are charged to their associated
641 	 * filesystem (if any).
642 	 */
643 	if (mp != NULL) {
644 		if (sync)
645 			mp->mnt_stat.f_syncwrites++;
646 		else
647 			mp->mnt_stat.f_asyncwrites++;
648 	}
649 
650 	s = splbio();
651 	simple_lock(&bp->b_interlock);
652 
653 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
654 
655 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
656 
657 	/*
658 	 * Pay for the I/O operation and make sure the buf is on the correct
659 	 * vnode queue.
660 	 */
661 	if (wasdelayed)
662 		reassignbuf(bp, bp->b_vp);
663 	else
664 		p->p_stats->p_ru.ru_oublock++;
665 
666 	/* Initiate disk write.  Make sure the appropriate party is charged. */
667 	V_INCR_NUMOUTPUT(bp->b_vp);
668 	simple_unlock(&bp->b_interlock);
669 	splx(s);
670 
671 	if (sync)
672 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
673 	else
674 		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
675 
676 	VOP_STRATEGY(vp, bp);
677 
678 	if (sync) {
679 		/* If I/O was synchronous, wait for it to complete. */
680 		rv = biowait(bp);
681 
682 		/* Release the buffer. */
683 		brelse(bp);
684 
685 		return (rv);
686 	} else {
687 		return (0);
688 	}
689 }
690 
691 int
692 vn_bwrite(void *v)
693 {
694 	struct vop_bwrite_args *ap = v;
695 
696 	return (bwrite(ap->a_bp));
697 }
698 
699 /*
700  * Delayed write.
701  *
702  * The buffer is marked dirty, but is not queued for I/O.
703  * This routine should be used when the buffer is expected
704  * to be modified again soon, typically a small write that
705  * partially fills a buffer.
706  *
707  * NB: magnetic tapes cannot be delayed; they must be
708  * written in the order that the writes are requested.
709  *
710  * Described in Leffler, et al. (pp. 208-213).
711  */
712 void
713 bdwrite(struct buf *bp)
714 {
715 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
716 	struct proc *p = l->l_proc;
717 	const struct bdevsw *bdev;
718 	int s;
719 
720 	/* If this is a tape block, write the block now. */
721 	bdev = bdevsw_lookup(bp->b_dev);
722 	if (bdev != NULL && bdev->d_type == D_TAPE) {
723 		bawrite(bp);
724 		return;
725 	}
726 
727 	/*
728 	 * If the block hasn't been seen before:
729 	 *	(1) Mark it as having been seen,
730 	 *	(2) Charge for the write,
731 	 *	(3) Make sure it's on its vnode's correct block list.
732 	 */
733 	s = splbio();
734 	simple_lock(&bp->b_interlock);
735 
736 	KASSERT(ISSET(bp->b_flags, B_BUSY));
737 
738 	if (!ISSET(bp->b_flags, B_DELWRI)) {
739 		SET(bp->b_flags, B_DELWRI);
740 		p->p_stats->p_ru.ru_oublock++;
741 		reassignbuf(bp, bp->b_vp);
742 	}
743 
744 	/* Otherwise, the "write" is done, so mark and release the buffer. */
745 	CLR(bp->b_flags, B_DONE);
746 	simple_unlock(&bp->b_interlock);
747 	splx(s);
748 
749 	brelse(bp);
750 }
751 
752 /*
753  * Asynchronous block write; just an asynchronous bwrite().
754  */
755 void
756 bawrite(struct buf *bp)
757 {
758 	int s;
759 
760 	s = splbio();
761 	simple_lock(&bp->b_interlock);
762 
763 	KASSERT(ISSET(bp->b_flags, B_BUSY));
764 
765 	SET(bp->b_flags, B_ASYNC);
766 	simple_unlock(&bp->b_interlock);
767 	splx(s);
768 	VOP_BWRITE(bp);
769 }
770 
771 /*
772  * Same as first half of bdwrite, mark buffer dirty, but do not release it.
773  * Call at splbio() and with the buffer interlock locked.
774  * Note: called only from biodone() through ffs softdep's bioops.io_complete()
775  */
776 void
777 bdirty(struct buf *bp)
778 {
779 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
780 	struct proc *p = l->l_proc;
781 
782 	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
783 	KASSERT(ISSET(bp->b_flags, B_BUSY));
784 
785 	CLR(bp->b_flags, B_AGE);
786 
787 	if (!ISSET(bp->b_flags, B_DELWRI)) {
788 		SET(bp->b_flags, B_DELWRI);
789 		p->p_stats->p_ru.ru_oublock++;
790 		reassignbuf(bp, bp->b_vp);
791 	}
792 }
793 
794 /*
795  * Release a buffer on to the free lists.
796  * Described in Bach (p. 46).
797  */
798 void
799 brelse(struct buf *bp)
800 {
801 	struct bqueues *bufq;
802 	int s;
803 
804 	/* Block disk interrupts. */
805 	s = splbio();
806 	simple_lock(&bqueue_slock);
807 	simple_lock(&bp->b_interlock);
808 
809 	KASSERT(ISSET(bp->b_flags, B_BUSY));
810 	KASSERT(!ISSET(bp->b_flags, B_CALL));
811 
812 	/* Wake up any processes waiting for any buffer to become free. */
813 	if (needbuffer) {
814 		needbuffer = 0;
815 		wakeup(&needbuffer);
816 	}
817 
818 	/* Wake up any processes waiting for _this_ buffer to become free. */
819 	if (ISSET(bp->b_flags, B_WANTED)) {
820 		CLR(bp->b_flags, B_WANTED|B_AGE);
821 		wakeup(bp);
822 	}
823 
824 	/*
825 	 * Determine which queue the buffer should be on, then put it there.
826 	 */
827 
828 	/* If it's locked, don't report an error; try again later. */
829 	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
830 		CLR(bp->b_flags, B_ERROR);
831 
832 	/* If it's not cacheable, or an error, mark it invalid. */
833 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
834 		SET(bp->b_flags, B_INVAL);
835 
836 	if (ISSET(bp->b_flags, B_VFLUSH)) {
837 		/*
838 		 * This is a delayed write buffer that was just flushed to
839 		 * disk.  It is still on the LRU queue.  If it's become
840 		 * invalid, then we need to move it to a different queue;
841 		 * otherwise leave it in its current position.
842 		 */
843 		CLR(bp->b_flags, B_VFLUSH);
844 		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
845 			KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
846 			goto already_queued;
847 		} else {
848 			bremfree(bp);
849 		}
850 	}
851 
852 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
853 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
854 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
855 
856 	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
857 		/*
858 		 * If it's invalid or empty, dissociate it from its vnode
859 		 * and put on the head of the appropriate queue.
860 		 */
861 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
862 			(*bioops.io_deallocate)(bp);
863 		CLR(bp->b_flags, B_DONE|B_DELWRI);
864 		if (bp->b_vp) {
865 			reassignbuf(bp, bp->b_vp);
866 			brelvp(bp);
867 		}
868 		if (bp->b_bufsize <= 0)
869 			/* no data */
870 			goto already_queued;
871 		else
872 			/* invalid data */
873 			bufq = &bufqueues[BQ_AGE];
874 		binsheadfree(bp, bufq);
875 	} else {
876 		/*
877 		 * It has valid data.  Put it on the end of the appropriate
878 		 * queue, so that it'll stick around for as long as possible.
879 		 * If buf is AGE, but has dependencies, must put it on last
880 		 * bufqueue to be scanned, ie LRU. This protects against the
881 		 * livelock where BQ_AGE only has buffers with dependencies,
882 		 * and we thus never get to the dependent buffers in BQ_LRU.
883 		 */
884 		if (ISSET(bp->b_flags, B_LOCKED))
885 			/* locked in core */
886 			bufq = &bufqueues[BQ_LOCKED];
887 		else if (!ISSET(bp->b_flags, B_AGE))
888 			/* valid data */
889 			bufq = &bufqueues[BQ_LRU];
890 		else {
891 			/* stale but valid data */
892 			int has_deps;
893 
894 			if (LIST_FIRST(&bp->b_dep) != NULL &&
895 			    bioops.io_countdeps)
896 				has_deps = (*bioops.io_countdeps)(bp, 0);
897 			else
898 				has_deps = 0;
899 			bufq = has_deps ? &bufqueues[BQ_LRU] :
900 			    &bufqueues[BQ_AGE];
901 		}
902 		binstailfree(bp, bufq);
903 	}
904 
905 already_queued:
906 	/* Unlock the buffer. */
907 	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
908 	SET(bp->b_flags, B_CACHE);
909 
910 	/* Allow disk interrupts. */
911 	simple_unlock(&bp->b_interlock);
912 	simple_unlock(&bqueue_slock);
913 	if (bp->b_bufsize <= 0) {
914 #ifdef DEBUG
915 		memset((char *)bp, 0, sizeof(*bp));
916 #endif
917 		pool_put(&bufpool, bp);
918 	}
919 	splx(s);
920 }
921 
922 /*
923  * Determine if a block is in the cache.
924  * Just look on what would be its hash chain.  If it's there, return
925  * a pointer to it, unless it's marked invalid.  If it's marked
926  * invalid, the buffer is never returned; the hash-chain search
927  * simply skips it.
928  */
929 struct buf *
930 incore(struct vnode *vp, daddr_t blkno)
931 {
932 	struct buf *bp;
933 
934 	/* Search hash chain */
935 	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
936 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
937 		    !ISSET(bp->b_flags, B_INVAL))
938 			return (bp);
939 	}
940 
941 	return (NULL);
942 }
943 
944 /*
945  * Get a block of requested size that is associated with
946  * a given vnode and block offset. If it is found in the
947  * block cache, mark it as having been found, make it busy
948  * and return it. Otherwise, return an empty block of the
949  * correct size. It is up to the caller to ensure that the
950  * cached blocks are of the correct size.
951  */
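/*
 * Note: the buffer returned by getblk() is marked B_BUSY; the caller is
 * expected to release it again with brelse(), bwrite() or bdwrite().
 */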
952 struct buf *
953 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
954 {
955 	struct buf *bp;
956 	int s, err;
957 	int preserve;
958 
959 start:
960 	s = splbio();
961 	simple_lock(&bqueue_slock);
962 	bp = incore(vp, blkno);
963 	if (bp != NULL) {
964 		simple_lock(&bp->b_interlock);
965 		if (ISSET(bp->b_flags, B_BUSY)) {
966 			simple_unlock(&bqueue_slock);
967 			if (curproc == uvm.pagedaemon_proc) {
968 				simple_unlock(&bp->b_interlock);
969 				splx(s);
970 				return NULL;
971 			}
972 			SET(bp->b_flags, B_WANTED);
973 			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
974 					"getblk", slptimeo, &bp->b_interlock);
975 			splx(s);
976 			if (err)
977 				return (NULL);
978 			goto start;
979 		}
980 #ifdef DIAGNOSTIC
981 		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
982 		    bp->b_bcount < size && vp->v_type != VBLK)
983 			panic("getblk: block size invariant failed");
984 #endif
985 		SET(bp->b_flags, B_BUSY);
986 		bremfree(bp);
987 		preserve = 1;
988 	} else {
989 		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
990 			simple_unlock(&bqueue_slock);
991 			splx(s);
992 			goto start;
993 		}
994 
995 		binshash(bp, BUFHASH(vp, blkno));
996 		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
997 		bgetvp(vp, bp);
998 		preserve = 0;
999 	}
1000 	simple_unlock(&bp->b_interlock);
1001 	simple_unlock(&bqueue_slock);
1002 	splx(s);
1003 	/*
1004 	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1005 	 * if we re-size buffers here.
1006 	 */
1007 	if (ISSET(bp->b_flags, B_LOCKED)) {
1008 		KASSERT(bp->b_bufsize >= size);
1009 	} else {
1010 		allocbuf(bp, size, preserve);
1011 	}
1012 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1013 	return (bp);
1014 }
1015 
1016 /*
1017  * Get an empty, disassociated buffer of given size.
1018  */
1019 struct buf *
1020 geteblk(int size)
1021 {
1022 	struct buf *bp;
1023 	int s;
1024 
1025 	s = splbio();
1026 	simple_lock(&bqueue_slock);
1027 	while ((bp = getnewbuf(0, 0, 0)) == 0)
1028 		;
1029 
1030 	SET(bp->b_flags, B_INVAL);
1031 	binshash(bp, &invalhash);
1032 	simple_unlock(&bqueue_slock);
1033 	simple_unlock(&bp->b_interlock);
1034 	splx(s);
1035 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1036 	allocbuf(bp, size, 0);
1037 	return (bp);
1038 }
1039 
1040 /*
1041  * Expand or contract the actual memory allocated to a buffer.
1042  *
1043  * If the buffer shrinks, data is lost, so it's up to the
1044  * caller to have written it out *first*; this routine will not
1045  * start a write.  If the buffer grows, it's the caller's
1046  * responsibility to fill out the buffer's additional contents.
1047  */
1048 void
1049 allocbuf(struct buf *bp, int size, int preserve)
1050 {
1051 	vsize_t oldsize, desired_size;
1052 	caddr_t addr;
1053 	int s, delta;
1054 
1055 	desired_size = buf_roundsize(size);
1056 	if (desired_size > MAXBSIZE)
1057 		printf("allocbuf: buffer larger than MAXBSIZE requested\n");
1058 
1059 	bp->b_bcount = size;
1060 
1061 	oldsize = bp->b_bufsize;
1062 	if (oldsize == desired_size)
1063 		return;
1064 
1065 	/*
1066 	 * If we want a buffer of a different size, re-allocate the
1067 	 * buffer's memory; copy old content only if needed.
1068 	 */
1069 	addr = buf_malloc(desired_size);
1070 	if (preserve)
1071 		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1072 	if (bp->b_data != NULL)
1073 		buf_mrelease(bp->b_data, oldsize);
1074 	bp->b_data = addr;
1075 	bp->b_bufsize = desired_size;
1076 
1077 	/*
1078 	 * Update overall buffer memory counter (protected by bqueue_slock)
1079 	 */
1080 	delta = (long)desired_size - (long)oldsize;
1081 
1082 	s = splbio();
1083 	simple_lock(&bqueue_slock);
1084 	if ((bufmem += delta) > bufmem_hiwater) {
1085 		/*
1086 		 * Need to trim overall memory usage.
1087 		 */
1088 		while (buf_canrelease()) {
1089 			if (buf_trim() == 0)
1090 				break;
1091 		}
1092 	}
1093 
1094 	simple_unlock(&bqueue_slock);
1095 	splx(s);
1096 }
1097 
1098 /*
1099  * Find a buffer which is available for use.
1100  * Select something from a free list.
1101  * Preference is to AGE list, then LRU list.
1102  *
1103  * Called at splbio and with buffer queues locked.
1104  * Return buffer locked.
1105  */
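/*
 * On success the buffer is returned with B_BUSY set and its b_interlock
 * held.  A NULL return means the caller should retry; the buffer queue
 * lock is still held in that case.
 */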
1106 struct buf *
1107 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1108 {
1109 	struct buf *bp;
1110 
1111 start:
1112 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1113 
1114 	/*
1115 	 * Get a new buffer from the pool; but use NOWAIT because
1116 	 * we have the buffer queues locked.
1117 	 */
1118 	if (buf_lotsfree() && !from_bufq &&
1119 	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1120 		memset((char *)bp, 0, sizeof(*bp));
1121 		BUF_INIT(bp);
1122 		bp->b_dev = NODEV;
1123 		bp->b_vnbufs.le_next = NOLIST;
1124 		bp->b_flags = B_BUSY;
1125 		simple_lock(&bp->b_interlock);
1126 		return (bp);
1127 	}
1128 
1129 	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
1130 	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
1131 		simple_lock(&bp->b_interlock);
1132 		bremfree(bp);
1133 	} else {
1134 		/* wait for a free buffer of any kind */
1135 		needbuffer = 1;
1136 		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
1137 			"getnewbuf", slptimeo, &bqueue_slock);
1138 		return (NULL);
1139 	}
1140 
1141 #ifdef DIAGNOSTIC
1142 	if (bp->b_bufsize <= 0)
1143 		panic("buffer %p: on queue but empty", bp);
1144 #endif
1145 
1146 	if (ISSET(bp->b_flags, B_VFLUSH)) {
1147 		/*
1148 		 * This is a delayed write buffer being flushed to disk.  Make
1149 		 * sure it gets aged out of the queue when it's finished, and
1150 		 * leave it off the LRU queue.
1151 		 */
1152 		CLR(bp->b_flags, B_VFLUSH);
1153 		SET(bp->b_flags, B_AGE);
1154 		simple_unlock(&bp->b_interlock);
1155 		goto start;
1156 	}
1157 
1158 	/* Buffer is no longer on free lists. */
1159 	SET(bp->b_flags, B_BUSY);
1160 
1161 	/*
1162 	 * If buffer was a delayed write, start it and return NULL
1163 	 * (since we might sleep while starting the write).
1164 	 */
1165 	if (ISSET(bp->b_flags, B_DELWRI)) {
1166 		/*
1167 		 * This buffer has gone through the LRU, so make sure it gets
1168 		 * reused ASAP.
1169 		 */
1170 		SET(bp->b_flags, B_AGE);
1171 		simple_unlock(&bp->b_interlock);
1172 		simple_unlock(&bqueue_slock);
1173 		bawrite(bp);
1174 		simple_lock(&bqueue_slock);
1175 		return (NULL);
1176 	}
1177 
1178 	/* disassociate us from our vnode, if we had one... */
1179 	if (bp->b_vp)
1180 		brelvp(bp);
1181 
1182 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1183 		(*bioops.io_deallocate)(bp);
1184 
1185 	/* clear out various other fields */
1186 	bp->b_flags = B_BUSY;
1187 	bp->b_dev = NODEV;
1188 	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1189 	bp->b_iodone = 0;
1190 	bp->b_error = 0;
1191 	bp->b_resid = 0;
1192 	bp->b_bcount = 0;
1193 
1194 	bremhash(bp);
1195 	return (bp);
1196 }
1197 
1198 /*
1199  * Attempt to free an aged buffer off the queues.
1200  * Called at splbio and with queue lock held.
1201  * Returns the amount of buffer memory freed.
1202  */
1203 int
1204 buf_trim(void)
1205 {
1206 	struct buf *bp;
1207 	long size = 0;
1208 	int wanted;
1209 
1210 	/* Instruct getnewbuf() to get buffers off the queues */
1211 	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1212 		return 0;
1213 
1214 	wanted = ISSET(bp->b_flags, B_WANTED);
1215 	simple_unlock(&bp->b_interlock);
1216 	if (wanted) {
1217 		printf("buftrim: got WANTED buffer\n");
1218 		SET(bp->b_flags, B_INVAL);
1219 		binshash(bp, &invalhash);
1220 		simple_unlock(&bqueue_slock);
1221 		goto out;
1222 	}
1223 	size = bp->b_bufsize;
1224 	bufmem -= size;
1225 	simple_unlock(&bqueue_slock);
1226 	if (size > 0) {
1227 		buf_mrelease(bp->b_data, size);
1228 		bp->b_bcount = bp->b_bufsize = 0;
1229 	}
1230 
1231 out:
1232 	/* brelse() will return the buffer to the global buffer pool */
1233 	brelse(bp);
1234 	simple_lock(&bqueue_slock);
1235 	return size;
1236 }
1237 
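/*
 * Release roughly n bytes of buffer memory (or an internally estimated
 * amount if n is zero), stopping early once bufmem drops to
 * bufmem_lowater.  Returns the number of bytes actually freed.
 */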
1238 int
1239 buf_drain(int n)
1240 {
1241 	int s, size = 0;
1242 
1243 	/* If not asked for a specific amount, make our own estimate */
1244 	if (n == 0)
1245 		n = buf_canrelease();
1246 
1247 	s = splbio();
1248 	simple_lock(&bqueue_slock);
1249 	while (size < n && bufmem > bufmem_lowater)
1250 		size += buf_trim();
1251 
1252 	simple_unlock(&bqueue_slock);
1253 	splx(s);
1254 	return size;
1255 }
1256 
1257 /*
1258  * Wait for operations on the buffer to complete.
1259  * When they do, extract and return the I/O's error value.
1260  */
1261 int
1262 biowait(struct buf *bp)
1263 {
1264 	int s, error;
1265 
1266 	s = splbio();
1267 	simple_lock(&bp->b_interlock);
1268 	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1269 		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1270 
1271 	/* check for interruption of I/O (e.g. via NFS), then errors. */
1272 	if (ISSET(bp->b_flags, B_EINTR)) {
1273 		CLR(bp->b_flags, B_EINTR);
1274 		error = EINTR;
1275 	} else if (ISSET(bp->b_flags, B_ERROR))
1276 		error = bp->b_error ? bp->b_error : EIO;
1277 	else
1278 		error = 0;
1279 
1280 	simple_unlock(&bp->b_interlock);
1281 	splx(s);
1282 	return (error);
1283 }
1284 
1285 /*
1286  * Mark I/O complete on a buffer.
1287  *
1288  * If a callback has been requested, e.g. the pageout
1289  * daemon, do so. Otherwise, awaken waiting processes.
1290  *
1291  * [ Leffler, et al., says on p.247:
1292  *	"This routine wakes up the blocked process, frees the buffer
1293  *	for an asynchronous write, or, for a request by the pagedaemon
1294  *	process, invokes a procedure specified in the buffer structure" ]
1295  *
1296  * In real life, the pagedaemon (or other system processes) wants
1297  * to do async stuff too, and doesn't want the buffer brelse()'d.
1298  * (for swap pager, that puts swap buffers on the free lists (!!!),
1299  * for the vn device, that puts malloc'd buffers on the free lists!)
1300  */
1301 void
1302 biodone(struct buf *bp)
1303 {
1304 	int s = splbio();
1305 
1306 	simple_lock(&bp->b_interlock);
1307 	if (ISSET(bp->b_flags, B_DONE))
1308 		panic("biodone already");
1309 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1310 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1311 
1312 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1313 		(*bioops.io_complete)(bp);
1314 
1315 	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
1316 		vwakeup(bp);
1317 
1318 	/*
1319 	 * If necessary, call out.  Unlock the buffer before calling
1320 	 * iodone() as the buffer isn't valid any more when it returns.
1321 	 */
1322 	if (ISSET(bp->b_flags, B_CALL)) {
1323 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1324 		simple_unlock(&bp->b_interlock);
1325 		(*bp->b_iodone)(bp);
1326 	} else {
1327 		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
1328 			simple_unlock(&bp->b_interlock);
1329 			brelse(bp);
1330 		} else {			/* or just wakeup the buffer */
1331 			CLR(bp->b_flags, B_WANTED);
1332 			wakeup(bp);
1333 			simple_unlock(&bp->b_interlock);
1334 		}
1335 	}
1336 
1337 	splx(s);
1338 }
1339 
1340 /*
1341  * Return a count of buffers on the "locked" queue.
1342  */
1343 int
1344 count_lock_queue(void)
1345 {
1346 	struct buf *bp;
1347 	int n = 0;
1348 
1349 	simple_lock(&bqueue_slock);
1350 	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
1351 		n++;
1352 	simple_unlock(&bqueue_slock);
1353 	return (n);
1354 }
1355 
1356 /*
1357  * Wait for all buffers to complete I/O
1358  * Return the number of "stuck" buffers.
1359  */
1360 int
1361 buf_syncwait(void)
1362 {
1363 	struct buf *bp;
1364 	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1365 
1366 	dcount = 10000;
1367 	for (iter = 0; iter < 20;) {
1368 		s = splbio();
1369 		simple_lock(&bqueue_slock);
1370 		nbusy = 0;
1371 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1372 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1373 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1374 				nbusy++;
1375 			/*
1376 			 * With soft updates, some buffers that are
1377 			 * written will be remarked as dirty until other
1378 			 * buffers are written.
1379 			 */
1380 			if (bp->b_vp && bp->b_vp->v_mount
1381 			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1382 			    && (bp->b_flags & B_DELWRI)) {
1383 				simple_lock(&bp->b_interlock);
1384 				bremfree(bp);
1385 				bp->b_flags |= B_BUSY;
1386 				nbusy++;
1387 				simple_unlock(&bp->b_interlock);
1388 				simple_unlock(&bqueue_slock);
1389 				bawrite(bp);
1390 				if (dcount-- <= 0) {
1391 					printf("softdep ");
1392 					goto fail;
1393 				}
1394 				simple_lock(&bqueue_slock);
1395 			}
1396 		    }
1397 		}
1398 
1399 		simple_unlock(&bqueue_slock);
1400 		splx(s);
1401 
1402 		if (nbusy == 0)
1403 			break;
1404 		if (nbusy_prev == 0)
1405 			nbusy_prev = nbusy;
1406 		printf("%d ", nbusy);
1407 		tsleep(&nbusy, PRIBIO, "bflush",
1408 		    (iter == 0) ? 1 : hz / 25 * iter);
1409 		if (nbusy >= nbusy_prev) /* we didn't flush anything */
1410 			iter++;
1411 		else
1412 			nbusy_prev = nbusy;
1413 	}
1414 
1415 	if (nbusy) {
1416 fail:;
1417 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1418 		printf("giving up\nPrinting vnodes for busy buffers\n");
1419 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1420 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1421 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1422 				vprint(NULL, bp->b_vp);
1423 		    }
1424 		}
1425 #endif
1426 	}
1427 
1428 	return nbusy;
1429 }
1430 
1431 #define KERN_BUFSLOP 20
1432 static int
1433 sysctl_dobuf(SYSCTLFN_ARGS)
1434 {
1435 	struct buf *bp;
1436 	char *dp;
1437 	u_int i, elem_size;
1438 	size_t len, buflen, needed;
1439 	int error, s;
1440 
1441 	dp = oldp;
1442 	len = buflen = oldp != NULL ? *oldlenp : 0;
1443 	error = 0;
1444 	needed = 0;
1445 	elem_size = sizeof(struct buf);
1446 
1447 	s = splbio();
1448 	simple_lock(&bqueue_slock);
1449 	for (i = 0; i < BQUEUES; i++) {
1450 		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
1451 			if (len >= elem_size) {
1452 				error = copyout(bp, dp, elem_size);
1453 				if (error)
1454 					goto cleanup;
1455 				dp += elem_size;
1456 				len -= elem_size;
1457 			}
1458 			needed += elem_size;
1459 		}
1460 	}
1461 cleanup:
1462 	simple_unlock(&bqueue_slock);
1463 	splx(s);
1464 
1465 	if (oldp != NULL) {
1466 		*oldlenp = (char *)dp - (char *)oldp;
1467 		if (needed > *oldlenp)
1468 			error = ENOMEM;
1469 	} else {
1470 		needed += KERN_BUFSLOP;
1471 		*oldlenp = needed;
1472 	}
1473 
1474 	return (error);
1475 }
1476 
1477 static int sysctlnum_bufcache, sysctlnum_bufmemhiwater, sysctlnum_bufmemlowater;
1478 
1479 static int
1480 sysctl_bufvm_update(SYSCTLFN_ARGS)
1481 {
1482 	int t, error;
1483 	struct sysctlnode node;
1484 
1485 	node = *rnode;
1486 	node.sysctl_data = &t;
1487 	t = *(int*)rnode->sysctl_data;
1488 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1489 	if (error || newp == NULL)
1490 		return (error);
1491 
1492 	if (rnode->sysctl_num == sysctlnum_bufcache) {
1493 		if (t < 0 || t > 100)
1494 			return (EINVAL);
1495 		bufcache = t;
1496 		bufmem_hiwater = buf_memcalc();
1497 		bufmem_lowater = (bufmem_hiwater >> 3);
1498 		if (bufmem_lowater < 64 * 1024)
1499 			/* Ensure a reasonable minimum value */
1500 			bufmem_lowater = 64 * 1024;
1501 
1502 	} else if (rnode->sysctl_num == sysctlnum_bufmemlowater) {
1503 		bufmem_lowater = t;
1504 	} else if (rnode->sysctl_num == sysctlnum_bufmemhiwater) {
1505 		bufmem_hiwater = t;
1506 	} else
1507 		return (EINVAL);
1508 
1509 	/* Drain until below new high water mark */
1510 	while ((t = bufmem - bufmem_hiwater) >= 0) {
1511 		if (buf_drain(t / (2*1024)) <= 0)
1512 			break;
1513 	}
1514 
1515 	return 0;
1516 }
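/*
 * Illustrative example: "sysctl -w vm.bufcache=20" from userland would
 * recompute bufmem_hiwater as roughly 20% of physical memory and then
 * drain the cache down below the new high water mark if necessary.
 */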
1517 
1518 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1519 {
1520 
1521 	sysctl_createv(SYSCTL_PERMANENT,
1522 		       CTLTYPE_NODE, "kern", NULL,
1523 		       NULL, 0, NULL, 0,
1524 		       CTL_KERN, CTL_EOL);
1525 	sysctl_createv(SYSCTL_PERMANENT,
1526 		       CTLTYPE_NODE, "buf", NULL,
1527 		       sysctl_dobuf, 0, NULL, 0,
1528 		       CTL_KERN, KERN_BUF, CTL_EOL);
1529 }
1530 
1531 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1532 {
1533 	struct sysctlnode *rnode;
1534 
1535 	sysctl_createv(SYSCTL_PERMANENT,
1536 		       CTLTYPE_NODE, "vm", NULL,
1537 		       NULL, 0, NULL, 0,
1538 		       CTL_VM, CTL_EOL);
1539 
1540 	rnode = NULL;
1541 	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1542 			   CTLTYPE_INT, "bufcache", &rnode,
1543 			   sysctl_bufvm_update, 0, &bufcache, 0,
1544 			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1545 		sysctlnum_bufcache = rnode->sysctl_num;
1546 
1547 	rnode = NULL;
1548 	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1549 			   CTLTYPE_INT, "bufmem_lowater", &rnode,
1550 			   sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1551 			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1552 		sysctlnum_bufmemlowater = rnode->sysctl_num;
1553 
1554 	rnode = NULL;
1555 	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1556 			   CTLTYPE_INT, "bufmem_hiwater", &rnode,
1557 			   sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1558 			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1559 		sysctlnum_bufmemhiwater = rnode->sysctl_num;
1560 }
1561 
1562 #ifdef DEBUG
1563 /*
1564  * Print out statistics on the current allocation of the buffer pool.
1565  * Can be enabled to print out on every ``sync'' by setting "syncprt"
1566  * in vfs_syscalls.c using sysctl.
1567  */
1568 void
1569 vfs_bufstats(void)
1570 {
1571 	int s, i, j, count;
1572 	struct buf *bp;
1573 	struct bqueues *dp;
1574 	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1575 	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1576 
1577 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1578 		count = 0;
1579 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1580 			counts[j] = 0;
1581 		s = splbio();
1582 		TAILQ_FOREACH(bp, dp, b_freelist) {
1583 			counts[bp->b_bufsize/PAGE_SIZE]++;
1584 			count++;
1585 		}
1586 		splx(s);
1587 		printf("%s: total-%d", bname[i], count);
1588 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1589 			if (counts[j] != 0)
1590 				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1591 		printf("\n");
1592 	}
1593 }
1594 #endif /* DEBUG */
1595