/*	$NetBSD: vfs_bio.c,v 1.142 2005/02/26 21:34:56 perry Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.142 2005/02/26 21:34:56 perry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static void buf_setwm(void);
static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static __inline u_long buf_mempoolidx(u_long);
static __inline u_long buf_roundsize(u_long);
static __inline caddr_t buf_malloc(size_t);
static void buf_mrelease(caddr_t, size_t);
static __inline void binsheadfree(struct buf *, struct bqueue *);
static __inline void binstailfree(struct buf *, struct bqueue *);
int count_lock_queue(void); /* XXX */
#ifdef DEBUG
static int checkfreelist(struct buf *, struct bqueue *);
#endif

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#if !defined(SOFTDEP) || !defined(FFS)
struct bio_ops bioops;	/* I/O operation notification */
#endif
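
/*
 * Example (editor's illustration, not part of the original source):
 * BUFHASH() folds the vnode pointer and the logical block number into
 * an index into bufhashtbl[], so a cache lookup only walks one short
 * chain, exactly as incore() does below:
 *
 *	struct buf *bp;
 *
 *	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash)
 *		if (bp->b_lblkno == blkno && bp->b_vp == vp)
 *			break;
 */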

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

struct bqueue {
	TAILQ_HEAD(, buf) bq_queue;
	uint64_t bq_bytes;
} bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif
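
/*
 * Worked example (editor's note, assuming the common MAXBSIZE of
 * 0x10000): NMEMPOOLS is then 7 and MEMPOOL_INDEX_OFFSET is 10, so the
 * pools hold chunks of 1 << 10 .. 1 << 16 bytes (1k, 2k, 4k, 8k, 16k,
 * 32k, 64k), and the largest pool matches MAXBSIZE exactly, which is
 * what the #error check above enforces:
 *
 *	1 << (7 + 10 - 1) == 1 << 16 == 0x10000 == MAXBSIZE
 */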

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static void
buf_setwm(void)
{

	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
#define	BUFMEM_WMSHIFT	3
#define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
	if (bufmem_hiwater < BUFMEM_HIWMMIN)
		/* Ensure a reasonable minimum value */
		bufmem_hiwater = BUFMEM_HIWMMIN;
	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}
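
/*
 * Worked example (editor's note): with the default bufcache = 15 on a
 * 256MB machine, buf_memcalc() returns roughly 38MB, so
 *
 *	bufmem_hiwater = ~38MB
 *	bufmem_lowater = bufmem_hiwater >> 3 = ~4.8MB (~1.9% of RAM)
 *
 * which is where the "approx. 2% of memory" comment above comes from.
 * The BUFMEM_HIWMMIN floor (64k << 3 = 512k) keeps the window usable
 * on very small machines.
 */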

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueue *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static __inline void
binsheadfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static __inline void
binstailfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(struct buf *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;
#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5\n", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95\n", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
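
/*
 * Worked example (editor's note): with physmem = 65536 pages of 4k
 * (256MB) and the default bufcache = 15,
 *
 *	n = 65536 / 100 * 15 = 9825 pages
 *	n <<= PAGE_SHIFT     -> ~38.4MB
 *
 * Dividing by 100 before multiplying keeps the intermediate result
 * from overflowing a 32-bit u_long on large-memory machines, at the
 * cost of a little precision.
 */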

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	buf_setwm();

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
					  bufmem_valimit, VM_MAP_PAGEABLE,
					  FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && use_std)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	int try, thresh;
	struct lwp *l = curlwp;

	/* Always allocate if doing copy on write */
	if (l->l_flag & L_COWINPROGRESS)
		return 1;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation falls off
	 * linearly as the cache grows, using a granularity of
	 * 16 steps.
	 */
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = (bufmem - bufmem_lowater) /
	    ((bufmem_hiwater - bufmem_lowater) / 16);

	if (try >= thresh)
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}
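
/*
 * Worked example (editor's note): halfway between the watermarks,
 * thresh is 8, so "try" (a uniform value in 0..15) passes the
 * try >= thresh test half the time; at the high water mark thresh
 * is 16 and no value of try can pass, so admission probability
 * declines linearly from 1 to 0 across the watermark window.
 */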

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if (bufmem < bufmem_lowater)
		return 0;

	if (bufmem > bufmem_hiwater)
		return bufmem - bufmem_hiwater;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}
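
/*
 * Worked example (editor's note, assuming MAXBSIZE is 64k): with 1MB
 * on the AGE queue, bufmem 8MB above the low water mark and the page
 * daemon short 100 4k pages, the clamp evaluates as
 *
 *	MAX(1MB, MIN(2 * 64k, MIN(8MB / 16, 100 * 4k)))
 *	  == MAX(1MB, MIN(128k, MIN(512k, 400k))) == 1MB
 *
 * i.e. invalid AGE buffers dominate; without them the estimate would
 * have been capped at 128k, keeping release pressure gradual.
 */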

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
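
/*
 * Worked examples (editor's note): buf_mempoolidx() picks the smallest
 * power-of-two pool (1k and up) that holds "size" bytes, and
 * buf_roundsize() reports that pool's chunk size:
 *
 *	buf_mempoolidx(1)    == 0	buf_roundsize(1)    == 1024
 *	buf_mempoolidx(1024) == 0	buf_roundsize(1024) == 1024
 *	buf_mempoolidx(1025) == 1	buf_roundsize(1025) == 2048
 *	buf_mempoolidx(8192) == 3	buf_roundsize(8192) == 8192
 */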

static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
			"buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
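
/*
 * Typical usage (editor's sketch; vp, lbn and bsize are placeholders
 * for a filesystem's own vnode, logical block and block size):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... consume bp->b_data ...
 *	brelse(bp);
 *
 * Note that *bpp is set even when the read fails, so the buffer is
 * released on both paths.
 */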

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
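
/*
 * Typical usage (editor's sketch): a metadata block that will likely
 * be modified again soon is read, changed in core and marked dirty
 * with bdwrite() instead of being written synchronously:
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify bp->b_data ...
 *	bdwrite(bp);
 *
 * bdwrite() releases the buffer; the actual write happens later.
 */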

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueue *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
					"getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}
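
/*
 * Typical usage (editor's sketch): a block that will be completely
 * overwritten need not be read first; getblk() hands back a busy
 * buffer of the right size directly:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memset(bp->b_data, 0, bsize);
 *	... fill in bp->b_data ...
 *	bwrite(bp);
 *
 * bawrite() or bdwrite() may be used instead of bwrite() when the
 * caller does not need to wait for the write.
 */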

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested\n");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (!from_bufq && buf_lotsfree() &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
#if defined(DIAGNOSTIC)
		bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curproc != uvm.pagedaemon_proc) {
			/* wait for a free buffer of any kind */
			needbuffer = 1;
			ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
			    "getnewbuf", slptimeo, &bqueue_slock);
		}
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT(!ISSET(bp->b_flags, B_WANTED));
	simple_unlock(&bp->b_interlock);
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0, sz;

	s = splbio();
	simple_lock(&bqueue_slock);

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O.
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				simple_lock(&bp->b_interlock);
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				nbusy++;
				simple_unlock(&bp->b_interlock);
				simple_unlock(&bqueue_slock);
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					splx(s);
					goto fail;
				}
				simple_lock(&bqueue_slock);
			}
		    }
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		s = splbio();
		for (ihash = 0; ihash < bufhash+1; ihash++) {
		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);
		    }
		}
		splx(s);
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	struct buf_sysctl bs;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, s, elem_count;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				error = copyout(&bs, dp, out_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int*)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		buf_setwm();
	} else if (rnode->sysctl_data == &bufmem_lowater) {
		/* Validate against the proposed new value, not the old one. */
		if (bufmem_hiwater - t < 16)
			return (EINVAL);
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		if (t - bufmem_lowater < 16)
			return (EINVAL);
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2*1024)) <= 0)
			break;
	}

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "buf",
		       SYSCTL_DESCR("Kernel buffer cache information"),
		       sysctl_dobuf, 0, NULL, 0,
		       CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "vm", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufcache",
		       SYSCTL_DESCR("Percentage of physical memory to use for "
				    "buffer cache"),
		       sysctl_bufvm_update, 0, &bufcache, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "bufmem",
		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
				    "cache"),
		       NULL, 0, &bufmem, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_lowater",
		       SYSCTL_DESCR("Minimum amount of kernel memory to "
				    "reserve for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_hiwater",
		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
				    "for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
}
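
/*
 * Usage note (editor's addition): the nodes created above appear as
 * ordinary integer sysctls, e.g.
 *
 *	$ sysctl vm.bufmem
 *	# sysctl -w vm.bufcache=20
 *
 * Writes are routed through sysctl_bufvm_update(), which validates
 * the new value and, after lowering vm.bufmem_hiwater, drains the
 * cache back below the new high water mark before returning.
 */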

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueue *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */