1 /*	$NetBSD: vfs_bio.c,v 1.156 2006/02/04 12:02:35 yamt Exp $	*/
2 
3 /*-
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
37  */
38 
39 /*-
40  * Copyright (c) 1994 Christopher G. Demetriou
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
71  */
72 
73 /*
74  * Some references:
75  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
77  *		UNIX Operating System (Addison-Wesley, 1989)
78  */
79 
80 #include "opt_bufcache.h"
81 #include "opt_softdep.h"
82 
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.156 2006/02/04 12:02:35 yamt Exp $");
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/buf.h>
91 #include <sys/vnode.h>
92 #include <sys/mount.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sysctl.h>
96 #include <sys/conf.h>
97 
98 #include <uvm/uvm.h>
99 
100 #include <miscfs/specfs/specdev.h>
101 
102 #ifndef	BUFPAGES
103 # define BUFPAGES 0
104 #endif
105 
106 #ifdef BUFCACHE
107 # if (BUFCACHE < 5) || (BUFCACHE > 95)
108 #  error BUFCACHE is not between 5 and 95
109 # endif
110 #else
111 # define BUFCACHE 15
112 #endif
113 
114 u_int	nbuf;			/* XXX - for softdep_lockedbufs */
115 u_int	bufpages = BUFPAGES;	/* optional hardwired count */
116 u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */
117 
118 /* Function prototypes */
119 struct bqueue;
120 
121 static void buf_setwm(void);
122 static int buf_trim(void);
123 static void *bufpool_page_alloc(struct pool *, int);
124 static void bufpool_page_free(struct pool *, void *);
125 static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
126     struct ucred *, int);
127 static int buf_lotsfree(void);
128 static int buf_canrelease(void);
129 static inline u_long buf_mempoolidx(u_long);
130 static inline u_long buf_roundsize(u_long);
131 static inline caddr_t buf_malloc(size_t);
132 static void buf_mrelease(caddr_t, size_t);
133 static inline void binsheadfree(struct buf *, struct bqueue *);
134 static inline void binstailfree(struct buf *, struct bqueue *);
135 int count_lock_queue(void); /* XXX */
136 #ifdef DEBUG
137 static int checkfreelist(struct buf *, struct bqueue *);
138 #endif
139 
140 /* Macros to clear/set/test flags. */
141 #define	SET(t, f)	(t) |= (f)
142 #define	CLR(t, f)	(t) &= ~(f)
143 #define	ISSET(t, f)	((t) & (f))
144 
145 /*
146  * Definitions for the buffer hash lists.
147  */
148 #define	BUFHASH(dvp, lbn)	\
149 	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
150 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
151 u_long	bufhash;
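
/*
 * Illustrative example of how BUFHASH spreads buffers over the chains
 * (the pointer value is made up).  With 256 chains, bufhash is the
 * mask 0xff left behind by hashinit() (see bufinit() below):
 *
 *	dvp = 0xc0a1b200, lbn = 5
 *	((0xc0a1b200 >> 8) + 5) & 0xff == (0xc0a1b2 + 5) & 0xff == 0xb7
 *
 * so the buffer is kept on the chain headed by bufhashtbl[0xb7].
 */
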
152 #if !defined(SOFTDEP) || !defined(FFS)
153 struct bio_ops bioops;	/* I/O operation notification */
154 #endif
155 
156 /*
157  * Insq/Remq for the buffer hash lists.
158  */
159 #define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
160 #define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
161 
162 /*
163  * Definitions for the buffer free lists.
164  */
165 #define	BQUEUES		3		/* number of free buffer queues */
166 
167 #define	BQ_LOCKED	0		/* super-blocks &c */
168 #define	BQ_LRU		1		/* lru, useful buffers */
169 #define	BQ_AGE		2		/* rubbish */
170 
171 struct bqueue {
172 	TAILQ_HEAD(, buf) bq_queue;
173 	uint64_t bq_bytes;
174 } bufqueues[BQUEUES];
175 int needbuffer;
176 
177 /*
178  * Buffer queue lock.
179  * Take this lock first if also taking some buffer's b_interlock.
180  */
181 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
182 
183 /*
184  * Buffer pool for I/O buffers.
185  * Access to this pool must be protected with splbio().
186  */
187 static POOL_INIT(bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
188 
189 
190 /* XXX - somewhat gross.. */
191 #if MAXBSIZE == 0x2000
192 #define NMEMPOOLS 4
193 #elif MAXBSIZE == 0x4000
194 #define NMEMPOOLS 5
195 #elif MAXBSIZE == 0x8000
196 #define NMEMPOOLS 6
197 #else
198 #define NMEMPOOLS 7
199 #endif
200 
201 #define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
202 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
203 #error update vfs_bio buffer memory parameters
204 #endif
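
/*
 * Worked example: in the usual MAXBSIZE == 0x10000 (64k) case above,
 * NMEMPOOLS is 7 and pool i holds buffers of 1 << (i + 10) bytes for
 * i = 0..6, i.e. 1k, 2k, 4k, 8k, 16k, 32k and 64k.  The preprocessor
 * check verifies that the largest pool, 1 << (7 + 10 - 1), is exactly
 * MAXBSIZE.
 */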
205 
206 /* Buffer memory pools */
207 static struct pool bmempools[NMEMPOOLS];
208 
209 struct vm_map *buf_map;
210 
211 /*
212  * Buffer memory pool allocator.
213  */
214 static void *
215 bufpool_page_alloc(struct pool *pp, int flags)
216 {
217 
218 	return (void *)uvm_km_alloc(buf_map,
219 	    MAXBSIZE, MAXBSIZE,
220 	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
221 	    | UVM_KMF_WIRED);
222 }
223 
224 static void
225 bufpool_page_free(struct pool *pp, void *v)
226 {
227 
228 	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
229 }
230 
231 static struct pool_allocator bufmempool_allocator = {
232 	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
233 };
234 
235 /* Buffer memory management variables */
236 u_long bufmem_valimit;
237 u_long bufmem_hiwater;
238 u_long bufmem_lowater;
239 u_long bufmem;
240 
241 /*
242  * MD code can call this to set a hard limit on the amount
243  * of virtual memory used by the buffer cache.
244  */
245 int
246 buf_setvalimit(vsize_t sz)
247 {
248 
249 	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
250 	if (sz < NMEMPOOLS * MAXBSIZE)
251 		return EINVAL;
252 
253 	bufmem_valimit = sz;
254 	return 0;
255 }
256 
257 static void
258 buf_setwm(void)
259 {
260 
261 	bufmem_hiwater = buf_memcalc();
262 	/* lowater is approx. 2% of memory (with bufcache = 15) */
263 #define	BUFMEM_WMSHIFT	3
264 #define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
265 	if (bufmem_hiwater < BUFMEM_HIWMMIN)
266 		/* Ensure a reasonable minimum value */
267 		bufmem_hiwater = BUFMEM_HIWMMIN;
268 	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
269 }
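
/*
 * Watermark arithmetic, spelled out: bufmem_hiwater defaults to
 * bufcache percent of physical memory (see buf_memcalc() below), and
 * bufmem_lowater is bufmem_hiwater / 8.  With the default bufcache of
 * 15, that makes lowater 15% / 8 ~= 1.875% of memory, hence the
 * "approx. 2%" note above.  The BUFMEM_HIWMMIN clamp keeps hiwater at
 * no less than 512k, and therefore lowater at no less than 64k.
 */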
270 
271 #ifdef DEBUG
272 int debug_verify_freelist = 0;
273 static int
274 checkfreelist(struct buf *bp, struct bqueue *dp)
275 {
276 	struct buf *b;
277 
278 	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
279 		if (b == bp)
280 			return 1;
281 	}
282 	return 0;
283 }
284 #endif
285 
286 /*
287  * Insq/Remq for the buffer free lists.
288  * Call with buffer queue locked.
289  */
290 static inline void
291 binsheadfree(struct buf *bp, struct bqueue *dp)
292 {
293 
294 	KASSERT(bp->b_freelistindex == -1);
295 	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
296 	dp->bq_bytes += bp->b_bufsize;
297 	bp->b_freelistindex = dp - bufqueues;
298 }
299 
300 static inline void
301 binstailfree(struct buf *bp, struct bqueue *dp)
302 {
303 
304 	KASSERT(bp->b_freelistindex == -1);
305 	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
306 	dp->bq_bytes += bp->b_bufsize;
307 	bp->b_freelistindex = dp - bufqueues;
308 }
309 
310 void
311 bremfree(struct buf *bp)
312 {
313 	struct bqueue *dp;
314 	int bqidx = bp->b_freelistindex;
315 
316 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
317 
318 	KASSERT(bqidx != -1);
319 	dp = &bufqueues[bqidx];
320 	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
321 	KASSERT(dp->bq_bytes >= bp->b_bufsize);
322 	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
323 	dp->bq_bytes -= bp->b_bufsize;
324 #if defined(DIAGNOSTIC)
325 	bp->b_freelistindex = -1;
326 #endif /* defined(DIAGNOSTIC) */
327 }
328 
329 u_long
330 buf_memcalc(void)
331 {
332 	u_long n;
333 
334 	/*
335 	 * Determine the upper bound of memory to use for buffers.
336 	 *
337 	 *	- If bufpages is specified, use that as the number
338 	 *	  of pages.
339 	 *
340 	 *	- Otherwise, use bufcache as the percentage of
341 	 *	  physical memory.
342 	 */
343 	if (bufpages != 0) {
344 		n = bufpages;
345 	} else {
346 		if (bufcache < 5) {
347 			printf("forcing bufcache %d -> 5", bufcache);
348 			bufcache = 5;
349 		}
350 		if (bufcache > 95) {
351 			printf("forcing bufcache %d -> 95", bufcache);
352 			bufcache = 95;
353 		}
354 		n = physmem / 100 * bufcache;
355 	}
356 
357 	n <<= PAGE_SHIFT;
358 	if (bufmem_valimit != 0 && n > bufmem_valimit)
359 		n = bufmem_valimit;
360 
361 	return (n);
362 }
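
/*
 * For example (illustrative figures, assuming PAGE_SIZE is 4096): on
 * a 512MB machine physmem is 131072 pages, so with the default
 * bufcache of 15, n = 131072 / 100 * 15 = 19650 pages, i.e. about
 * 76.8MB (80486400 bytes) after the shift by PAGE_SHIFT.
 */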
363 
364 /*
365  * Initialize buffers and hash links for buffers.
366  */
367 void
368 bufinit(void)
369 {
370 	struct bqueue *dp;
371 	int use_std;
372 	u_int i;
373 
374 	/*
375 	 * Initialize buffer cache memory parameters.
376 	 */
377 	bufmem = 0;
378 	buf_setwm();
379 
380 	if (bufmem_valimit != 0) {
381 		vaddr_t minaddr = 0, maxaddr;
382 		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
383 					  bufmem_valimit, VM_MAP_PAGEABLE,
384 					  FALSE, 0);
385 		if (buf_map == NULL)
386 			panic("bufinit: cannot allocate submap");
387 	} else
388 		buf_map = kernel_map;
389 
390 	/* On "small" machines use small pool page sizes where possible */
391 	use_std = (physmem < atop(16*1024*1024));
392 
393 	/*
394 	 * Also use them on systems that can map the pool pages using
395 	 * a direct-mapped segment.
396 	 */
397 #ifdef PMAP_MAP_POOLPAGE
398 	use_std = 1;
399 #endif
400 
401 	for (i = 0; i < NMEMPOOLS; i++) {
402 		struct pool_allocator *pa;
403 		struct pool *pp = &bmempools[i];
404 		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
405 		char *name = malloc(8, M_TEMP, M_WAITOK);
406 		snprintf(name, 8, "buf%dk", 1 << i);
407 		pa = (size <= PAGE_SIZE && use_std)
408 			? &pool_allocator_nointr
409 			: &bufmempool_allocator;
410 		pool_init(pp, size, 0, 0, 0, name, pa);
411 		pool_setlowat(pp, 1);
412 		pool_sethiwat(pp, 1);
413 	}
414 
415 	/* Initialize the buffer queues */
416 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
417 		TAILQ_INIT(&dp->bq_queue);
418 		dp->bq_bytes = 0;
419 	}
420 
421 	/*
422 	 * Estimate hash table size based on the amount of memory we
423 	 * intend to use for the buffer cache. The average buffer
424 	 * size is dependent on our clients (i.e. filesystems).
425 	 *
426 	 * For now, use an empirical 3K per buffer.
427 	 */
428 	nbuf = (bufmem_hiwater / 1024) / 3;
429 	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
430 }
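
/*
 * Continuing the sizing example from buf_memcalc() (illustrative):
 * with bufmem_hiwater at 80486400 bytes, nbuf = (80486400 / 1024) / 3
 * = 26200, which hashinit() rounds up to a power of two (32768
 * chains), leaving the chain mask in bufhash.
 */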
431 
432 static int
433 buf_lotsfree(void)
434 {
435 	int try, thresh;
436 	struct lwp *l = curlwp;
437 
438 	/* Always allocate if doing copy on write */
439 	if (l->l_flag & L_COWINPROGRESS)
440 		return 1;
441 
442 	/* Always allocate if less than the low water mark. */
443 	if (bufmem < bufmem_lowater)
444 		return 1;
445 
446 	/* Never allocate if greater than the high water mark. */
447 	if (bufmem > bufmem_hiwater)
448 		return 0;
449 
450 	/* If there's anything on the AGE list, it should be eaten. */
451 	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
452 		return 0;
453 
454 	/*
455 	 * The probability of getting a new allocation is inversely
456 	 * proportional to the current size of the cache, using
457 	 * a granularity of 16 steps.
458 	 */
459 	try = random() & 0x0000000fL;
460 
461 	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
462 	thresh = (bufmem - bufmem_lowater) /
463 	    ((bufmem_hiwater - bufmem_lowater) / 16);
464 
465 	if (try >= thresh)
466 		return 1;
467 
468 	/* Otherwise don't allocate. */
469 	return 0;
470 }
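
/*
 * For instance: try is uniform over 0..15.  With bufmem exactly
 * halfway between the watermarks, thresh works out to 8, so an
 * allocation is granted with probability 8/16; as bufmem approaches
 * bufmem_hiwater, thresh approaches 16 and new allocations become
 * rare.
 */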
471 
472 /*
473  * Return an estimate of the number of bytes that should be
474  * released to help resolve low-memory conditions.
475  *
476  * => called at splbio.
477  * => called with bqueue_slock held.
478  */
479 static int
480 buf_canrelease(void)
481 {
482 	int pagedemand, ninvalid = 0;
483 
484 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
485 
486 	if (bufmem < bufmem_lowater)
487 		return 0;
488 
489 	if (bufmem > bufmem_hiwater)
490 		return bufmem - bufmem_hiwater;
491 
492 	ninvalid += bufqueues[BQ_AGE].bq_bytes;
493 
494 	pagedemand = uvmexp.freetarg - uvmexp.free;
495 	if (pagedemand < 0)
496 		return ninvalid;
497 	return MAX(ninvalid, MIN(2 * MAXBSIZE,
498 	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
499 }
500 
501 /*
502  * Buffer memory allocation helper functions
503  */
504 static inline u_long
505 buf_mempoolidx(u_long size)
506 {
507 	u_int n = 0;
508 
509 	size -= 1;
510 	size >>= MEMPOOL_INDEX_OFFSET;
511 	while (size) {
512 		size >>= 1;
513 		n += 1;
514 	}
515 	if (n >= NMEMPOOLS)
516 		panic("buf mem pool index %d", n);
517 	return n;
518 }
519 
520 static inline u_long
521 buf_roundsize(u_long size)
522 {
523 	/* Round up to nearest power of 2 */
524 	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
525 }
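
/*
 * Worked example: buf_mempoolidx(3000) computes (3000 - 1) >> 10 == 2,
 * then needs two right shifts to reach zero, giving pool index 2, the
 * 4k pool; buf_roundsize(3000) is therefore 1 << (2 + 10) == 4096.
 * An exact power of two such as 4096 maps to its own pool, not the
 * next one up.
 */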
526 
527 static inline caddr_t
528 buf_malloc(size_t size)
529 {
530 	u_int n = buf_mempoolidx(size);
531 	caddr_t addr;
532 	int s;
533 
534 	while (1) {
535 		addr = pool_get(&bmempools[n], PR_NOWAIT);
536 		if (addr != NULL)
537 			break;
538 
539 		/* No memory, see if we can free some. If so, try again */
540 		if (buf_drain(1) > 0)
541 			continue;
542 
543 		/* Wait for buffers to arrive on the LRU queue */
544 		s = splbio();
545 		simple_lock(&bqueue_slock);
546 		needbuffer = 1;
547 		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
548 			"buf_malloc", 0, &bqueue_slock);
549 		splx(s);
550 	}
551 
552 	return addr;
553 }
554 
555 static void
556 buf_mrelease(caddr_t addr, size_t size)
557 {
558 
559 	pool_put(&bmempools[buf_mempoolidx(size)], addr);
560 }
561 
562 /*
563  * bread()/breadn() helper.
564  */
565 static inline struct buf *
566 bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
567     int async)
568 {
569 	struct buf *bp;
570 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
571 	struct proc *p = l->l_proc;
572 	struct mount *mp;
573 
574 	bp = getblk(vp, blkno, size, 0, 0);
575 
576 #ifdef DIAGNOSTIC
577 	if (bp == NULL) {
578 		panic("bio_doread: no such buf");
579 	}
580 #endif
581 
582 	/*
583 	 * If the buffer does not have valid data, start a read.
584 	 * Note that if the buffer is B_INVAL, getblk() won't return it.
585 	 * Therefore, it's valid if its I/O has completed or been delayed.
586 	 */
587 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
588 		/* Start I/O for the buffer. */
589 		SET(bp->b_flags, B_READ | async);
590 		if (async)
591 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
592 		else
593 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
594 		VOP_STRATEGY(vp, bp);
595 
596 		/* Pay for the read. */
597 		p->p_stats->p_ru.ru_inblock++;
598 	} else if (async) {
599 		brelse(bp);
600 	}
601 
602 	if (vp->v_type == VBLK)
603 		mp = vp->v_specmountpoint;
604 	else
605 		mp = vp->v_mount;
606 
607 	/*
608 	 * Collect statistics on synchronous and asynchronous reads.
609 	 * Reads from block devices are charged to their associated
610 	 * filesystem (if any).
611 	 */
612 	if (mp != NULL) {
613 		if (async == 0)
614 			mp->mnt_stat.f_syncreads++;
615 		else
616 			mp->mnt_stat.f_asyncreads++;
617 	}
618 
619 	return (bp);
620 }
621 
622 /*
623  * Read a disk block.
624  * This algorithm is described in Bach (p.54).
625  */
626 int
627 bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
628     struct buf **bpp)
629 {
630 	struct buf *bp;
631 
632 	/* Get buffer for block. */
633 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
634 
635 	/* Wait for the read to complete, and return result. */
636 	return (biowait(bp));
637 }
638 
639 /*
640  * Read-ahead multiple disk blocks. The first is sync, the rest async.
641  * Trivial modification to the breada algorithm presented in Bach (p.55).
642  */
643 int
644 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
645     int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
646 {
647 	struct buf *bp;
648 	int i;
649 
650 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
651 
652 	/*
653 	 * For each of the read-ahead blocks, start a read, if necessary.
654 	 */
655 	for (i = 0; i < nrablks; i++) {
656 		/* If it's in the cache, just go on to next one. */
657 		if (incore(vp, rablks[i]))
658 			continue;
659 
660 		/* Get a buffer for the read-ahead block */
661 		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
662 	}
663 
664 	/* Wait until the first block is valid, and return the result. */
665 	return (biowait(bp));
666 }
667 
668 /*
669  * Read with single-block read-ahead.  Defined in Bach (p.55), but
670  * implemented as a call to breadn().
671  * XXX for compatibility with old file systems.
672  */
673 int
674 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
675     int rabsize, struct ucred *cred, struct buf **bpp)
676 {
677 
678 	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
679 }
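
/*
 * A sketch of the usual calling convention (illustrative, not taken
 * from this file): even on error the buffer is returned through *bpp
 * and must be released by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return error;
 *	}
 *	...inspect or modify bp->b_data...
 *	brelse(bp);			(or bwrite(bp) if modified)
 */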
680 
681 /*
682  * Block write.  Described in Bach (p.56)
683  */
684 int
685 bwrite(struct buf *bp)
686 {
687 	int rv, sync, wasdelayed, s;
688 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
689 	struct proc *p = l->l_proc;
690 	struct vnode *vp;
691 	struct mount *mp;
692 
693 	KASSERT(ISSET(bp->b_flags, B_BUSY));
694 
695 	vp = bp->b_vp;
696 	if (vp != NULL) {
697 		if (vp->v_type == VBLK)
698 			mp = vp->v_specmountpoint;
699 		else
700 			mp = vp->v_mount;
701 	} else {
702 		mp = NULL;
703 	}
704 
705 	/*
706 	 * Remember buffer type, to switch on it later.  If the write was
707 	 * synchronous, but the file system was mounted with MNT_ASYNC,
708 	 * convert it to a delayed write.
709 	 * XXX note that this relies on delayed tape writes being converted
710 	 * to async, not sync writes (which is safe, but ugly).
711 	 */
712 	sync = !ISSET(bp->b_flags, B_ASYNC);
713 	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
714 		bdwrite(bp);
715 		return (0);
716 	}
717 
718 	/*
719 	 * Collect statistics on synchronous and asynchronous writes.
720 	 * Writes to block devices are charged to their associated
721 	 * filesystem (if any).
722 	 */
723 	if (mp != NULL) {
724 		if (sync)
725 			mp->mnt_stat.f_syncwrites++;
726 		else
727 			mp->mnt_stat.f_asyncwrites++;
728 	}
729 
730 	s = splbio();
731 	simple_lock(&bp->b_interlock);
732 
733 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
734 
735 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
736 
737 	/*
738 	 * Pay for the I/O operation and make sure the buf is on the correct
739 	 * vnode queue.
740 	 */
741 	if (wasdelayed)
742 		reassignbuf(bp, bp->b_vp);
743 	else
744 		p->p_stats->p_ru.ru_oublock++;
745 
746 	/* Initiate disk write.  Make sure the appropriate party is charged. */
747 	V_INCR_NUMOUTPUT(bp->b_vp);
748 	simple_unlock(&bp->b_interlock);
749 	splx(s);
750 
751 	if (sync)
752 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
753 	else
754 		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
755 
756 	VOP_STRATEGY(vp, bp);
757 
758 	if (sync) {
759 		/* If I/O was synchronous, wait for it to complete. */
760 		rv = biowait(bp);
761 
762 		/* Release the buffer. */
763 		brelse(bp);
764 
765 		return (rv);
766 	} else {
767 		return (0);
768 	}
769 }
770 
771 int
772 vn_bwrite(void *v)
773 {
774 	struct vop_bwrite_args *ap = v;
775 
776 	return (bwrite(ap->a_bp));
777 }
778 
779 /*
780  * Delayed write.
781  *
782  * The buffer is marked dirty, but is not queued for I/O.
783  * This routine should be used when the buffer is expected
784  * to be modified again soon, typically a small write that
785  * partially fills a buffer.
786  *
787  * NB: magnetic tapes cannot be delayed; they must be
788  * written in the order that the writes are requested.
789  *
790  * Described in Leffler, et al. (pp. 208-213).
791  */
792 void
793 bdwrite(struct buf *bp)
794 {
795 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
796 	struct proc *p = l->l_proc;
797 	const struct bdevsw *bdev;
798 	int s;
799 
800 	/* If this is a tape block, write the block now. */
801 	bdev = bdevsw_lookup(bp->b_dev);
802 	if (bdev != NULL && bdev->d_type == D_TAPE) {
803 		bawrite(bp);
804 		return;
805 	}
806 
807 	/*
808 	 * If the block hasn't been seen before:
809 	 *	(1) Mark it as having been seen,
810 	 *	(2) Charge for the write,
811 	 *	(3) Make sure it's on its vnode's correct block list.
812 	 */
813 	s = splbio();
814 	simple_lock(&bp->b_interlock);
815 
816 	KASSERT(ISSET(bp->b_flags, B_BUSY));
817 
818 	if (!ISSET(bp->b_flags, B_DELWRI)) {
819 		SET(bp->b_flags, B_DELWRI);
820 		p->p_stats->p_ru.ru_oublock++;
821 		reassignbuf(bp, bp->b_vp);
822 	}
823 
824 	/* Otherwise, the "write" is done, so mark and release the buffer. */
825 	CLR(bp->b_flags, B_DONE);
826 	simple_unlock(&bp->b_interlock);
827 	splx(s);
828 
829 	brelse(bp);
830 }
831 
832 /*
833  * Asynchronous block write; just an asynchronous bwrite().
834  */
835 void
836 bawrite(struct buf *bp)
837 {
838 	int s;
839 
840 	s = splbio();
841 	simple_lock(&bp->b_interlock);
842 
843 	KASSERT(ISSET(bp->b_flags, B_BUSY));
844 
845 	SET(bp->b_flags, B_ASYNC);
846 	simple_unlock(&bp->b_interlock);
847 	splx(s);
848 	VOP_BWRITE(bp);
849 }
850 
851 /*
852  * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
853  * Call at splbio() and with the buffer interlock locked.
854  * Note: called only from biodone() through ffs softdep's bioops.io_complete()
855  */
856 void
857 bdirty(struct buf *bp)
858 {
859 	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
860 	struct proc *p = l->l_proc;
861 
862 	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
863 	KASSERT(ISSET(bp->b_flags, B_BUSY));
864 
865 	CLR(bp->b_flags, B_AGE);
866 
867 	if (!ISSET(bp->b_flags, B_DELWRI)) {
868 		SET(bp->b_flags, B_DELWRI);
869 		p->p_stats->p_ru.ru_oublock++;
870 		reassignbuf(bp, bp->b_vp);
871 	}
872 }
873 
874 /*
875  * Release a buffer on to the free lists.
876  * Described in Bach (p. 46).
877  */
878 void
879 brelse(struct buf *bp)
880 {
881 	struct bqueue *bufq;
882 	int s;
883 
884 	/* Block disk interrupts. */
885 	s = splbio();
886 	simple_lock(&bqueue_slock);
887 	simple_lock(&bp->b_interlock);
888 
889 	KASSERT(ISSET(bp->b_flags, B_BUSY));
890 	KASSERT(!ISSET(bp->b_flags, B_CALL));
891 
892 	/* Wake up any processes waiting for any buffer to become free. */
893 	if (needbuffer) {
894 		needbuffer = 0;
895 		wakeup(&needbuffer);
896 	}
897 
898 	/* Wake up any processes waiting for _this_ buffer to become free. */
899 	if (ISSET(bp->b_flags, B_WANTED)) {
900 		CLR(bp->b_flags, B_WANTED|B_AGE);
901 		wakeup(bp);
902 	}
903 
904 	/*
905 	 * Determine which queue the buffer should be on, then put it there.
906 	 */
907 
908 	/* If it's locked, don't report an error; try again later. */
909 	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
910 		CLR(bp->b_flags, B_ERROR);
911 
912 	/* If it's not cacheable, or an error, mark it invalid. */
913 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
914 		SET(bp->b_flags, B_INVAL);
915 
916 	if (ISSET(bp->b_flags, B_VFLUSH)) {
917 		/*
918 		 * This is a delayed write buffer that was just flushed to
919 		 * disk.  It is still on the LRU queue.  If it's become
920 		 * invalid, then we need to move it to a different queue;
921 		 * otherwise leave it in its current position.
922 		 */
923 		CLR(bp->b_flags, B_VFLUSH);
924 		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
925 			KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
926 			goto already_queued;
927 		} else {
928 			bremfree(bp);
929 		}
930 	}
931 
932 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
933 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
934 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
935 
936 	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
937 		/*
938 		 * If it's invalid or empty, dissociate it from its vnode
939 		 * and put on the head of the appropriate queue.
940 		 */
941 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
942 			(*bioops.io_deallocate)(bp);
943 		CLR(bp->b_flags, B_DONE|B_DELWRI);
944 		if (bp->b_vp) {
945 			reassignbuf(bp, bp->b_vp);
946 			brelvp(bp);
947 		}
948 		if (bp->b_bufsize <= 0)
949 			/* no data */
950 			goto already_queued;
951 		else
952 			/* invalid data */
953 			bufq = &bufqueues[BQ_AGE];
954 		binsheadfree(bp, bufq);
955 	} else {
956 		/*
957 		 * It has valid data.  Put it on the end of the appropriate
958 		 * queue, so that it'll stick around for as long as possible.
959 		 * If buf is AGE, but has dependencies, must put it on last
960 		 * bufqueue to be scanned, ie LRU. This protects against the
961 		 * livelock where BQ_AGE only has buffers with dependencies,
962 		 * and we thus never get to the dependent buffers in BQ_LRU.
963 		 */
964 		if (ISSET(bp->b_flags, B_LOCKED))
965 			/* locked in core */
966 			bufq = &bufqueues[BQ_LOCKED];
967 		else if (!ISSET(bp->b_flags, B_AGE))
968 			/* valid data */
969 			bufq = &bufqueues[BQ_LRU];
970 		else {
971 			/* stale but valid data */
972 			int has_deps;
973 
974 			if (LIST_FIRST(&bp->b_dep) != NULL &&
975 			    bioops.io_countdeps)
976 				has_deps = (*bioops.io_countdeps)(bp, 0);
977 			else
978 				has_deps = 0;
979 			bufq = has_deps ? &bufqueues[BQ_LRU] :
980 			    &bufqueues[BQ_AGE];
981 		}
982 		binstailfree(bp, bufq);
983 	}
984 
985 already_queued:
986 	/* Unlock the buffer. */
987 	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
988 	SET(bp->b_flags, B_CACHE);
989 
990 	/* Allow disk interrupts. */
991 	simple_unlock(&bp->b_interlock);
992 	simple_unlock(&bqueue_slock);
993 	if (bp->b_bufsize <= 0) {
994 #ifdef DEBUG
995 		memset((char *)bp, 0, sizeof(*bp));
996 #endif
997 		pool_put(&bufpool, bp);
998 	}
999 	splx(s);
1000 }
1001 
1002 /*
1003  * Determine if a block is in the cache.
1004  * Just look on what would be its hash chain.  If it's there, return
1005  * a pointer to it, unless it's marked invalid.  If it's marked invalid,
1006  * we normally don't return the buffer, unless the caller explicitly
1007  * wants us to.
1008  */
1009 struct buf *
1010 incore(struct vnode *vp, daddr_t blkno)
1011 {
1012 	struct buf *bp;
1013 
1014 	/* Search hash chain */
1015 	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1016 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1017 		    !ISSET(bp->b_flags, B_INVAL))
1018 			return (bp);
1019 	}
1020 
1021 	return (NULL);
1022 }
1023 
1024 /*
1025  * Get a block of requested size that is associated with
1026  * a given vnode and block offset. If it is found in the
1027  * block cache, mark it as having been found, make it busy
1028  * and return it. Otherwise, return an empty block of the
1029  * correct size. It is up to the caller to ensure that the
1030  * cached blocks are of the correct size.
1031  */
1032 struct buf *
1033 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1034 {
1035 	struct buf *bp;
1036 	int s, err;
1037 	int preserve;
1038 
1039 start:
1040 	s = splbio();
1041 	simple_lock(&bqueue_slock);
1042 	bp = incore(vp, blkno);
1043 	if (bp != NULL) {
1044 		simple_lock(&bp->b_interlock);
1045 		if (ISSET(bp->b_flags, B_BUSY)) {
1046 			simple_unlock(&bqueue_slock);
1047 			if (curproc == uvm.pagedaemon_proc) {
1048 				simple_unlock(&bp->b_interlock);
1049 				splx(s);
1050 				return NULL;
1051 			}
1052 			SET(bp->b_flags, B_WANTED);
1053 			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
1054 					"getblk", slptimeo, &bp->b_interlock);
1055 			splx(s);
1056 			if (err)
1057 				return (NULL);
1058 			goto start;
1059 		}
1060 #ifdef DIAGNOSTIC
1061 		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
1062 		    bp->b_bcount < size && vp->v_type != VBLK)
1063 			panic("getblk: block size invariant failed");
1064 #endif
1065 		SET(bp->b_flags, B_BUSY);
1066 		bremfree(bp);
1067 		preserve = 1;
1068 	} else {
1069 		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
1070 			simple_unlock(&bqueue_slock);
1071 			splx(s);
1072 			goto start;
1073 		}
1074 
1075 		binshash(bp, BUFHASH(vp, blkno));
1076 		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1077 		bgetvp(vp, bp);
1078 		preserve = 0;
1079 	}
1080 	simple_unlock(&bp->b_interlock);
1081 	simple_unlock(&bqueue_slock);
1082 	splx(s);
1083 	/*
1084 	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1085 	 * if we re-size buffers here.
1086 	 */
1087 	if (ISSET(bp->b_flags, B_LOCKED)) {
1088 		KASSERT(bp->b_bufsize >= size);
1089 	} else {
1090 		allocbuf(bp, size, preserve);
1091 	}
1092 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1093 	return (bp);
1094 }
1095 
1096 /*
1097  * Get an empty, disassociated buffer of given size.
1098  */
1099 struct buf *
1100 geteblk(int size)
1101 {
1102 	struct buf *bp;
1103 	int s;
1104 
1105 	s = splbio();
1106 	simple_lock(&bqueue_slock);
1107 	while ((bp = getnewbuf(0, 0, 0)) == 0)
1108 		;
1109 
1110 	SET(bp->b_flags, B_INVAL);
1111 	binshash(bp, &invalhash);
1112 	simple_unlock(&bqueue_slock);
1113 	simple_unlock(&bp->b_interlock);
1114 	splx(s);
1115 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1116 	allocbuf(bp, size, 0);
1117 	return (bp);
1118 }
1119 
1120 /*
1121  * Expand or contract the actual memory allocated to a buffer.
1122  *
1123  * If the buffer shrinks, data is lost, so it's up to the
1124  * caller to have written it out *first*; this routine will not
1125  * start a write.  If the buffer grows, it's the caller's
1126  * responsibility to fill out the buffer's additional contents.
1127  */
1128 void
1129 allocbuf(struct buf *bp, int size, int preserve)
1130 {
1131 	vsize_t oldsize, desired_size;
1132 	caddr_t addr;
1133 	int s, delta;
1134 
1135 	desired_size = buf_roundsize(size);
1136 	if (desired_size > MAXBSIZE)
1137 		printf("allocbuf: buffer larger than MAXBSIZE requested");
1138 
1139 	bp->b_bcount = size;
1140 
1141 	oldsize = bp->b_bufsize;
1142 	if (oldsize == desired_size)
1143 		return;
1144 
1145 	/*
1146 	 * If we want a buffer of a different size, re-allocate the
1147 	 * buffer's memory; copy old content only if needed.
1148 	 */
1149 	addr = buf_malloc(desired_size);
1150 	if (preserve)
1151 		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1152 	if (bp->b_data != NULL)
1153 		buf_mrelease(bp->b_data, oldsize);
1154 	bp->b_data = addr;
1155 	bp->b_bufsize = desired_size;
1156 
1157 	/*
1158 	 * Update overall buffer memory counter (protected by bqueue_slock)
1159 	 */
1160 	delta = (long)desired_size - (long)oldsize;
1161 
1162 	s = splbio();
1163 	simple_lock(&bqueue_slock);
1164 	if ((bufmem += delta) > bufmem_hiwater) {
1165 		/*
1166 		 * Need to trim overall memory usage.
1167 		 */
1168 		while (buf_canrelease()) {
1169 			if (curcpu()->ci_schedstate.spc_flags &
1170 			    SPCF_SHOULDYIELD) {
1171 				simple_unlock(&bqueue_slock);
1172 				splx(s);
1173 				preempt(1);
1174 				s = splbio();
1175 				simple_lock(&bqueue_slock);
1176 			}
1177 
1178 			if (buf_trim() == 0)
1179 				break;
1180 		}
1181 	}
1182 
1183 	simple_unlock(&bqueue_slock);
1184 	splx(s);
1185 }
1186 
1187 /*
1188  * Find a buffer which is available for use.
1189  * Select something from a free list.
1190  * Preference is to AGE list, then LRU list.
1191  *
1192  * Called at splbio and with buffer queues locked.
1193  * Return buffer locked.
1194  */
1195 struct buf *
1196 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1197 {
1198 	struct buf *bp;
1199 
1200 start:
1201 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1202 
1203 	/*
1204 	 * Get a new buffer from the pool; but use NOWAIT because
1205 	 * we have the buffer queues locked.
1206 	 */
1207 	if (!from_bufq && buf_lotsfree() &&
1208 	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1209 		memset((char *)bp, 0, sizeof(*bp));
1210 		BUF_INIT(bp);
1211 		bp->b_dev = NODEV;
1212 		bp->b_vnbufs.le_next = NOLIST;
1213 		bp->b_flags = B_BUSY;
1214 		simple_lock(&bp->b_interlock);
1215 #if defined(DIAGNOSTIC)
1216 		bp->b_freelistindex = -1;
1217 #endif /* defined(DIAGNOSTIC) */
1218 		return (bp);
1219 	}
1220 
1221 	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
1222 	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
1223 		simple_lock(&bp->b_interlock);
1224 		bremfree(bp);
1225 	} else {
1226 		/*
1227 		 * XXX: !from_bufq should be removed.
1228 		 */
1229 		if (!from_bufq || curproc != uvm.pagedaemon_proc) {
1230 			/* wait for a free buffer of any kind */
1231 			needbuffer = 1;
1232 			ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
1233 			    "getnewbuf", slptimeo, &bqueue_slock);
1234 		}
1235 		return (NULL);
1236 	}
1237 
1238 #ifdef DIAGNOSTIC
1239 	if (bp->b_bufsize <= 0)
1240 		panic("buffer %p: on queue but empty", bp);
1241 #endif
1242 
1243 	if (ISSET(bp->b_flags, B_VFLUSH)) {
1244 		/*
1245 		 * This is a delayed write buffer being flushed to disk.  Make
1246 		 * sure it gets aged out of the queue when it's finished, and
1247 		 * leave it off the LRU queue.
1248 		 */
1249 		CLR(bp->b_flags, B_VFLUSH);
1250 		SET(bp->b_flags, B_AGE);
1251 		simple_unlock(&bp->b_interlock);
1252 		goto start;
1253 	}
1254 
1255 	/* Buffer is no longer on free lists. */
1256 	SET(bp->b_flags, B_BUSY);
1257 
1258 	/*
1259 	 * If buffer was a delayed write, start it and return NULL
1260 	 * (since we might sleep while starting the write).
1261 	 */
1262 	if (ISSET(bp->b_flags, B_DELWRI)) {
1263 		/*
1264 		 * This buffer has gone through the LRU, so make sure it gets
1265 		 * reused ASAP.
1266 		 */
1267 		SET(bp->b_flags, B_AGE);
1268 		simple_unlock(&bp->b_interlock);
1269 		simple_unlock(&bqueue_slock);
1270 		bawrite(bp);
1271 		simple_lock(&bqueue_slock);
1272 		return (NULL);
1273 	}
1274 
1275 	/* disassociate us from our vnode, if we had one... */
1276 	if (bp->b_vp)
1277 		brelvp(bp);
1278 
1279 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1280 		(*bioops.io_deallocate)(bp);
1281 
1282 	/* clear out various other fields */
1283 	bp->b_flags = B_BUSY;
1284 	bp->b_dev = NODEV;
1285 	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1286 	bp->b_iodone = 0;
1287 	bp->b_error = 0;
1288 	bp->b_resid = 0;
1289 	bp->b_bcount = 0;
1290 
1291 	bremhash(bp);
1292 	return (bp);
1293 }
1294 
1295 /*
1296  * Attempt to free an aged buffer off the queues.
1297  * Called at splbio and with queue lock held.
1298  * Returns the amount of buffer memory freed.
1299  */
1300 static int
1301 buf_trim(void)
1302 {
1303 	struct buf *bp;
1304 	long size = 0;
1305 
1306 	/* Instruct getnewbuf() to get buffers off the queues */
1307 	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1308 		return 0;
1309 
1310 	KASSERT(!ISSET(bp->b_flags, B_WANTED));
1311 	simple_unlock(&bp->b_interlock);
1312 	size = bp->b_bufsize;
1313 	bufmem -= size;
1314 	simple_unlock(&bqueue_slock);
1315 	if (size > 0) {
1316 		buf_mrelease(bp->b_data, size);
1317 		bp->b_bcount = bp->b_bufsize = 0;
1318 	}
1319 	/* brelse() will return the buffer to the global buffer pool */
1320 	brelse(bp);
1321 	simple_lock(&bqueue_slock);
1322 	return size;
1323 }
1324 
1325 int
1326 buf_drain(int n)
1327 {
1328 	int s, size = 0, sz;
1329 
1330 	s = splbio();
1331 	simple_lock(&bqueue_slock);
1332 
1333 	while (size < n && bufmem > bufmem_lowater) {
1334 		sz = buf_trim();
1335 		if (sz <= 0)
1336 			break;
1337 		size += sz;
1338 	}
1339 
1340 	simple_unlock(&bqueue_slock);
1341 	splx(s);
1342 	return size;
1343 }
1344 
1345 /*
1346  * Wait for operations on the buffer to complete.
1347  * When they do, extract and return the I/O's error value.
1348  */
1349 int
1350 biowait(struct buf *bp)
1351 {
1352 	int s, error;
1353 
1354 	s = splbio();
1355 	simple_lock(&bp->b_interlock);
1356 	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1357 		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1358 
1359 	/* check errors. */
1360 	if (ISSET(bp->b_flags, B_ERROR))
1361 		error = bp->b_error ? bp->b_error : EIO;
1362 	else
1363 		error = 0;
1364 
1365 	simple_unlock(&bp->b_interlock);
1366 	splx(s);
1367 	return (error);
1368 }
1369 
1370 /*
1371  * Mark I/O complete on a buffer.
1372  *
1373  * If a callback has been requested, e.g. the pageout
1374  * daemon, do so. Otherwise, awaken waiting processes.
1375  *
1376  * [ Leffler, et al., says on p.247:
1377  *	"This routine wakes up the blocked process, frees the buffer
1378  *	for an asynchronous write, or, for a request by the pagedaemon
1379  *	process, invokes a procedure specified in the buffer structure" ]
1380  *
1381  * In real life, the pagedaemon (or other system processes) wants
1382  * to do async stuff too, and doesn't want the buffer brelse()'d.
1383  * (for swap pager, that puts swap buffers on the free lists (!!!),
1384  * for the vn device, that puts malloc'd buffers on the free lists!)
1385  */
1386 void
1387 biodone(struct buf *bp)
1388 {
1389 	int s = splbio();
1390 
1391 	simple_lock(&bp->b_interlock);
1392 	if (ISSET(bp->b_flags, B_DONE))
1393 		panic("biodone already");
1394 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1395 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1396 
1397 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1398 		(*bioops.io_complete)(bp);
1399 
1400 	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
1401 		vwakeup(bp);
1402 
1403 	/*
1404 	 * If necessary, call the b_iodone callback.  Unlock the buffer
1405 	 * before calling it, as the buffer may no longer be valid once
1406 	 * the callback returns.
1406 	 */
1407 	if (ISSET(bp->b_flags, B_CALL)) {
1408 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1409 		simple_unlock(&bp->b_interlock);
1410 		(*bp->b_iodone)(bp);
1411 	} else {
1412 		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
1413 			simple_unlock(&bp->b_interlock);
1414 			brelse(bp);
1415 		} else {			/* or just wakeup the buffer */
1416 			CLR(bp->b_flags, B_WANTED);
1417 			wakeup(bp);
1418 			simple_unlock(&bp->b_interlock);
1419 		}
1420 	}
1421 
1422 	splx(s);
1423 }
1424 
1425 /*
1426  * Return a count of buffers on the "locked" queue.
1427  */
1428 int
1429 count_lock_queue(void)
1430 {
1431 	struct buf *bp;
1432 	int n = 0;
1433 
1434 	simple_lock(&bqueue_slock);
1435 	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
1436 		n++;
1437 	simple_unlock(&bqueue_slock);
1438 	return (n);
1439 }
1440 
1441 /*
1442  * Wait for all buffers to complete I/O.
1443  * Return the number of "stuck" buffers.
1444  */
1445 int
1446 buf_syncwait(void)
1447 {
1448 	struct buf *bp;
1449 	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1450 
1451 	dcount = 10000;
1452 	for (iter = 0; iter < 20;) {
1453 		s = splbio();
1454 		simple_lock(&bqueue_slock);
1455 		nbusy = 0;
1456 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1457 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1458 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1459 				nbusy++;
1460 			/*
1461 			 * With soft updates, some buffers that are
1462 			 * written will be remarked as dirty until other
1463 			 * buffers are written.
1464 			 */
1465 			if (bp->b_vp && bp->b_vp->v_mount
1466 			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1467 			    && (bp->b_flags & B_DELWRI)) {
1468 				simple_lock(&bp->b_interlock);
1469 				bremfree(bp);
1470 				bp->b_flags |= B_BUSY;
1471 				nbusy++;
1472 				simple_unlock(&bp->b_interlock);
1473 				simple_unlock(&bqueue_slock);
1474 				bawrite(bp);
1475 				if (dcount-- <= 0) {
1476 					printf("softdep ");
1477 					splx(s);
1478 					goto fail;
1479 				}
1480 				simple_lock(&bqueue_slock);
1481 			}
1482 		    }
1483 		}
1484 
1485 		simple_unlock(&bqueue_slock);
1486 		splx(s);
1487 
1488 		if (nbusy == 0)
1489 			break;
1490 		if (nbusy_prev == 0)
1491 			nbusy_prev = nbusy;
1492 		printf("%d ", nbusy);
1493 		tsleep(&nbusy, PRIBIO, "bflush",
1494 		    (iter == 0) ? 1 : hz / 25 * iter);
1495 		if (nbusy >= nbusy_prev) /* we didn't flush anything */
1496 			iter++;
1497 		else
1498 			nbusy_prev = nbusy;
1499 	}
1500 
1501 	if (nbusy) {
1502 fail:;
1503 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1504 		printf("giving up\nPrinting vnodes for busy buffers\n");
1505 		s = splbio();
1506 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1507 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1508 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1509 				vprint(NULL, bp->b_vp);
1510 		    }
1511 		}
1512 		splx(s);
1513 #endif
1514 	}
1515 
1516 	return nbusy;
1517 }
1518 
1519 static void
1520 sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
1521 {
1522 
1523 	o->b_flags = i->b_flags;
1524 	o->b_error = i->b_error;
1525 	o->b_prio = i->b_prio;
1526 	o->b_dev = i->b_dev;
1527 	o->b_bufsize = i->b_bufsize;
1528 	o->b_bcount = i->b_bcount;
1529 	o->b_resid = i->b_resid;
1530 	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
1531 	o->b_blkno = i->b_blkno;
1532 	o->b_rawblkno = i->b_rawblkno;
1533 	o->b_iodone = PTRTOUINT64(i->b_iodone);
1534 	o->b_proc = PTRTOUINT64(i->b_proc);
1535 	o->b_vp = PTRTOUINT64(i->b_vp);
1536 	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1537 	o->b_lblkno = i->b_lblkno;
1538 }
1539 
1540 #define KERN_BUFSLOP 20
1541 static int
1542 sysctl_dobuf(SYSCTLFN_ARGS)
1543 {
1544 	struct buf *bp;
1545 	struct buf_sysctl bs;
1546 	char *dp;
1547 	u_int i, op, arg;
1548 	size_t len, needed, elem_size, out_size;
1549 	int error, s, elem_count;
1550 
1551 	if (namelen == 1 && name[0] == CTL_QUERY)
1552 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
1553 
1554 	if (namelen != 4)
1555 		return (EINVAL);
1556 
1557 	dp = oldp;
1558 	len = (oldp != NULL) ? *oldlenp : 0;
1559 	op = name[0];
1560 	arg = name[1];
1561 	elem_size = name[2];
1562 	elem_count = name[3];
1563 	out_size = MIN(sizeof(bs), elem_size);
1564 
1565 	/*
1566 	 * at the moment, these are just "placeholders" to make the
1567 	 * API for retrieving kern.buf data more extensible in the
1568 	 * future.
1569 	 *
1570 	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
1571 	 * these will be resolved at a later point.
1572 	 */
1573 	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1574 	    elem_size < 1 || elem_count < 0)
1575 		return (EINVAL);
1576 
1577 	error = 0;
1578 	needed = 0;
1579 	s = splbio();
1580 	simple_lock(&bqueue_slock);
1581 	for (i = 0; i < BQUEUES; i++) {
1582 		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
1583 			if (len >= elem_size && elem_count > 0) {
1584 				sysctl_fillbuf(bp, &bs);
1585 				error = copyout(&bs, dp, out_size);
1586 				if (error)
1587 					goto cleanup;
1588 				dp += elem_size;
1589 				len -= elem_size;
1590 			}
1591 			if (elem_count > 0) {
1592 				needed += elem_size;
1593 				if (elem_count != INT_MAX)
1594 					elem_count--;
1595 			}
1596 		}
1597 	}
1598 cleanup:
1599 	simple_unlock(&bqueue_slock);
1600 	splx(s);
1601 
1602 	*oldlenp = needed;
1603 	if (oldp == NULL)
1604 		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);
1605 
1606 	return (error);
1607 }
1608 
1609 static int
1610 sysctl_bufvm_update(SYSCTLFN_ARGS)
1611 {
1612 	int t, error;
1613 	struct sysctlnode node;
1614 
1615 	node = *rnode;
1616 	node.sysctl_data = &t;
1617 	t = *(int *)rnode->sysctl_data;
1618 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1619 	if (error || newp == NULL)
1620 		return (error);
1621 
1622 	if (t < 0)
1623 		return EINVAL;
1624 	if (rnode->sysctl_data == &bufcache) {
1625 		if (t > 100)
1626 			return (EINVAL);
1627 		bufcache = t;
1628 		buf_setwm();
1629 	} else if (rnode->sysctl_data == &bufmem_lowater) {
1630 		if (bufmem_hiwater - t < 16)
1631 			return (EINVAL);
1632 		bufmem_lowater = t;
1633 	} else if (rnode->sysctl_data == &bufmem_hiwater) {
1634 		if (t - bufmem_lowater < 16)
1635 			return (EINVAL);
1636 		bufmem_hiwater = t;
1637 	} else
1638 		return (EINVAL);
1639 
1640 	/* Drain until below new high water mark */
1641 	while ((t = bufmem - bufmem_hiwater) >= 0) {
1642 		if (buf_drain(t / (2 * 1024)) <= 0)
1643 			break;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1650 {
1651 
1652 	sysctl_createv(clog, 0, NULL, NULL,
1653 		       CTLFLAG_PERMANENT,
1654 		       CTLTYPE_NODE, "kern", NULL,
1655 		       NULL, 0, NULL, 0,
1656 		       CTL_KERN, CTL_EOL);
1657 	sysctl_createv(clog, 0, NULL, NULL,
1658 		       CTLFLAG_PERMANENT,
1659 		       CTLTYPE_NODE, "buf",
1660 		       SYSCTL_DESCR("Kernel buffer cache information"),
1661 		       sysctl_dobuf, 0, NULL, 0,
1662 		       CTL_KERN, KERN_BUF, CTL_EOL);
1663 }
1664 
1665 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1666 {
1667 
1668 	sysctl_createv(clog, 0, NULL, NULL,
1669 		       CTLFLAG_PERMANENT,
1670 		       CTLTYPE_NODE, "vm", NULL,
1671 		       NULL, 0, NULL, 0,
1672 		       CTL_VM, CTL_EOL);
1673 
1674 	sysctl_createv(clog, 0, NULL, NULL,
1675 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1676 		       CTLTYPE_INT, "bufcache",
1677 		       SYSCTL_DESCR("Percentage of physical memory to use for "
1678 				    "buffer cache"),
1679 		       sysctl_bufvm_update, 0, &bufcache, 0,
1680 		       CTL_VM, CTL_CREATE, CTL_EOL);
1681 	sysctl_createv(clog, 0, NULL, NULL,
1682 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1683 		       CTLTYPE_INT, "bufmem",
1684 		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
1685 				    "cache"),
1686 		       NULL, 0, &bufmem, 0,
1687 		       CTL_VM, CTL_CREATE, CTL_EOL);
1688 	sysctl_createv(clog, 0, NULL, NULL,
1689 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1690 		       CTLTYPE_INT, "bufmem_lowater",
1691 		       SYSCTL_DESCR("Minimum amount of kernel memory to "
1692 				    "reserve for buffer cache"),
1693 		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1694 		       CTL_VM, CTL_CREATE, CTL_EOL);
1695 	sysctl_createv(clog, 0, NULL, NULL,
1696 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1697 		       CTLTYPE_INT, "bufmem_hiwater",
1698 		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
1699 				    "for buffer cache"),
1700 		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1701 		       CTL_VM, CTL_CREATE, CTL_EOL);
1702 }
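
/*
 * The nodes created above can be inspected and tuned from userland,
 * e.g. (illustrative):
 *
 *	$ sysctl vm.bufmem vm.bufmem_hiwater
 *	# sysctl -w vm.bufcache=20
 *
 * Writes go through sysctl_bufvm_update(), which validates the new
 * value and drains the cache down below a lowered high water mark.
 */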
1703 
1704 #ifdef DEBUG
1705 /*
1706  * Print out statistics on the current allocation of the buffer pool.
1707  * Can be enabled to print out on every ``sync'' by setting "syncprt"
1708  * in vfs_syscalls.c using sysctl.
1709  */
1710 void
1711 vfs_bufstats(void)
1712 {
1713 	int s, i, j, count;
1714 	struct buf *bp;
1715 	struct bqueue *dp;
1716 	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1717 	static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1718 
1719 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1720 		count = 0;
1721 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1722 			counts[j] = 0;
1723 		s = splbio();
1724 		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
1725 			counts[bp->b_bufsize/PAGE_SIZE]++;
1726 			count++;
1727 		}
1728 		splx(s);
1729 		printf("%s: total-%d", bname[i], count);
1730 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1731 			if (counts[j] != 0)
1732 				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1733 		printf("\n");
1734 	}
1735 }
1736 #endif /* DEBUG */
1737 
1738 /* ------------------------------ */
1739 
1740 static POOL_INIT(bufiopool, sizeof(struct buf), 0, 0, 0, "biopl", NULL);
1741 
1742 static struct buf *
1743 getiobuf1(int prflags)
1744 {
1745 	struct buf *bp;
1746 	int s;
1747 
1748 	s = splbio();
1749 	bp = pool_get(&bufiopool, prflags);
1750 	splx(s);
1751 	if (bp != NULL) {
1752 		BUF_INIT(bp);
1753 	}
1754 	return bp;
1755 }
1756 
1757 struct buf *
1758 getiobuf(void)
1759 {
1760 
1761 	return getiobuf1(PR_WAITOK);
1762 }
1763 
1764 struct buf *
1765 getiobuf_nowait(void)
1766 {
1767 
1768 	return getiobuf1(PR_NOWAIT);
1769 }
1770 
1771 void
1772 putiobuf(struct buf *bp)
1773 {
1774 	int s;
1775 
1776 	s = splbio();
1777 	pool_put(&bufiopool, bp);
1778 	splx(s);
1779 }
1780 
1781 /*
1782  * nestiobuf_iodone: b_iodone callback for nested buffers.
1783  */
1784 
1785 static void
1786 nestiobuf_iodone(struct buf *bp)
1787 {
1788 	struct buf *mbp = bp->b_private;
1789 	int error;
1790 	int donebytes;
1791 
1792 	KASSERT(bp->b_bcount <= bp->b_bufsize);
1793 	KASSERT(mbp != bp);
1794 
1795 	error = 0;
1796 	if ((bp->b_flags & B_ERROR) != 0) {
1797 		error = EIO;
1798 		/* check if an error code was returned */
1799 		if (bp->b_error)
1800 			error = bp->b_error;
1801 	} else if ((bp->b_bcount < bp->b_bufsize) || (bp->b_resid > 0)) {
1802 		/*
1803 		 * Not everything was transferred; raise an error. We have no way to
1804 		 * propagate these conditions to mbp.
1805 		 */
1806 		error = EIO;
1807 	}
1808 
1809 	donebytes = bp->b_bufsize;
1810 
1811 	putiobuf(bp);
1812 	nestiobuf_done(mbp, donebytes, error);
1813 }
1814 
1815 /*
1816  * nestiobuf_setup: setup a "nested" buffer.
1817  *
1818  * => 'mbp' is a "master" buffer which is being divided into sub-pieces.
1819  * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
1820  * => 'offset' is a byte offset in the master buffer.
1821  * => 'size' is a size in bytes of this nested buffer.
1822  */
1823 
1824 void
1825 nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
1826 {
1827 	const int b_read = mbp->b_flags & B_READ;
1828 	struct vnode *vp = mbp->b_vp;
1829 
1830 	KASSERT(mbp->b_bcount >= offset + size);
1831 	bp->b_vp = vp;
1832 	bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
1833 	bp->b_iodone = nestiobuf_iodone;
1834 	bp->b_data = mbp->b_data + offset;
1835 	bp->b_resid = bp->b_bcount = size;
1836 	bp->b_bufsize = bp->b_bcount;
1837 	bp->b_private = mbp;
1838 	BIO_COPYPRIO(bp, mbp);
1839 	if (!b_read && vp != NULL) {
1840 		int s;
1841 
1842 		s = splbio();
1843 		V_INCR_NUMOUTPUT(vp);
1844 		splx(s);
1845 	}
1846 }
1847 
1848 /*
1849  * nestiobuf_done: propagate completion to the master buffer.
1850  *
1851  * => 'donebytes' specifies how many bytes in the 'mbp' are completed.
1852  * => 'error' is the errno(2) value with which 'donebytes' completed.
1853  */
1854 
1855 void
1856 nestiobuf_done(struct buf *mbp, int donebytes, int error)
1857 {
1858 	int s;
1859 
1860 	if (donebytes == 0) {
1861 		return;
1862 	}
1863 	s = splbio();
1864 	KASSERT(mbp->b_resid >= donebytes);
1865 	if (error) {
1866 		mbp->b_flags |= B_ERROR;
1867 		mbp->b_error = error;
1868 	}
1869 	mbp->b_resid -= donebytes;
1870 	if (mbp->b_resid == 0) {
1871 		if ((mbp->b_flags & B_ERROR) != 0) {
1872 			mbp->b_resid = mbp->b_bcount; /* be conservative */
1873 		}
1874 		biodone(mbp);
1875 	}
1876 	splx(s);
1877 }
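
/*
 * A sketch of how the nestiobuf functions fit together (illustrative;
 * drivers that split one transfer into several sub-transfers use this
 * pattern).  The master's b_resid must start at b_bcount:
 *
 *	mbp->b_resid = mbp->b_bcount;
 *	bp1 = getiobuf();
 *	nestiobuf_setup(mbp, bp1, 0, size / 2);
 *	bp2 = getiobuf();
 *	nestiobuf_setup(mbp, bp2, size / 2, size - size / 2);
 *	...submit bp1 and bp2 to the underlying device(s)...
 *
 * Each sub-buffer completes through nestiobuf_iodone(), which frees
 * it with putiobuf() and credits its byte count to the master via
 * nestiobuf_done(); when mbp->b_resid reaches zero, biodone(mbp)
 * completes the whole transfer.
 */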
1878