1 /*	$NetBSD: vfs_bio.c,v 1.181 2007/12/02 13:56:16 hannken Exp $	*/
2 
3 /*-
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
37  */
38 
39 /*-
40  * Copyright (c) 1994 Christopher G. Demetriou
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
71  */
72 
73 /*
74  * Some references:
75  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
77  *		UNIX Operating System (Addison-Wesley, 1989)
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.181 2007/12/02 13:56:16 hannken Exp $");
82 
83 #include "fs_ffs.h"
84 #include "opt_bufcache.h"
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/buf.h>
91 #include <sys/vnode.h>
92 #include <sys/mount.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sysctl.h>
96 #include <sys/conf.h>
97 #include <sys/kauth.h>
98 
99 #include <uvm/uvm.h>
100 
101 #include <miscfs/specfs/specdev.h>
102 
103 #ifndef	BUFPAGES
104 # define BUFPAGES 0
105 #endif
106 
107 #ifdef BUFCACHE
108 # if (BUFCACHE < 5) || (BUFCACHE > 95)
109 #  error BUFCACHE is not between 5 and 95
110 # endif
111 #else
112 # define BUFCACHE 15
113 #endif
114 
115 u_int	nbuf;			/* XXX - for softdep_lockedbufs */
116 u_int	bufpages = BUFPAGES;	/* optional hardwired count */
117 u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */
118 
119 /* Function prototypes */
120 struct bqueue;
121 
122 static void buf_setwm(void);
123 static int buf_trim(void);
124 static void *bufpool_page_alloc(struct pool *, int);
125 static void bufpool_page_free(struct pool *, void *);
126 static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
127     kauth_cred_t, int);
128 static struct buf *getnewbuf(int, int, int);
129 static int buf_lotsfree(void);
130 static int buf_canrelease(void);
131 static inline u_long buf_mempoolidx(u_long);
132 static inline u_long buf_roundsize(u_long);
133 static inline void *buf_malloc(size_t);
134 static void buf_mrelease(void *, size_t);
135 static inline void binsheadfree(struct buf *, struct bqueue *);
136 static inline void binstailfree(struct buf *, struct bqueue *);
137 int count_lock_queue(void); /* XXX */
138 #ifdef DEBUG
139 static int checkfreelist(struct buf *, struct bqueue *);
140 #endif
141 
142 /*
143  * Definitions for the buffer hash lists.
144  */
145 #define	BUFHASH(dvp, lbn)	\
146 	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
147 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
148 u_long	bufhash;
149 
150 struct bio_ops *bioopsp;	/* can be overridden by ffs_softdep */
151 
152 /*
153  * Insq/Remq for the buffer hash lists.
154  */
155 #define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
156 #define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
157 
158 /*
159  * Definitions for the buffer free lists.
160  */
161 #define	BQUEUES		3		/* number of free buffer queues */
162 
163 #define	BQ_LOCKED	0		/* super-blocks &c */
164 #define	BQ_LRU		1		/* lru, useful buffers */
165 #define	BQ_AGE		2		/* rubbish */
166 
167 struct bqueue {
168 	TAILQ_HEAD(, buf) bq_queue;
169 	uint64_t bq_bytes;
170 } bufqueues[BQUEUES];
171 int needbuffer;
172 
173 /*
174  * Buffer queue lock.
175  * Take this lock first if also taking some buffer's b_interlock.
176  */
177 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
178 
179 /*
180  * Buffer pools for I/O buffers.
181  */
182 static struct pool bufpool;
183 static struct pool bufiopool;
184 
185 
186 /* XXX - somewhat gross.. */
187 #if MAXBSIZE == 0x2000
188 #define NMEMPOOLS 5
189 #elif MAXBSIZE == 0x4000
190 #define NMEMPOOLS 6
191 #elif MAXBSIZE == 0x8000
192 #define NMEMPOOLS 7
193 #else
194 #define NMEMPOOLS 8
195 #endif
196 
197 #define MEMPOOL_INDEX_OFFSET 9	/* smallest pool is 512 bytes */
198 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
199 #error update vfs_bio buffer memory parameters
200 #endif
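/*
 * For example (illustrative arithmetic, not code from this file): with
 * MAXBSIZE 0x10000 (64KB) the #else branch above yields NMEMPOOLS 8, and
 * the consistency check holds since 1 << (8 + 9 - 1) == 1 << 16 == 0x10000;
 * the pools then cover the sizes 512, 1k, 2k, 4k, 8k, 16k, 32k and 64k.
 */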
201 
202 /* Buffer memory pools */
203 static struct pool bmempools[NMEMPOOLS];
204 
205 struct vm_map *buf_map;
206 
207 /*
208  * Buffer memory pool allocator.
209  */
210 static void *
211 bufpool_page_alloc(struct pool *pp, int flags)
212 {
213 
214 	return (void *)uvm_km_alloc(buf_map,
215 	    MAXBSIZE, MAXBSIZE,
216 	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
217 	    | UVM_KMF_WIRED);
218 }
219 
220 static void
221 bufpool_page_free(struct pool *pp, void *v)
222 {
223 
224 	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
225 }
226 
227 static struct pool_allocator bufmempool_allocator = {
228 	.pa_alloc = bufpool_page_alloc,
229 	.pa_free = bufpool_page_free,
230 	.pa_pagesz = MAXBSIZE,
231 };
232 
233 /* Buffer memory management variables */
234 uint64_t bufmem_valimit;
235 uint64_t bufmem_hiwater;
236 uint64_t bufmem_lowater;
237 uint64_t bufmem;
238 
239 /*
240  * MD code can call this to set a hard limit on the amount
241  * of virtual memory used by the buffer cache.
242  */
243 int
244 buf_setvalimit(vsize_t sz)
245 {
246 
247 	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
248 	if (sz < NMEMPOOLS * MAXBSIZE)
249 		return EINVAL;
250 
251 	bufmem_valimit = sz;
252 	return 0;
253 }
254 
255 static void
256 buf_setwm(void)
257 {
258 
259 	bufmem_hiwater = buf_memcalc();
260 	/* lowater is approx. 2% of memory (with bufcache = 15) */
261 #define	BUFMEM_WMSHIFT	3
262 #define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
263 	if (bufmem_hiwater < BUFMEM_HIWMMIN)
264 		/* Ensure a reasonable minimum value */
265 		bufmem_hiwater = BUFMEM_HIWMMIN;
266 	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
267 }
268 
269 #ifdef DEBUG
270 int debug_verify_freelist = 0;
271 static int
272 checkfreelist(struct buf *bp, struct bqueue *dp)
273 {
274 	struct buf *b;
275 
276 	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
277 		if (b == bp)
278 			return 1;
279 	}
280 	return 0;
281 }
282 #endif
283 
284 /*
285  * Insq/Remq for the buffer free lists.
286  * Call with buffer queue locked.
287  */
288 static inline void
289 binsheadfree(struct buf *bp, struct bqueue *dp)
290 {
291 
292 	KASSERT(bp->b_freelistindex == -1);
293 	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
294 	dp->bq_bytes += bp->b_bufsize;
295 	bp->b_freelistindex = dp - bufqueues;
296 }
297 
298 static inline void
299 binstailfree(struct buf *bp, struct bqueue *dp)
300 {
301 
302 	KASSERT(bp->b_freelistindex == -1);
303 	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
304 	dp->bq_bytes += bp->b_bufsize;
305 	bp->b_freelistindex = dp - bufqueues;
306 }
307 
308 void
309 bremfree(struct buf *bp)
310 {
311 	struct bqueue *dp;
312 	int bqidx = bp->b_freelistindex;
313 
314 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
315 
316 	KASSERT(bqidx != -1);
317 	dp = &bufqueues[bqidx];
318 	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
319 	KASSERT(dp->bq_bytes >= bp->b_bufsize);
320 	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
321 	dp->bq_bytes -= bp->b_bufsize;
322 #if defined(DIAGNOSTIC)
323 	bp->b_freelistindex = -1;
324 #endif /* defined(DIAGNOSTIC) */
325 }
326 
327 u_long
328 buf_memcalc(void)
329 {
330 	u_long n;
331 
332 	/*
333 	 * Determine the upper bound of memory to use for buffers.
334 	 *
335 	 *	- If bufpages is specified, use that as the number
336 	 *	  of pages.
337 	 *
338 	 *	- Otherwise, use bufcache as the percentage of
339 	 *	  physical memory.
340 	 */
341 	if (bufpages != 0) {
342 		n = bufpages;
343 	} else {
344 		if (bufcache < 5) {
345 			printf("forcing bufcache %d -> 5\n", bufcache);
346 			bufcache = 5;
347 		}
348 		if (bufcache > 95) {
349 			printf("forcing bufcache %d -> 95\n", bufcache);
350 			bufcache = 95;
351 		}
352 		n = physmem / 100 * bufcache;
353 	}
354 
355 	n <<= PAGE_SHIFT;
356 	if (bufmem_valimit != 0 && n > bufmem_valimit)
357 		n = bufmem_valimit;
358 
359 	return (n);
360 }
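/*
 * Worked example (illustrative numbers only): on a machine with 512MB of
 * RAM and the default bufcache of 15, buf_memcalc() yields a high water
 * mark of about 512MB / 100 * 15 ~= 76.8MB.  buf_setwm() then derives the
 * low water mark as hiwater >> 3, i.e. ~9.6MB, which is the "approx. 2%
 * of memory" mentioned there (15% / 8 ~= 1.9%).
 */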
361 
362 /*
363  * Initialize buffers and hash links for buffers.
364  */
365 void
366 bufinit(void)
367 {
368 	struct bqueue *dp;
369 	int use_std;
370 	u_int i;
371 
372 	/*
373 	 * Initialize buffer cache memory parameters.
374 	 */
375 	bufmem = 0;
376 	buf_setwm();
377 
378 	if (bufmem_valimit != 0) {
379 		vaddr_t minaddr = 0, maxaddr;
380 		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
381 					  bufmem_valimit, 0, false, 0);
382 		if (buf_map == NULL)
383 			panic("bufinit: cannot allocate submap");
384 	} else
385 		buf_map = kernel_map;
386 
387 	/* On "small" machines use small pool page sizes where possible */
388 	use_std = (physmem < atop(16*1024*1024));
389 
390 	/*
391 	 * Also use them on systems that can map the pool pages using
392 	 * a direct-mapped segment.
393 	 */
394 #ifdef PMAP_MAP_POOLPAGE
395 	use_std = 1;
396 #endif
397 
398 	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl",
399 	    &pool_allocator_nointr, IPL_NONE);
400 	pool_init(&bufiopool, sizeof(struct buf), 0, 0, 0, "biopl",
401 	    NULL, IPL_BIO);
402 
403 	bufmempool_allocator.pa_backingmap = buf_map;
404 	for (i = 0; i < NMEMPOOLS; i++) {
405 		struct pool_allocator *pa;
406 		struct pool *pp = &bmempools[i];
407 		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
408 		char *name = malloc(8, M_TEMP, M_WAITOK);
409 		if (__predict_true(size >= 1024))
410 			(void)snprintf(name, 8, "buf%dk", size / 1024);
411 		else
412 			(void)snprintf(name, 8, "buf%db", size);
413 		pa = (size <= PAGE_SIZE && use_std)
414 			? &pool_allocator_nointr
415 			: &bufmempool_allocator;
416 		pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
417 		pool_setlowat(pp, 1);
418 		pool_sethiwat(pp, 1);
419 	}
420 
421 	/* Initialize the buffer queues */
422 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
423 		TAILQ_INIT(&dp->bq_queue);
424 		dp->bq_bytes = 0;
425 	}
426 
427 	/*
428 	 * Estimate hash table size based on the amount of memory we
429 	 * intend to use for the buffer cache. The average buffer
430 	 * size is dependent on our clients (i.e. filesystems).
431 	 *
432 	 * For now, use an empirical 3K per buffer.
433 	 */
434 	nbuf = (bufmem_hiwater / 1024) / 3;
435 	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
436 }
437 
438 static int
439 buf_lotsfree(void)
440 {
441 	int try, thresh;
442 
443 	/* Always allocate if doing copy on write */
444 	if (curlwp->l_pflag & LP_UFSCOW)
445 		return 1;
446 
447 	/* Always allocate if less than the low water mark. */
448 	if (bufmem < bufmem_lowater)
449 		return 1;
450 
451 	/* Never allocate if greater than the high water mark. */
452 	if (bufmem > bufmem_hiwater)
453 		return 0;
454 
455 	/* If there's anything on the AGE list, it should be eaten. */
456 	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
457 		return 0;
458 
459 	/*
460 	 * The probability of getting a new allocation is inversely
461 	 * proportional to the current size of the cache, using
462 	 * a granularity of 16 steps.
463 	 */
464 	try = random() & 0x0000000fL;
465 
466 	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
467 	thresh = (bufmem - bufmem_lowater) /
468 	    ((bufmem_hiwater - bufmem_lowater) / 16);
469 
470 	if (try >= thresh)
471 		return 1;
472 
473 	/* Otherwise don't allocate. */
474 	return 0;
475 }
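/*
 * Worked example (illustrative): if bufmem sits three quarters of the way
 * from the low to the high water mark, thresh computes to 12, so only
 * draws of 12..15 from "random() & 0xf" succeed and a new allocation is
 * granted with probability 4/16 = 25%.
 */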
476 
477 /*
478  * Return an estimate of the number of bytes we think need to be
479  * released to help resolve low memory conditions.
480  *
481  * => called at splbio.
482  * => called with bqueue_slock held.
483  */
484 static int
485 buf_canrelease(void)
486 {
487 	int pagedemand, ninvalid = 0;
488 
489 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
490 
491 	if (bufmem < bufmem_lowater)
492 		return 0;
493 
494 	if (bufmem > bufmem_hiwater)
495 		return bufmem - bufmem_hiwater;
496 
497 	ninvalid += bufqueues[BQ_AGE].bq_bytes;
498 
499 	pagedemand = uvmexp.freetarg - uvmexp.free;
500 	if (pagedemand < 0)
501 		return ninvalid;
502 	return MAX(ninvalid, MIN(2 * MAXBSIZE,
503 	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
504 }
505 
506 /*
507  * Buffer memory allocation helper functions
508  */
509 static inline u_long
510 buf_mempoolidx(u_long size)
511 {
512 	u_int n = 0;
513 
514 	size -= 1;
515 	size >>= MEMPOOL_INDEX_OFFSET;
516 	while (size) {
517 		size >>= 1;
518 		n += 1;
519 	}
520 	if (n >= NMEMPOOLS)
521 		panic("buf mem pool index %d", n);
522 	return n;
523 }
524 
525 static inline u_long
526 buf_roundsize(u_long size)
527 {
528 	/* Round up to nearest power of 2 */
529 	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
530 }
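/*
 * Example (illustrative): a request of 3000 bytes gives
 * buf_mempoolidx(3000) == 3, since (3000 - 1) >> 9 == 5 takes three right
 * shifts to reach zero; buf_roundsize(3000) is therefore
 * 1 << (3 + 9) == 4096, i.e. the request is served from the "buf4k" pool.
 * A 512-byte request maps to index 0, the smallest pool.
 */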
531 
532 static inline void *
533 buf_malloc(size_t size)
534 {
535 	u_int n = buf_mempoolidx(size);
536 	void *addr;
537 	int s;
538 
539 	while (1) {
540 		addr = pool_get(&bmempools[n], PR_NOWAIT);
541 		if (addr != NULL)
542 			break;
543 
544 		/* No memory, see if we can free some. If so, try again */
545 		if (buf_drain(1) > 0)
546 			continue;
547 
548 		/* Wait for buffers to arrive on the LRU queue */
549 		s = splbio();
550 		simple_lock(&bqueue_slock);
551 		needbuffer = 1;
552 		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
553 			"buf_malloc", 0, &bqueue_slock);
554 		splx(s);
555 	}
556 
557 	return addr;
558 }
559 
560 static void
561 buf_mrelease(void *addr, size_t size)
562 {
563 
564 	pool_put(&bmempools[buf_mempoolidx(size)], addr);
565 }
566 
567 /*
568  * bread()/breadn() helper.
569  */
570 static inline struct buf *
571 bio_doread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
572     int async)
573 {
574 	struct buf *bp;
575 	struct mount *mp;
576 
577 	bp = getblk(vp, blkno, size, 0, 0);
578 
579 #ifdef DIAGNOSTIC
580 	if (bp == NULL) {
581 		panic("bio_doread: no such buf");
582 	}
583 #endif
584 
585 	/*
586 	 * If buffer does not have data valid, start a read.
587 	 * Note that if buffer is B_INVAL, getblk() won't return it.
588 	 * Therefore, it's valid if its I/O has completed or been delayed.
589 	 */
590 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
591 		/* Start I/O for the buffer. */
592 		SET(bp->b_flags, B_READ | async);
593 		if (async)
594 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
595 		else
596 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
597 		VOP_STRATEGY(vp, bp);
598 
599 		/* Pay for the read. */
600 		curproc->p_stats->p_ru.ru_inblock++;
601 	} else if (async) {
602 		brelse(bp, 0);
603 	}
604 
605 	if (vp->v_type == VBLK)
606 		mp = vp->v_specmountpoint;
607 	else
608 		mp = vp->v_mount;
609 
610 	/*
611 	 * Collect statistics on synchronous and asynchronous reads.
612 	 * Reads from block devices are charged to their associated
613 	 * filesystem (if any).
614 	 */
615 	if (mp != NULL) {
616 		if (async == 0)
617 			mp->mnt_stat.f_syncreads++;
618 		else
619 			mp->mnt_stat.f_asyncreads++;
620 	}
621 
622 	return (bp);
623 }
624 
625 /*
626  * Read a disk block.
627  * This algorithm is described in Bach (p.54).
628  */
629 int
630 bread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
631     struct buf **bpp)
632 {
633 	struct buf *bp;
634 
635 	/* Get buffer for block. */
636 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
637 
638 	/* Wait for the read to complete, and return result. */
639 	return (biowait(bp));
640 }
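/*
 * Usage sketch (illustrative only, not code from this file; "vp", "lbn"
 * and "bsize" are assumed to come from the caller's context):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp, 0);
 *		return error;
 *	}
 *	...inspect bp->b_data...
 *	brelse(bp, 0);
 */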
641 
642 /*
643  * Read-ahead multiple disk blocks. The first is sync, the rest async.
644  * Trivial modification to the breada algorithm presented in Bach (p.55).
645  */
646 int
647 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
648     int *rasizes, int nrablks, kauth_cred_t cred, struct buf **bpp)
649 {
650 	struct buf *bp;
651 	int i;
652 
653 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
654 
655 	/*
656 	 * For each of the read-ahead blocks, start a read, if necessary.
657 	 */
658 	for (i = 0; i < nrablks; i++) {
659 		/* If it's in the cache, just go on to next one. */
660 		if (incore(vp, rablks[i]))
661 			continue;
662 
663 		/* Get a buffer for the read-ahead block */
664 		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
665 	}
666 
667 	/* Otherwise, we had to start a read for it; wait until it's valid. */
668 	return (biowait(bp));
669 }
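/*
 * Usage sketch (illustrative only): read logical block "lbn" synchronously
 * and start an asynchronous read-ahead of the block after it; "vp", "lbn",
 * "bsize", "bp" and "error" are assumed to exist in the caller:
 *
 *	daddr_t rablk = lbn + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 */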
670 
671 /*
672  * Read with single-block read-ahead.  Defined in Bach (p.55), but
673  * implemented as a call to breadn().
674  * XXX for compatibility with old file systems.
675  */
676 int
677 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
678     int rabsize, kauth_cred_t cred, struct buf **bpp)
679 {
680 
681 	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
682 }
683 
684 /*
685  * Block write.  Described in Bach (p.56)
686  */
687 int
688 bwrite(struct buf *bp)
689 {
690 	int rv, sync, wasdelayed, s;
691 	struct vnode *vp;
692 	struct mount *mp;
693 
694 	KASSERT(ISSET(bp->b_flags, B_BUSY));
695 
696 	vp = bp->b_vp;
697 	if (vp != NULL) {
698 		if (vp->v_type == VBLK)
699 			mp = vp->v_specmountpoint;
700 		else
701 			mp = vp->v_mount;
702 	} else {
703 		mp = NULL;
704 	}
705 
706 	/*
707 	 * Remember buffer type, to switch on it later.  If the write was
708 	 * synchronous, but the file system was mounted with MNT_ASYNC,
709 	 * convert it to a delayed write.
710 	 * XXX note that this relies on delayed tape writes being converted
711 	 * to async, not sync writes (which is safe, but ugly).
712 	 */
713 	sync = !ISSET(bp->b_flags, B_ASYNC);
714 	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
715 		bdwrite(bp);
716 		return (0);
717 	}
718 
719 	/*
720 	 * Collect statistics on synchronous and asynchronous writes.
721 	 * Writes to block devices are charged to their associated
722 	 * filesystem (if any).
723 	 */
724 	if (mp != NULL) {
725 		if (sync)
726 			mp->mnt_stat.f_syncwrites++;
727 		else
728 			mp->mnt_stat.f_asyncwrites++;
729 	}
730 
731 	s = splbio();
732 	simple_lock(&bp->b_interlock);
733 
734 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
735 
736 	CLR(bp->b_flags, (B_READ | B_DONE | B_DELWRI));
737 	bp->b_error = 0;
738 
739 	/*
740 	 * Pay for the I/O operation and make sure the buf is on the correct
741 	 * vnode queue.
742 	 */
743 	if (wasdelayed)
744 		reassignbuf(bp, bp->b_vp);
745 	else
746 		curproc->p_stats->p_ru.ru_oublock++;
747 
748 	/* Initiate disk write.  Make sure the appropriate party is charged. */
749 	V_INCR_NUMOUTPUT(bp->b_vp);
750 	simple_unlock(&bp->b_interlock);
751 	splx(s);
752 
753 	if (sync)
754 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
755 	else
756 		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
757 
758 	VOP_STRATEGY(vp, bp);
759 
760 	if (sync) {
761 		/* If I/O was synchronous, wait for it to complete. */
762 		rv = biowait(bp);
763 
764 		/* Release the buffer. */
765 		brelse(bp, 0);
766 
767 		return (rv);
768 	} else {
769 		return (0);
770 	}
771 }
772 
773 int
774 vn_bwrite(void *v)
775 {
776 	struct vop_bwrite_args *ap = v;
777 
778 	return (bwrite(ap->a_bp));
779 }
780 
781 /*
782  * Delayed write.
783  *
784  * The buffer is marked dirty, but is not queued for I/O.
785  * This routine should be used when the buffer is expected
786  * to be modified again soon, typically a small write that
787  * partially fills a buffer.
788  *
789  * NB: magnetic tapes cannot be delayed; they must be
790  * written in the order that the writes are requested.
791  *
792  * Described in Leffler, et al. (pp. 208-213).
793  */
794 void
795 bdwrite(struct buf *bp)
796 {
797 	int s;
798 
799 	/* If this is a tape block, write the block now. */
800 	if (bdev_type(bp->b_dev) == D_TAPE) {
801 		bawrite(bp);
802 		return;
803 	}
804 
805 	/*
806 	 * If the block hasn't been seen before:
807 	 *	(1) Mark it as having been seen,
808 	 *	(2) Charge for the write,
809 	 *	(3) Make sure it's on its vnode's correct block list.
810 	 */
811 	s = splbio();
812 	simple_lock(&bp->b_interlock);
813 
814 	KASSERT(ISSET(bp->b_flags, B_BUSY));
815 
816 	if (!ISSET(bp->b_flags, B_DELWRI)) {
817 		SET(bp->b_flags, B_DELWRI);
818 		curproc->p_stats->p_ru.ru_oublock++;
819 		reassignbuf(bp, bp->b_vp);
820 	}
821 
822 	/* Otherwise, the "write" is done, so mark and release the buffer. */
823 	CLR(bp->b_flags, B_DONE);
824 	simple_unlock(&bp->b_interlock);
825 	splx(s);
826 
827 	brelse(bp, 0);
828 }
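/*
 * Usage sketch (illustrative only): the classic delayed-write pattern
 * reads the block, modifies part of it, and lets bdwrite() mark it dirty
 * without starting I/O; "vp", "lbn" and "bsize" are assumed:
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp, 0);
 *		return error;
 *	}
 *	...modify a few bytes of bp->b_data...
 *	bdwrite(bp);
 */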
829 
830 /*
831  * Asynchronous block write; just an asynchronous bwrite().
832  */
833 void
834 bawrite(struct buf *bp)
835 {
836 	int s;
837 
838 	s = splbio();
839 	simple_lock(&bp->b_interlock);
840 
841 	KASSERT(ISSET(bp->b_flags, B_BUSY));
842 
843 	SET(bp->b_flags, B_ASYNC);
844 	simple_unlock(&bp->b_interlock);
845 	splx(s);
846 	VOP_BWRITE(bp);
847 }
848 
849 /*
850  * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
851  * Call at splbio() and with the buffer interlock locked.
852  * Note: called only from biodone() through ffs softdep's bioopsp->io_complete()
853  */
854 void
855 bdirty(struct buf *bp)
856 {
857 
858 	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
859 	KASSERT(ISSET(bp->b_flags, B_BUSY));
860 
861 	CLR(bp->b_flags, B_AGE);
862 
863 	if (!ISSET(bp->b_flags, B_DELWRI)) {
864 		SET(bp->b_flags, B_DELWRI);
865 		curproc->p_stats->p_ru.ru_oublock++;
866 		reassignbuf(bp, bp->b_vp);
867 	}
868 }
869 
870 /*
871  * Release a buffer onto the free lists.
872  * Described in Bach (p. 46).
873  */
874 void
875 brelse(struct buf *bp, int set)
876 {
877 	struct bqueue *bufq;
878 	int s;
879 
880 	/* Block disk interrupts. */
881 	s = splbio();
882 	simple_lock(&bqueue_slock);
883 	simple_lock(&bp->b_interlock);
884 
885 	bp->b_flags |= set;
886 
887 	KASSERT(ISSET(bp->b_flags, B_BUSY));
888 	KASSERT(!ISSET(bp->b_flags, B_CALL));
889 
890 	/* Wake up any processes waiting for any buffer to become free. */
891 	if (needbuffer) {
892 		needbuffer = 0;
893 		wakeup(&needbuffer);
894 	}
895 
896 	/* Wake up any processes waiting for _this_ buffer to become free. */
897 	if (ISSET(bp->b_flags, B_WANTED)) {
898 		CLR(bp->b_flags, B_WANTED|B_AGE);
899 		wakeup(bp);
900 	}
901 
902 	/*
903 	 * Determine which queue the buffer should be on, then put it there.
904 	 */
905 
906 	/* If it's locked, don't report an error; try again later. */
907 	if (ISSET(bp->b_flags, B_LOCKED) && bp->b_error != 0)
908 		bp->b_error = 0;
909 
910 	/* If it's not cacheable, or an error, mark it invalid. */
911 	if (ISSET(bp->b_flags, B_NOCACHE) || bp->b_error != 0)
912 		SET(bp->b_flags, B_INVAL);
913 
914 	if (ISSET(bp->b_flags, B_VFLUSH)) {
915 		/*
916 		 * This is a delayed write buffer that was just flushed to
917 		 * disk.  It is still on the LRU queue.  If it's become
918 		 * invalid, then we need to move it to a different queue;
919 		 * otherwise leave it in its current position.
920 		 */
921 		CLR(bp->b_flags, B_VFLUSH);
922 		if (!ISSET(bp->b_flags, B_INVAL|B_LOCKED|B_AGE) &&
923 		    bp->b_error == 0) {
924 			KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
925 			goto already_queued;
926 		} else {
927 			bremfree(bp);
928 		}
929 	}
930 
931 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
932 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
933 	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
934 
935 	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
936 		/*
937 		 * If it's invalid or empty, dissociate it from its vnode
938 		 * and put on the head of the appropriate queue.
939 		 */
940 		if (LIST_FIRST(&bp->b_dep) != NULL && bioopsp)
941 			bioopsp->io_deallocate(bp);
942 		CLR(bp->b_flags, B_DONE|B_DELWRI);
943 		if (bp->b_vp) {
944 			reassignbuf(bp, bp->b_vp);
945 			brelvp(bp);
946 		}
947 		if (bp->b_bufsize <= 0)
948 			/* no data */
949 			goto already_queued;
950 		else
951 			/* invalid data */
952 			bufq = &bufqueues[BQ_AGE];
953 		binsheadfree(bp, bufq);
954 	} else {
955 		/*
956 		 * It has valid data.  Put it on the end of the appropriate
957 		 * queue, so that it'll stick around for as long as possible.
958 		 * If buf is AGE, but has dependencies, must put it on last
959 		 * bufqueue to be scanned, ie LRU. This protects against the
960 		 * livelock where BQ_AGE only has buffers with dependencies,
961 		 * and we thus never get to the dependent buffers in BQ_LRU.
962 		 */
963 		if (ISSET(bp->b_flags, B_LOCKED))
964 			/* locked in core */
965 			bufq = &bufqueues[BQ_LOCKED];
966 		else if (!ISSET(bp->b_flags, B_AGE))
967 			/* valid data */
968 			bufq = &bufqueues[BQ_LRU];
969 		else {
970 			/* stale but valid data */
971 			int has_deps;
972 
973 			if (LIST_FIRST(&bp->b_dep) != NULL && bioopsp)
974 				has_deps = bioopsp->io_countdeps(bp, 0);
975 			else
976 				has_deps = 0;
977 			bufq = has_deps ? &bufqueues[BQ_LRU] :
978 			    &bufqueues[BQ_AGE];
979 		}
980 		binstailfree(bp, bufq);
981 	}
982 
983 already_queued:
984 	/* Unlock the buffer. */
985 	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
986 	SET(bp->b_flags, B_CACHE);
987 
988 	/* Allow disk interrupts. */
989 	simple_unlock(&bp->b_interlock);
990 	simple_unlock(&bqueue_slock);
991 	splx(s);
992 	if (bp->b_bufsize <= 0) {
993 #ifdef DEBUG
994 		memset((char *)bp, 0, sizeof(*bp));
995 #endif
996 		pool_put(&bufpool, bp);
997 	}
998 }
999 
1000 /*
1001  * Determine if a block is in the cache.
1002  * Just look at what would be its hash chain.  If it's there, return
1003  * a pointer to it, unless it's marked invalid.  If it's marked invalid,
1004  * we normally don't return the buffer, unless the caller explicitly
1005  * wants us to.
1006  */
1007 struct buf *
1008 incore(struct vnode *vp, daddr_t blkno)
1009 {
1010 	struct buf *bp;
1011 
1012 	/* Search hash chain */
1013 	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1014 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1015 		    !ISSET(bp->b_flags, B_INVAL))
1016 			return (bp);
1017 	}
1018 
1019 	return (NULL);
1020 }
1021 
1022 /*
1023  * Get a block of requested size that is associated with
1024  * a given vnode and block offset. If it is found in the
1025  * block cache, mark it as having been found, make it busy
1026  * and return it. Otherwise, return an empty block of the
1027  * correct size.  It is up to the caller to ensure that the
1028  * cached blocks are of the correct size.
1029  */
1030 struct buf *
1031 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1032 {
1033 	struct buf *bp;
1034 	int s, err;
1035 	int preserve;
1036 
1037 start:
1038 	s = splbio();
1039 	simple_lock(&bqueue_slock);
1040 	bp = incore(vp, blkno);
1041 	if (bp != NULL) {
1042 		simple_lock(&bp->b_interlock);
1043 		if (ISSET(bp->b_flags, B_BUSY)) {
1044 			simple_unlock(&bqueue_slock);
1045 			if (curlwp == uvm.pagedaemon_lwp) {
1046 				simple_unlock(&bp->b_interlock);
1047 				splx(s);
1048 				return NULL;
1049 			}
1050 			SET(bp->b_flags, B_WANTED);
1051 			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
1052 					"getblk", slptimeo, &bp->b_interlock);
1053 			splx(s);
1054 			if (err)
1055 				return (NULL);
1056 			goto start;
1057 		}
1058 #ifdef DIAGNOSTIC
1059 		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
1060 		    bp->b_bcount < size && vp->v_type != VBLK)
1061 			panic("getblk: block size invariant failed");
1062 #endif
1063 		SET(bp->b_flags, B_BUSY);
1064 		bremfree(bp);
1065 		preserve = 1;
1066 	} else {
1067 		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
1068 			simple_unlock(&bqueue_slock);
1069 			splx(s);
1070 			goto start;
1071 		}
1072 
1073 		binshash(bp, BUFHASH(vp, blkno));
1074 		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1075 		bgetvp(vp, bp);
1076 		preserve = 0;
1077 	}
1078 	simple_unlock(&bp->b_interlock);
1079 	simple_unlock(&bqueue_slock);
1080 	splx(s);
1081 	/*
1082 	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1083 	 * if we re-size buffers here.
1084 	 */
1085 	if (ISSET(bp->b_flags, B_LOCKED)) {
1086 		KASSERT(bp->b_bufsize >= size);
1087 	} else {
1088 		allocbuf(bp, size, preserve);
1089 	}
1090 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1091 	return (bp);
1092 }
1093 
1094 /*
1095  * Get an empty, disassociated buffer of given size.
1096  */
1097 struct buf *
1098 geteblk(int size)
1099 {
1100 	struct buf *bp;
1101 	int s;
1102 
1103 	s = splbio();
1104 	simple_lock(&bqueue_slock);
1105 	while ((bp = getnewbuf(0, 0, 0)) == 0)
1106 		;
1107 
1108 	SET(bp->b_flags, B_INVAL);
1109 	binshash(bp, &invalhash);
1110 	simple_unlock(&bqueue_slock);
1111 	simple_unlock(&bp->b_interlock);
1112 	splx(s);
1113 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1114 	allocbuf(bp, size, 0);
1115 	return (bp);
1116 }
1117 
1118 /*
1119  * Expand or contract the actual memory allocated to a buffer.
1120  *
1121  * If the buffer shrinks, data is lost, so it's up to the
1122  * caller to have written it out *first*; this routine will not
1123  * start a write.  If the buffer grows, it's the caller's
1124  * responsibility to fill out the buffer's additional contents.
1125  */
1126 void
1127 allocbuf(struct buf *bp, int size, int preserve)
1128 {
1129 	vsize_t oldsize, desired_size;
1130 	void *addr;
1131 	int s, delta;
1132 
1133 	desired_size = buf_roundsize(size);
1134 	if (desired_size > MAXBSIZE)
1135 		printf("allocbuf: buffer larger than MAXBSIZE requested\n");
1136 
1137 	bp->b_bcount = size;
1138 
1139 	oldsize = bp->b_bufsize;
1140 	if (oldsize == desired_size)
1141 		return;
1142 
1143 	/*
1144 	 * If we want a buffer of a different size, re-allocate the
1145 	 * buffer's memory; copy old content only if needed.
1146 	 */
1147 	addr = buf_malloc(desired_size);
1148 	if (preserve)
1149 		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1150 	if (bp->b_data != NULL)
1151 		buf_mrelease(bp->b_data, oldsize);
1152 	bp->b_data = addr;
1153 	bp->b_bufsize = desired_size;
1154 
1155 	/*
1156 	 * Update overall buffer memory counter (protected by bqueue_slock)
1157 	 */
1158 	delta = (long)desired_size - (long)oldsize;
1159 
1160 	s = splbio();
1161 	simple_lock(&bqueue_slock);
1162 	if ((bufmem += delta) > bufmem_hiwater) {
1163 		/*
1164 		 * Need to trim overall memory usage.
1165 		 */
1166 		while (buf_canrelease()) {
1167 			if (curcpu()->ci_schedstate.spc_flags &
1168 			    SPCF_SHOULDYIELD) {
1169 				simple_unlock(&bqueue_slock);
1170 				splx(s);
1171 				preempt();
1172 				s = splbio();
1173 				simple_lock(&bqueue_slock);
1174 			}
1175 
1176 			if (buf_trim() == 0)
1177 				break;
1178 		}
1179 	}
1180 
1181 	simple_unlock(&bqueue_slock);
1182 	splx(s);
1183 }
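/*
 * Example (illustrative): growing a 4096-byte buffer to 6000 bytes with
 * "preserve" set allocates from the 8192-byte pool (6000 rounds up to
 * 8192), copies the old 4096 bytes into the new memory, releases the old
 * memory, and adds the 4096-byte delta to bufmem, trimming the cache if
 * that pushes it over bufmem_hiwater.
 */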
1184 
1185 /*
1186  * Find a buffer which is available for use.
1187  * Select something from a free list.
1188  * Preference is to AGE list, then LRU list.
1189  *
1190  * Called at splbio and with buffer queues locked.
1191  * Return buffer locked.
1192  */
1193 struct buf *
1194 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1195 {
1196 	struct buf *bp;
1197 
1198 start:
1199 	LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1200 
1201 	/*
1202 	 * Get a new buffer from the pool; but use NOWAIT because
1203 	 * we have the buffer queues locked.
1204 	 */
1205 	if (!from_bufq && buf_lotsfree() &&
1206 	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1207 		memset((char *)bp, 0, sizeof(*bp));
1208 		BUF_INIT(bp);
1209 		bp->b_dev = NODEV;
1210 		bp->b_vnbufs.le_next = NOLIST;
1211 		bp->b_flags = B_BUSY;
1212 		simple_lock(&bp->b_interlock);
1213 #if defined(DIAGNOSTIC)
1214 		bp->b_freelistindex = -1;
1215 #endif /* defined(DIAGNOSTIC) */
1216 		return (bp);
1217 	}
1218 
1219 	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
1220 	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
1221 		simple_lock(&bp->b_interlock);
1222 		bremfree(bp);
1223 	} else {
1224 		/*
1225 		 * XXX: !from_bufq should be removed.
1226 		 */
1227 		if (!from_bufq || curlwp != uvm.pagedaemon_lwp) {
1228 			/* wait for a free buffer of any kind */
1229 			needbuffer = 1;
1230 			ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
1231 			    "getnewbuf", slptimeo, &bqueue_slock);
1232 		}
1233 		return (NULL);
1234 	}
1235 
1236 #ifdef DIAGNOSTIC
1237 	if (bp->b_bufsize <= 0)
1238 		panic("buffer %p: on queue but empty", bp);
1239 #endif
1240 
1241 	if (ISSET(bp->b_flags, B_VFLUSH)) {
1242 		/*
1243 		 * This is a delayed write buffer being flushed to disk.  Make
1244 		 * sure it gets aged out of the queue when it's finished, and
1245 		 * leave it off the LRU queue.
1246 		 */
1247 		CLR(bp->b_flags, B_VFLUSH);
1248 		SET(bp->b_flags, B_AGE);
1249 		simple_unlock(&bp->b_interlock);
1250 		goto start;
1251 	}
1252 
1253 	/* Buffer is no longer on free lists. */
1254 	SET(bp->b_flags, B_BUSY);
1255 
1256 	/*
1257 	 * If buffer was a delayed write, start it and return NULL
1258 	 * (since we might sleep while starting the write).
1259 	 */
1260 	if (ISSET(bp->b_flags, B_DELWRI)) {
1261 		/*
1262 		 * This buffer has gone through the LRU, so make sure it gets
1263 		 * reused ASAP.
1264 		 */
1265 		SET(bp->b_flags, B_AGE);
1266 		simple_unlock(&bp->b_interlock);
1267 		simple_unlock(&bqueue_slock);
1268 		bawrite(bp);
1269 		simple_lock(&bqueue_slock);
1270 		return (NULL);
1271 	}
1272 
1273 	/* disassociate us from our vnode, if we had one... */
1274 	if (bp->b_vp)
1275 		brelvp(bp);
1276 
1277 	if (LIST_FIRST(&bp->b_dep) != NULL && bioopsp)
1278 		bioopsp->io_deallocate(bp);
1279 
1280 	/* clear out various other fields */
1281 	bp->b_flags = B_BUSY;
1282 	bp->b_dev = NODEV;
1283 	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1284 	bp->b_iodone = 0;
1285 	bp->b_error = 0;
1286 	bp->b_resid = 0;
1287 	bp->b_bcount = 0;
1288 
1289 	bremhash(bp);
1290 	return (bp);
1291 }
1292 
1293 /*
1294  * Attempt to free an aged buffer off the queues.
1295  * Called at splbio and with queue lock held.
1296  * Returns the amount of buffer memory freed.
1297  */
1298 static int
1299 buf_trim(void)
1300 {
1301 	struct buf *bp;
1302 	long size = 0;
1303 
1304 	/* Instruct getnewbuf() to get buffers off the queues */
1305 	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1306 		return 0;
1307 
1308 	KASSERT(!ISSET(bp->b_flags, B_WANTED));
1309 	simple_unlock(&bp->b_interlock);
1310 	size = bp->b_bufsize;
1311 	bufmem -= size;
1312 	simple_unlock(&bqueue_slock);
1313 	if (size > 0) {
1314 		buf_mrelease(bp->b_data, size);
1315 		bp->b_bcount = bp->b_bufsize = 0;
1316 	}
1317 	/* brelse() will return the buffer to the global buffer pool */
1318 	brelse(bp, 0);
1319 	simple_lock(&bqueue_slock);
1320 	return size;
1321 }
1322 
1323 int
1324 buf_drain(int n)
1325 {
1326 	int s, size = 0, sz;
1327 
1328 	s = splbio();
1329 	simple_lock(&bqueue_slock);
1330 
1331 	while (size < n && bufmem > bufmem_lowater) {
1332 		sz = buf_trim();
1333 		if (sz <= 0)
1334 			break;
1335 		size += sz;
1336 	}
1337 
1338 	simple_unlock(&bqueue_slock);
1339 	splx(s);
1340 	return size;
1341 }
1342 
1343 /*
1344  * Wait for operations on the buffer to complete.
1345  * When they do, extract and return the I/O's error value.
1346  */
1347 int
1348 biowait(struct buf *bp)
1349 {
1350 	int s, error;
1351 
1352 	s = splbio();
1353 	simple_lock(&bp->b_interlock);
1354 	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1355 		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1356 	error = bp->b_error;
1357 	simple_unlock(&bp->b_interlock);
1358 	splx(s);
1359 	return (error);
1360 }
1361 
1362 /*
1363  * Mark I/O complete on a buffer.
1364  *
1365  * If a callback has been requested, e.g. the pageout
1366  * daemon, do so. Otherwise, awaken waiting processes.
1367  *
1368  * [ Leffler, et al., says on p.247:
1369  *	"This routine wakes up the blocked process, frees the buffer
1370  *	for an asynchronous write, or, for a request by the pagedaemon
1371  *	process, invokes a procedure specified in the buffer structure" ]
1372  *
1373  * In real life, the pagedaemon (or other system processes) wants
1374  * to do async stuff too, and doesn't want the buffer brelse()'d.
1375  * (for swap pager, that puts swap buffers on the free lists (!!!),
1376  * for the vn device, that puts malloc'd buffers on the free lists!)
1377  */
1378 void
1379 biodone(struct buf *bp)
1380 {
1381 	int s = splbio();
1382 
1383 	simple_lock(&bp->b_interlock);
1384 	if (ISSET(bp->b_flags, B_DONE))
1385 		panic("biodone already");
1386 	CLR(bp->b_flags, B_COWDONE);
1387 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1388 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
1389 
1390 	if (LIST_FIRST(&bp->b_dep) != NULL && bioopsp)
1391 		bioopsp->io_complete(bp);
1392 
1393 	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
1394 		vwakeup(bp);
1395 
1396 	/*
1397 	 * If necessary, call out.  Unlock the buffer before calling
1398 	 * b_iodone() as the buffer isn't valid any more when it returns.
1399 	 */
1400 	if (ISSET(bp->b_flags, B_CALL)) {
1401 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1402 		simple_unlock(&bp->b_interlock);
1403 		(*bp->b_iodone)(bp);
1404 	} else {
1405 		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
1406 			simple_unlock(&bp->b_interlock);
1407 			brelse(bp, 0);
1408 		} else {			/* or just wakeup the buffer */
1409 			CLR(bp->b_flags, B_WANTED);
1410 			wakeup(bp);
1411 			simple_unlock(&bp->b_interlock);
1412 		}
1413 	}
1414 
1415 	splx(s);
1416 }
1417 
1418 /*
1419  * Return a count of buffers on the "locked" queue.
1420  */
1421 int
1422 count_lock_queue(void)
1423 {
1424 	struct buf *bp;
1425 	int n = 0;
1426 
1427 	simple_lock(&bqueue_slock);
1428 	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
1429 		n++;
1430 	simple_unlock(&bqueue_slock);
1431 	return (n);
1432 }
1433 
1434 /*
1435  * Wait for all buffers to complete I/O.
1436  * Return the number of "stuck" buffers.
1437  */
1438 int
1439 buf_syncwait(void)
1440 {
1441 	struct buf *bp;
1442 	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1443 
1444 	dcount = 10000;
1445 	for (iter = 0; iter < 20;) {
1446 		s = splbio();
1447 		simple_lock(&bqueue_slock);
1448 		nbusy = 0;
1449 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1450 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1451 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1452 				nbusy++;
1453 			/*
1454 			 * With soft updates, some buffers that are
1455 			 * written will be remarked as dirty until other
1456 			 * buffers are written.
1457 			 */
1458 			if (bp->b_vp && bp->b_vp->v_mount
1459 			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1460 			    && (bp->b_flags & B_DELWRI)) {
1461 				simple_lock(&bp->b_interlock);
1462 				bremfree(bp);
1463 				bp->b_flags |= B_BUSY;
1464 				nbusy++;
1465 				simple_unlock(&bp->b_interlock);
1466 				simple_unlock(&bqueue_slock);
1467 				bawrite(bp);
1468 				if (dcount-- <= 0) {
1469 					printf("softdep ");
1470 					splx(s);
1471 					goto fail;
1472 				}
1473 				simple_lock(&bqueue_slock);
1474 			}
1475 		    }
1476 		}
1477 
1478 		simple_unlock(&bqueue_slock);
1479 		splx(s);
1480 
1481 		if (nbusy == 0)
1482 			break;
1483 		if (nbusy_prev == 0)
1484 			nbusy_prev = nbusy;
1485 		printf("%d ", nbusy);
1486 		tsleep(&nbusy, PRIBIO, "bflush",
1487 		    (iter == 0) ? 1 : hz / 25 * iter);
1488 		if (nbusy >= nbusy_prev) /* we didn't flush anything */
1489 			iter++;
1490 		else
1491 			nbusy_prev = nbusy;
1492 	}
1493 
1494 	if (nbusy) {
1495 fail:;
1496 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1497 		printf("giving up\nPrinting vnodes for busy buffers\n");
1498 		s = splbio();
1499 		for (ihash = 0; ihash < bufhash+1; ihash++) {
1500 		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1501 			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1502 				vprint(NULL, bp->b_vp);
1503 		    }
1504 		}
1505 		splx(s);
1506 #endif
1507 	}
1508 
1509 	return nbusy;
1510 }
1511 
1512 static void
1513 sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
1514 {
1515 
1516 	o->b_flags = i->b_flags;
1517 	o->b_error = i->b_error;
1518 	o->b_prio = i->b_prio;
1519 	o->b_dev = i->b_dev;
1520 	o->b_bufsize = i->b_bufsize;
1521 	o->b_bcount = i->b_bcount;
1522 	o->b_resid = i->b_resid;
1523 	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
1524 	o->b_blkno = i->b_blkno;
1525 	o->b_rawblkno = i->b_rawblkno;
1526 	o->b_iodone = PTRTOUINT64(i->b_iodone);
1527 	o->b_proc = PTRTOUINT64(i->b_proc);
1528 	o->b_vp = PTRTOUINT64(i->b_vp);
1529 	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1530 	o->b_lblkno = i->b_lblkno;
1531 }
1532 
1533 #define KERN_BUFSLOP 20
1534 static int
1535 sysctl_dobuf(SYSCTLFN_ARGS)
1536 {
1537 	struct buf *bp;
1538 	struct buf_sysctl bs;
1539 	char *dp;
1540 	u_int i, op, arg;
1541 	size_t len, needed, elem_size, out_size;
1542 	int error, s, elem_count;
1543 
1544 	if (namelen == 1 && name[0] == CTL_QUERY)
1545 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
1546 
1547 	if (namelen != 4)
1548 		return (EINVAL);
1549 
1550 	dp = oldp;
1551 	len = (oldp != NULL) ? *oldlenp : 0;
1552 	op = name[0];
1553 	arg = name[1];
1554 	elem_size = name[2];
1555 	elem_count = name[3];
1556 	out_size = MIN(sizeof(bs), elem_size);
1557 
1558 	/*
1559 	 * at the moment, these are just "placeholders" to make the
1560 	 * API for retrieving kern.buf data more extensible in the
1561 	 * future.
1562 	 *
1563 	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
1564 	 * these will be resolved at a later point.
1565 	 */
1566 	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1567 	    elem_size < 1 || elem_count < 0)
1568 		return (EINVAL);
1569 
1570 	error = 0;
1571 	needed = 0;
1572 	s = splbio();
1573 	simple_lock(&bqueue_slock);
1574 	for (i = 0; i < BQUEUES; i++) {
1575 		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
1576 			if (len >= elem_size && elem_count > 0) {
1577 				sysctl_fillbuf(bp, &bs);
1578 				error = copyout(&bs, dp, out_size);
1579 				if (error)
1580 					goto cleanup;
1581 				dp += elem_size;
1582 				len -= elem_size;
1583 			}
1584 			if (elem_count > 0) {
1585 				needed += elem_size;
1586 				if (elem_count != INT_MAX)
1587 					elem_count--;
1588 			}
1589 		}
1590 	}
1591 cleanup:
1592 	simple_unlock(&bqueue_slock);
1593 	splx(s);
1594 
1595 	*oldlenp = needed;
1596 	if (oldp == NULL)
1597 		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);
1598 
1599 	return (error);
1600 }
1601 
1602 static void
1603 sysctl_bufvm_common(void)
1604 {
1605 	int64_t t;
1606 
1607 	/* Drain until below new high water mark */
1608 	while ((t = (int64_t)bufmem - (int64_t)bufmem_hiwater) >= 0) {
1609 		if (buf_drain(t / (2 * 1024)) <= 0)
1610 			break;
1611 	}
1612 }
1613 
1614 static int
1615 sysctl_bufcache_update(SYSCTLFN_ARGS)
1616 {
1617 	int t, error;
1618 	struct sysctlnode node;
1619 
1620 	node = *rnode;
1621 	node.sysctl_data = &t;
1622 	t = *(int *)rnode->sysctl_data;
1623 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1624 	if (error || newp == NULL)
1625 		return (error);
1626 
1627 	if (t < 0 || t > 100)
1628 		return EINVAL;
1629 	bufcache = t;
1630 	buf_setwm();
1631 
1632 	sysctl_bufvm_common();
1633 	return 0;
1634 }
1635 
1636 static int
1637 sysctl_bufvm_update(SYSCTLFN_ARGS)
1638 {
1639 	int64_t t;
1640 	int error;
1641 	struct sysctlnode node;
1642 
1643 	node = *rnode;
1644 	node.sysctl_data = &t;
1645 	t = *(int64_t *)rnode->sysctl_data;
1646 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1647 	if (error || newp == NULL)
1648 		return (error);
1649 
1650 	if (t < 0)
1651 		return EINVAL;
1652 	if (rnode->sysctl_data == &bufmem_lowater) {
1653 		if (bufmem_hiwater - t < 16)
1654 			return (EINVAL);
1655 		bufmem_lowater = t;
1656 	} else if (rnode->sysctl_data == &bufmem_hiwater) {
1657 		if (t - bufmem_lowater < 16)
1658 			return (EINVAL);
1659 		bufmem_hiwater = t;
1660 	} else
1661 		return (EINVAL);
1662 
1663 	sysctl_bufvm_common();
1664 
1665 	return 0;
1666 }
1667 
1668 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1669 {
1670 
1671 	sysctl_createv(clog, 0, NULL, NULL,
1672 		       CTLFLAG_PERMANENT,
1673 		       CTLTYPE_NODE, "kern", NULL,
1674 		       NULL, 0, NULL, 0,
1675 		       CTL_KERN, CTL_EOL);
1676 	sysctl_createv(clog, 0, NULL, NULL,
1677 		       CTLFLAG_PERMANENT,
1678 		       CTLTYPE_NODE, "buf",
1679 		       SYSCTL_DESCR("Kernel buffer cache information"),
1680 		       sysctl_dobuf, 0, NULL, 0,
1681 		       CTL_KERN, KERN_BUF, CTL_EOL);
1682 }
1683 
1684 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1685 {
1686 
1687 	sysctl_createv(clog, 0, NULL, NULL,
1688 		       CTLFLAG_PERMANENT,
1689 		       CTLTYPE_NODE, "vm", NULL,
1690 		       NULL, 0, NULL, 0,
1691 		       CTL_VM, CTL_EOL);
1692 
1693 	sysctl_createv(clog, 0, NULL, NULL,
1694 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1695 		       CTLTYPE_INT, "bufcache",
1696 		       SYSCTL_DESCR("Percentage of physical memory to use for "
1697 				    "buffer cache"),
1698 		       sysctl_bufcache_update, 0, &bufcache, 0,
1699 		       CTL_VM, CTL_CREATE, CTL_EOL);
1700 	sysctl_createv(clog, 0, NULL, NULL,
1701 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1702 		       CTLTYPE_QUAD, "bufmem",
1703 		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
1704 				    "cache"),
1705 		       NULL, 0, &bufmem, 0,
1706 		       CTL_VM, CTL_CREATE, CTL_EOL);
1707 	sysctl_createv(clog, 0, NULL, NULL,
1708 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1709 		       CTLTYPE_QUAD, "bufmem_lowater",
1710 		       SYSCTL_DESCR("Minimum amount of kernel memory to "
1711 				    "reserve for buffer cache"),
1712 		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1713 		       CTL_VM, CTL_CREATE, CTL_EOL);
1714 	sysctl_createv(clog, 0, NULL, NULL,
1715 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1716 		       CTLTYPE_QUAD, "bufmem_hiwater",
1717 		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
1718 				    "for buffer cache"),
1719 		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1720 		       CTL_VM, CTL_CREATE, CTL_EOL);
1721 }
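/*
 * Userland sketch (illustrative only, not kernel code): the knobs created
 * above can be tuned by name with sysctlbyname(3), e.g. to raise the
 * cache percentage to 20:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *
 *	int pct = 20;
 *
 *	if (sysctlbyname("vm.bufcache", NULL, NULL, &pct,
 *	    sizeof(pct)) == -1)
 *		err(1, "sysctlbyname vm.bufcache");
 */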
1722 
1723 #ifdef DEBUG
1724 /*
1725  * Print out statistics on the current allocation of the buffer pool.
1726  * Can be enabled to print out on every ``sync'' by setting "syncprt"
1727  * in vfs_syscalls.c using sysctl.
1728  */
1729 void
1730 vfs_bufstats(void)
1731 {
1732 	int s, i, j, count;
1733 	struct buf *bp;
1734 	struct bqueue *dp;
1735 	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1736 	static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1737 
1738 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1739 		count = 0;
1740 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1741 			counts[j] = 0;
1742 		s = splbio();
1743 		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
1744 			counts[bp->b_bufsize/PAGE_SIZE]++;
1745 			count++;
1746 		}
1747 		splx(s);
1748 		printf("%s: total-%d", bname[i], count);
1749 		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1750 			if (counts[j] != 0)
1751 				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1752 		printf("\n");
1753 	}
1754 }
1755 #endif /* DEBUG */
1756 
1757 /* ------------------------------ */
1758 
1759 static struct buf *
1760 getiobuf1(int prflags)
1761 {
1762 	struct buf *bp;
1763 	int s;
1764 
1765 	s = splbio();
1766 	bp = pool_get(&bufiopool, prflags);
1767 	splx(s);
1768 	if (bp != NULL) {
1769 		BUF_INIT(bp);
1770 	}
1771 	return bp;
1772 }
1773 
1774 struct buf *
1775 getiobuf(void)
1776 {
1777 
1778 	return getiobuf1(PR_WAITOK);
1779 }
1780 
1781 struct buf *
1782 getiobuf_nowait(void)
1783 {
1784 
1785 	return getiobuf1(PR_NOWAIT);
1786 }
1787 
1788 void
1789 putiobuf(struct buf *bp)
1790 {
1791 	int s;
1792 
1793 	s = splbio();
1794 	pool_put(&bufiopool, bp);
1795 	splx(s);
1796 }
1797 
1798 /*
1799  * nestiobuf_iodone: b_iodone callback for nested buffers.
1800  */
1801 
1802 void
1803 nestiobuf_iodone(struct buf *bp)
1804 {
1805 	struct buf *mbp = bp->b_private;
1806 	int error;
1807 	int donebytes;
1808 
1809 	KASSERT(bp->b_bcount <= bp->b_bufsize);
1810 	KASSERT(mbp != bp);
1811 
1812 	error = 0;
1813 	if (bp->b_error != 0) {
1814 		error = bp->b_error;
1815 	} else if ((bp->b_bcount < bp->b_bufsize) || (bp->b_resid > 0)) {
1816 		/*
1817 		 * Not all of it was transferred; raise an error.  We have no way to
1818 		 * propagate these conditions to mbp.
1819 		 */
1820 		error = EIO;
1821 	}
1822 
1823 	donebytes = bp->b_bufsize;
1824 
1825 	putiobuf(bp);
1826 	nestiobuf_done(mbp, donebytes, error);
1827 }
1828 
1829 /*
1830  * nestiobuf_setup: setup a "nested" buffer.
1831  *
1832  * => 'mbp' is a "master" buffer which is being divided into sub pieces.
1833  * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
1834  * => 'offset' is a byte offset in the master buffer.
1835  * => 'size' is a size in bytes of this nested buffer.
1836  */
1837 
1838 void
1839 nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
1840 {
1841 	const int b_read = mbp->b_flags & B_READ;
1842 	struct vnode *vp = mbp->b_vp;
1843 
1844 	KASSERT(mbp->b_bcount >= offset + size);
1845 	bp->b_vp = vp;
1846 	bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
1847 	bp->b_iodone = nestiobuf_iodone;
1848 	bp->b_data = (char *)mbp->b_data + offset;
1849 	bp->b_resid = bp->b_bcount = size;
1850 	bp->b_bufsize = bp->b_bcount;
1851 	bp->b_private = mbp;
1852 	BIO_COPYPRIO(bp, mbp);
1853 	if (!b_read && vp != NULL) {
1854 		int s;
1855 
1856 		s = splbio();
1857 		V_INCR_NUMOUTPUT(vp);
1858 		splx(s);
1859 	}
1860 }
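/*
 * Usage sketch (illustrative only; "vp", "blkno" and error handling are
 * assumed): split a master buffer "mbp" into two halves issued
 * separately; nestiobuf_iodone()/nestiobuf_done() biodone() the master
 * once both halves complete:
 *
 *	int half = mbp->b_bcount / 2;
 *	struct buf *bp1 = getiobuf();
 *	struct buf *bp2 = getiobuf();
 *
 *	mbp->b_resid = mbp->b_bcount;
 *	nestiobuf_setup(mbp, bp1, 0, half);
 *	nestiobuf_setup(mbp, bp2, half, mbp->b_bcount - half);
 *	bp1->b_blkno = blkno;
 *	bp2->b_blkno = blkno + btodb(half);
 *	VOP_STRATEGY(vp, bp1);
 *	VOP_STRATEGY(vp, bp2);
 */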
1861 
1862 /*
1863  * nestiobuf_done: propagate completion to the master buffer.
1864  *
1865  * => 'donebytes' specifies how many bytes of 'mbp' have completed.
1866  * => 'error' is the errno(2) value with which those bytes completed.
1867  */
1868 
1869 void
1870 nestiobuf_done(struct buf *mbp, int donebytes, int error)
1871 {
1872 	int s;
1873 
1874 	if (donebytes == 0) {
1875 		return;
1876 	}
1877 	s = splbio();
1878 	KASSERT(mbp->b_resid >= donebytes);
1879 	if (error) {
1880 		mbp->b_error = error;
1881 	}
1882 	mbp->b_resid -= donebytes;
1883 	if (mbp->b_resid == 0) {
1884 		if (mbp->b_error != 0) {
1885 			mbp->b_resid = mbp->b_bcount; /* be conservative */
1886 		}
1887 		biodone(mbp);
1888 	}
1889 	splx(s);
1890 }
1891