xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 53545)
/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.47 (Berkeley) 05/14/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		bp->b_validoff = 0;
		bp->b_validend = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bfreelist[BQ_AGE] : &bfreelist[BQ_EMPTY];
		binsheadfree(bp, dp);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
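
/*
 * Example (editorial sketch, not part of the original source): a typical
 * filesystem read path obtains a logical block with bread() and releases
 * it when done.  The vnode "vp", logical block number "lbn", block size
 * "bsize", and credential "cred" are assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, cred, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of bp->b_un.b_addr ...
 *	brelse(bp);
 */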

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there are read-ahead blocks, start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i]);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
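
/*
 * Example (editorial sketch, not part of the original source): a caller
 * that wants one block plus one block of read-ahead might call breadn()
 * as follows; "lbn", "bsize", and "cred" are assumed to come from the
 * caller, and the next logical block is fetched asynchronously.
 *
 *	struct buf *bp;
 *	daddr_t rablkno = lbn + 1;
 *	int rabsize = bsize, error;
 *
 *	error = breadn(vp, lbn, bsize, &rablkno, &rabsize, 1, cred, &bp);
 */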

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	USES_VOP_STRATEGY;
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}
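
/*
 * Example (editorial sketch, not part of the original source): after
 * filling a buffer obtained from getblk(), a synchronous caller writes
 * it out and checks the result; bwrite() releases the buffer itself.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	... fill bp->b_un.b_addr ...
 *	if (error = bwrite(bp))
 *		return (error);
 */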

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	USES_VOP_IOCTL;
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}
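
/*
 * Example (editorial sketch, not part of the original source): a small
 * write that only partially fills a block and is likely to be extended
 * soon marks the buffer dirty and defers the physical write.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	... modify part of bp->b_un.b_addr ...
 *	bdwrite(bp);
 *
 * There is no status to check; the buffer is flushed later by the buffer
 * cache (or sooner, if getnewbuf() reclaims it).
 */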

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
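
/*
 * Example (editorial sketch, not part of the original source): when the
 * caller has finished with a block and does not need completion status,
 * it can start the write and continue immediately; biodone() releases
 * the buffer when the I/O finishes.
 *
 *	... fill bp->b_un.b_addr ...
 *	bawrite(bp);
 */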

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
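
/*
 * Editorial note (not part of the original source): the queue chosen
 * above determines how quickly the buffer is reused.  BQ_LOCKED buffers
 * are never reclaimed, BQ_AGE buffers are reclaimed before BQ_LRU
 * buffers, and BQ_EMPTY holds headers with no memory attached; see
 * getnewbuf() below.
 */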

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}
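
/*
 * Example (editorial sketch, not part of the original source): breadn()
 * above uses incore() to skip read-ahead for blocks already cached:
 *
 *	if (incore(vp, rablkno[i]))
 *		continue;
 */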

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
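
/*
 * Example (editorial sketch, not part of the original source): getblk()
 * is appropriate when a block will be completely overwritten, since its
 * old contents need not be read from disk.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	bzero(bp->b_un.b_addr, (unsigned)bsize);
 *	... fill in the new contents ...
 *	bwrite(bp);
 */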

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}
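
/*
 * Example (editorial sketch, not part of the original source): geteblk()
 * returns an anonymous buffer tied to no vnode or logical block; it is
 * useful as scratch space and is released with brelse() when done.
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_un.b_addr as temporary storage ...
 *	brelse(bp);
 */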

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
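
/*
 * Editorial note (not part of the original source): allocbuf() keeps the
 * total amount of buffer memory constant.  Shrinking a buffer moves the
 * excess pages onto a spare header taken from BQ_EMPTY; growing a buffer
 * steals pages from victims returned by getnewbuf(), parking any emptied
 * headers back on BQ_EMPTY.
 */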

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

#ifdef LFS
	lfs_flush();
#endif
loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}
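
/*
 * Editorial note (not part of the original source): a buffer returned by
 * getnewbuf() is marked B_BUSY and has been stripped of its vnode,
 * credentials, and dirty/valid ranges, but it is still on its old hash
 * chain; callers such as getblk() and geteblk() rehash it before use.
 */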

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}
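
/*
 * Example (editorial sketch, not part of the original source): a caller
 * that starts its own synchronous I/O waits for completion just as
 * bread() does above:
 *
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */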

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
6078Sbill }
608