xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 53578)
149589Sbostic /*-
249589Sbostic  * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
337736Smckusick  * All rights reserved.
423395Smckusick  *
549618Smckusick  * This module is believed to contain source code proprietary to AT&T.
649618Smckusick  * Use and redistribution is subject to the Berkeley Software License
749618Smckusick  * Agreement and your Software Agreement with AT&T (Western Electric).
837736Smckusick  *
9*53578Sheideman  *	@(#)vfs_cluster.c	7.48 (Berkeley) 05/15/92
1023395Smckusick  */
118Sbill 
1251455Sbostic #include <sys/param.h>
1351455Sbostic #include <sys/proc.h>
1451455Sbostic #include <sys/buf.h>
1551455Sbostic #include <sys/vnode.h>
1651455Sbostic #include <sys/specdev.h>
1751455Sbostic #include <sys/mount.h>
1851455Sbostic #include <sys/trace.h>
1951455Sbostic #include <sys/resourcevar.h>
208Sbill 
2191Sbill /*
2249280Skarels  * Initialize buffers and hash links for buffers.
2349280Skarels  */
2451455Sbostic void
2549280Skarels bufinit()
2649280Skarels {
2749280Skarels 	register int i;
2849280Skarels 	register struct buf *bp, *dp;
2949280Skarels 	register struct bufhd *hp;
3049280Skarels 	int base, residual;
3149280Skarels 
	/* Make every hash chain empty: the header links point back at itself. */
3249280Skarels 	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
3349280Skarels 		hp->b_forw = hp->b_back = (struct buf *)hp;
3449280Skarels 
	/* Initialize each free-list header as an empty circular queue. */
3549280Skarels 	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
3649280Skarels 		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
3749280Skarels 		dp->b_flags = B_HEAD;
3849280Skarels 	}
	/*
	 * Spread bufpages pages of buffer memory across the nbuf headers;
	 * the first `residual' buffers get one extra page each.
	 */
3949280Skarels 	base = bufpages / nbuf;
4049280Skarels 	residual = bufpages % nbuf;
4149280Skarels 	for (i = 0; i < nbuf; i++) {
4249280Skarels 		bp = &buf[i];
4349280Skarels 		bp->b_dev = NODEV;
4449280Skarels 		bp->b_bcount = 0;
4549280Skarels 		bp->b_rcred = NOCRED;
4649280Skarels 		bp->b_wcred = NOCRED;
4749280Skarels 		bp->b_dirtyoff = 0;
4849280Skarels 		bp->b_dirtyend = 0;
4952189Smckusick 		bp->b_validoff = 0;
5052189Smckusick 		bp->b_validend = 0;
		/* Each header owns a MAXBSIZE-sized virtual address slot. */
5149280Skarels 		bp->b_un.b_addr = buffers + i * MAXBSIZE;
5249280Skarels 		if (i < residual)
5349280Skarels 			bp->b_bufsize = (base + 1) * CLBYTES;
5449280Skarels 		else
5549280Skarels 			bp->b_bufsize = base * CLBYTES;
5649280Skarels 		binshash(bp, &bfreelist[BQ_AGE]);
5752413Storek 		bp->b_flags = B_INVAL;
		/* Headers that got no pages go on the empty-header list. */
5852413Storek 		dp = bp->b_bufsize ? &bfreelist[BQ_AGE] : &bfreelist[BQ_EMPTY];
5952413Storek 		binsheadfree(bp, dp);
6049280Skarels 	}
6149280Skarels }
6249280Skarels 
6349280Skarels /*
6446151Smckusick  * Find the block in the buffer pool.
6546151Smckusick  * If the buffer is not present, allocate a new buffer and load
6646151Smckusick  * its contents according to the filesystem fill routine.
678Sbill  */
6838776Smckusick bread(vp, blkno, size, cred, bpp)
6937736Smckusick 	struct vnode *vp;
706563Smckusic 	daddr_t blkno;
716563Smckusic 	int size;
7238776Smckusick 	struct ucred *cred;
7337736Smckusick 	struct buf **bpp;
748Sbill {
7553545Sheideman 	USES_VOP_STRATEGY;
7647545Skarels 	struct proc *p = curproc;		/* XXX */
778Sbill 	register struct buf *bp;
788Sbill 
798670S 	if (size == 0)
808670S 		panic("bread: size 0");
8137736Smckusick 	*bpp = bp = getblk(vp, blkno, size);
	/* B_DONE or B_DELWRI means the buffer already holds valid data. */
8246151Smckusick 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
8340341Smckusick 		trace(TR_BREADHIT, pack(vp, size), blkno);
8437736Smckusick 		return (0);
858Sbill 	}
868Sbill 	bp->b_flags |= B_READ;
878670S 	if (bp->b_bcount > bp->b_bufsize)
888670S 		panic("bread");
	/* Remember the credentials under which the read is being done. */
8938776Smckusick 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
9038776Smckusick 		crhold(cred);
9138776Smckusick 		bp->b_rcred = cred;
9238776Smckusick 	}
9337736Smckusick 	VOP_STRATEGY(bp);
9440341Smckusick 	trace(TR_BREADMISS, pack(vp, size), blkno);
9547545Skarels 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	/* Wait for completion and return any I/O error to the caller. */
9637736Smckusick 	return (biowait(bp));
978Sbill }
988Sbill 
998Sbill /*
10052189Smckusick  * Operates like bread, but also starts I/O on the N specified
10152189Smckusick  * read-ahead blocks.
1028Sbill  */
10352189Smckusick breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
10437736Smckusick 	struct vnode *vp;
1057114Smckusick 	daddr_t blkno; int size;
10652189Smckusick 	daddr_t rablkno[]; int rabsize[];
10752189Smckusick 	int num;
10838776Smckusick 	struct ucred *cred;
10937736Smckusick 	struct buf **bpp;
1108Sbill {
11153545Sheideman 	USES_VOP_STRATEGY;
11247545Skarels 	struct proc *p = curproc;		/* XXX */
1138Sbill 	register struct buf *bp, *rabp;
11452189Smckusick 	register int i;
1158Sbill 
1168Sbill 	bp = NULL;
1177015Smckusick 	/*
11846151Smckusick 	 * If the block is not memory resident,
11946151Smckusick 	 * allocate a buffer and start I/O.
1207015Smckusick 	 */
12137736Smckusick 	if (!incore(vp, blkno)) {
12237736Smckusick 		*bpp = bp = getblk(vp, blkno, size);
12346151Smckusick 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
1248Sbill 			bp->b_flags |= B_READ;
1258670S 			if (bp->b_count > bp->b_bufsize)
12652189Smckusick 				panic("breadn");
12738776Smckusick 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
12838776Smckusick 				crhold(cred);
12938776Smckusick 				bp->b_rcred = cred;
13038776Smckusick 			}
13137736Smckusick 			VOP_STRATEGY(bp);
13240341Smckusick 			trace(TR_BREADMISS, pack(vp, size), blkno);
13347545Skarels 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
1347015Smckusick 		} else
13540341Smckusick 			trace(TR_BREADHIT, pack(vp, size), blkno);
1368Sbill 	}
1377015Smckusick 
1387015Smckusick 	/*
13952189Smckusick 	 * If there's read-ahead block(s), start I/O
14052189Smckusick 	 * on them also (as above).
1417015Smckusick 	 */
14252189Smckusick 	for (i = 0; i < num; i++) {
14352189Smckusick 		if (incore(vp, rablkno[i]))
14452189Smckusick 			continue;
14552189Smckusick 		rabp = getblk(vp, rablkno[i], rabsize[i]);
14646151Smckusick 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			/* Already valid; just drop it back on the free list. */
1478Sbill 			brelse(rabp);
14852189Smckusick 			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
1492045Swnj 		} else {
			/* Async: biodone will brelse the buffer on completion. */
15046151Smckusick 			rabp->b_flags |= B_ASYNC | B_READ;
1518670S 			if (rabp->b_bcount > rabp->b_bufsize)
1528670S 				panic("breadrabp");
15338880Smckusick 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
15438776Smckusick 				crhold(cred);
15538880Smckusick 				rabp->b_rcred = cred;
15638776Smckusick 			}
15737736Smckusick 			VOP_STRATEGY(rabp);
15852189Smckusick 			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
15947545Skarels 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
1608Sbill 		}
1618Sbill 	}
1627015Smckusick 
1637015Smckusick 	/*
16446151Smckusick 	 * If block was memory resident, let bread get it.
16546151Smckusick 	 * If block was not memory resident, the read was
16646151Smckusick 	 * started above, so just wait for the read to complete.
1677015Smckusick 	 */
1687114Smckusick 	if (bp == NULL)
16938776Smckusick 		return (bread(vp, blkno, size, cred, bpp));
17037736Smckusick 	return (biowait(bp));
1718Sbill }
1728Sbill 
1738Sbill /*
17446151Smckusick  * Synchronous write.
17546151Smckusick  * Release buffer on completion.
1768Sbill  */
1778Sbill bwrite(bp)
1787015Smckusick 	register struct buf *bp;
1798Sbill {
18053545Sheideman 	USES_VOP_STRATEGY;
18147545Skarels 	struct proc *p = curproc;		/* XXX */
18237736Smckusick 	register int flag;
18352413Storek 	int s, error = 0;
1848Sbill 
	/* Save the caller's flags before clearing the per-I/O state bits. */
1858Sbill 	flag = bp->b_flags;
1869857Ssam 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	/*
	 * Async writes are accounted for up front, since no one will be
	 * around after VOP_STRATEGY to do it.  A delayed-write buffer has
	 * already been charged, so just move it to the vnode's output queue.
	 */
18749459Smckusick 	if (flag & B_ASYNC) {
18849459Smckusick 		if ((flag & B_DELWRI) == 0)
18949459Smckusick 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
19049459Smckusick 		else
19149459Smckusick 			reassignbuf(bp, bp->b_vp);
19249459Smckusick 	}
19340341Smckusick 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
1948670S 	if (bp->b_bcount > bp->b_bufsize)
1958670S 		panic("bwrite");
	/* Bump the vnode's pending-output count under splbio protection. */
19640226Smckusick 	s = splbio();
19739882Smckusick 	bp->b_vp->v_numoutput++;
19840226Smckusick 	splx(s);
19937736Smckusick 	VOP_STRATEGY(bp);
2007015Smckusick 
2017015Smckusick 	/*
20246151Smckusick 	 * If the write was synchronous, then await I/O completion.
2037015Smckusick 	 * If the write was "delayed", then we put the buffer on
20446151Smckusick 	 * the queue of blocks awaiting I/O completion status.
2057015Smckusick 	 */
20646151Smckusick 	if ((flag & B_ASYNC) == 0) {
20737736Smckusick 		error = biowait(bp);
20849459Smckusick 		if ((flag&B_DELWRI) == 0)
20949459Smckusick 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
21049459Smckusick 		else
21149459Smckusick 			reassignbuf(bp, bp->b_vp);
2128Sbill 		brelse(bp);
21337736Smckusick 	} else if (flag & B_DELWRI) {
21452413Storek 		s = splbio();
		/* Age the buffer so it is reused ahead of still-useful data. */
2158Sbill 		bp->b_flags |= B_AGE;
21652413Storek 		splx(s);
21737736Smckusick 	}
21837736Smckusick 	return (error);
2198Sbill }
2208Sbill 
/*
 * VOP glue: unwrap the vop_bwrite_args and hand the buffer to bwrite().
 */
221*53578Sheideman int
222*53578Sheideman vn_bwrite(ap)
223*53578Sheideman 	struct vop_bwrite_args *ap;
224*53578Sheideman {
225*53578Sheideman 	return bwrite (ap->a_bp);
226*53578Sheideman }
227*53578Sheideman 
228*53578Sheideman 
2298Sbill /*
23046151Smckusick  * Delayed write.
23146151Smckusick  *
23246151Smckusick  * The buffer is marked dirty, but is not queued for I/O.
23346151Smckusick  * This routine should be used when the buffer is expected
23446151Smckusick  * to be modified again soon, typically a small write that
23546151Smckusick  * partially fills a buffer.
23646151Smckusick  *
23746151Smckusick  * NB: magnetic tapes cannot be delayed; they must be
23846151Smckusick  * written in the order that the writes are requested.
2398Sbill  */
2408Sbill bdwrite(bp)
2417015Smckusick 	register struct buf *bp;
2428Sbill {
24353545Sheideman 	USES_VOP_IOCTL;
24447545Skarels 	struct proc *p = curproc;		/* XXX */
2458Sbill 
	/* First time this buffer goes delayed-dirty: mark, requeue, charge. */
24639882Smckusick 	if ((bp->b_flags & B_DELWRI) == 0) {
24739882Smckusick 		bp->b_flags |= B_DELWRI;
24839882Smckusick 		reassignbuf(bp, bp->b_vp);
24947545Skarels 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
25039882Smckusick 	}
25137736Smckusick 	/*
25239668Smckusick 	 * If this is a tape drive, the write must be initiated.
	 * (B_TAPE ioctl returning 0 is taken to mean "this is a tape".)
25337736Smckusick 	 */
25448360Smckusick 	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
2558Sbill 		bawrite(bp);
25639668Smckusick 	} else {
		/* Otherwise keep the dirty data cached; no I/O is started. */
25746151Smckusick 		bp->b_flags |= (B_DONE | B_DELWRI);
2588Sbill 		brelse(bp);
2598Sbill 	}
2608Sbill }
2618Sbill 
2628Sbill /*
26346151Smckusick  * Asynchronous write.
26446151Smckusick  * Start I/O on a buffer, but do not wait for it to complete.
26546151Smckusick  * The buffer is released when the I/O completes.
2668Sbill  */
2678Sbill bawrite(bp)
2687015Smckusick 	register struct buf *bp;
2698Sbill {
2708Sbill 
27146151Smckusick 	/*
27246151Smckusick 	 * Setting the ASYNC flag causes bwrite to return
27346151Smckusick 	 * after starting the I/O.
27446151Smckusick 	 */
2758Sbill 	bp->b_flags |= B_ASYNC;
	/* Any I/O error is reported later via biodone, not here. */
27637736Smckusick 	(void) bwrite(bp);
2778Sbill }
2788Sbill 
2798Sbill /*
28046151Smckusick  * Release a buffer.
28146151Smckusick  * Even if the buffer is dirty, no I/O is started.
2828Sbill  */
2838Sbill brelse(bp)
2847015Smckusick 	register struct buf *bp;
2858Sbill {
2862325Swnj 	register struct buf *flist;
28746151Smckusick 	int s;
2888Sbill 
28940341Smckusick 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2907015Smckusick 	/*
29139668Smckusick 	 * If a process is waiting for the buffer, or
29239668Smckusick 	 * is waiting for a free buffer, awaken it.
2937015Smckusick 	 */
29446151Smckusick 	if (bp->b_flags & B_WANTED)
2958Sbill 		wakeup((caddr_t)bp);
29646151Smckusick 	if (bfreelist[0].b_flags & B_WANTED) {
2972325Swnj 		bfreelist[0].b_flags &= ~B_WANTED;
2982325Swnj 		wakeup((caddr_t)bfreelist);
2998Sbill 	}
30039668Smckusick 	/*
30139668Smckusick 	 * Retry I/O for locked buffers rather than invalidating them.
30239668Smckusick 	 */
	/* Free-list manipulation below must run at splbio. */
30352413Storek 	s = splbio();
30439668Smckusick 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
30539668Smckusick 		bp->b_flags &= ~B_ERROR;
30639668Smckusick 	/*
30739668Smckusick 	 * Disassociate buffers that are no longer valid.
30839668Smckusick 	 */
30946151Smckusick 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
31037736Smckusick 		bp->b_flags |= B_INVAL;
31146151Smckusick 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
31239668Smckusick 		if (bp->b_vp)
31339668Smckusick 			brelvp(bp);
31439668Smckusick 		bp->b_flags &= ~B_DELWRI;
31537736Smckusick 	}
3167015Smckusick 	/*
3177015Smckusick 	 * Stick the buffer back on a free list.
3187015Smckusick 	 */
3198670S 	if (bp->b_bufsize <= 0) {
3208670S 		/* block has no buffer ... put at front of unused buffer list */
3218670S 		flist = &bfreelist[BQ_EMPTY];
3228670S 		binsheadfree(bp, flist);
32346151Smckusick 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
3242325Swnj 		/* block has no info ... put at front of most free list */
3258670S 		flist = &bfreelist[BQ_AGE];
3267015Smckusick 		binsheadfree(bp, flist);
3278Sbill 	} else {
		/* Valid data: queue at the tail so it survives longest. */
3282325Swnj 		if (bp->b_flags & B_LOCKED)
3292325Swnj 			flist = &bfreelist[BQ_LOCKED];
3302325Swnj 		else if (bp->b_flags & B_AGE)
3312325Swnj 			flist = &bfreelist[BQ_AGE];
3322325Swnj 		else
3332325Swnj 			flist = &bfreelist[BQ_LRU];
3347015Smckusick 		binstailfree(bp, flist);
3358Sbill 	}
33646151Smckusick 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
3378Sbill 	splx(s);
3388Sbill }
3398Sbill 
3408Sbill /*
34146151Smckusick  * Check to see if a block is currently memory resident.
3428Sbill  */
34337736Smckusick incore(vp, blkno)
34437736Smckusick 	struct vnode *vp;
3457015Smckusick 	daddr_t blkno;
3468Sbill {
3478Sbill 	register struct buf *bp;
3482325Swnj 	register struct buf *dp;
3498Sbill 
	/* Walk the hash chain for (vp, blkno); invalid buffers do not count. */
35038225Smckusick 	dp = BUFHASH(vp, blkno);
3512325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
35239668Smckusick 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
3537015Smckusick 		    (bp->b_flags & B_INVAL) == 0)
3548Sbill 			return (1);
35591Sbill 	return (0);
3568Sbill }
3578Sbill 
35839668Smckusick /*
35946151Smckusick  * Check to see if a block is currently memory resident.
36046151Smckusick  * If it is resident, return it. If it is not resident,
36146151Smckusick  * allocate a new buffer and assign it to the block.
36239668Smckusick  */
3638Sbill struct buf *
36437736Smckusick getblk(vp, blkno, size)
36537736Smckusick 	register struct vnode *vp;
3666563Smckusic 	daddr_t blkno;
3676563Smckusic 	int size;
3688Sbill {
3698670S 	register struct buf *bp, *dp;
3705424Swnj 	int s;
3718Sbill 
37225255Smckusick 	if (size > MAXBSIZE)
37325255Smckusick 		panic("getblk: size too big");
3747015Smckusick 	/*
37546151Smckusick 	 * Search the cache for the block. If the buffer is found,
37646151Smckusick 	 * but it is currently locked, the we must wait for it to
37746151Smckusick 	 * become available.
3787015Smckusick 	 */
37937736Smckusick 	dp = BUFHASH(vp, blkno);
3807015Smckusick loop:
3812325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
38239668Smckusick 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
38346151Smckusick 		    (bp->b_flags & B_INVAL))
3848Sbill 			continue;
38526271Skarels 		s = splbio();
		/* Busy: sleep until released, then rescan from the top. */
38646151Smckusick 		if (bp->b_flags & B_BUSY) {
3878Sbill 			bp->b_flags |= B_WANTED;
38846151Smckusick 			sleep((caddr_t)bp, PRIBIO + 1);
3895424Swnj 			splx(s);
3908Sbill 			goto loop;
3918Sbill 		}
39239882Smckusick 		bremfree(bp);
39339882Smckusick 		bp->b_flags |= B_BUSY;
3945424Swnj 		splx(s);
		/*
		 * Size mismatch: the cached copy is unusable.  Invalidate,
		 * push it out, and allocate a fresh buffer instead.
		 */
39532608Smckusick 		if (bp->b_bcount != size) {
39639668Smckusick 			printf("getblk: stray size");
39739668Smckusick 			bp->b_flags |= B_INVAL;
39839668Smckusick 			bwrite(bp);
39939668Smckusick 			goto loop;
40032608Smckusick 		}
4018Sbill 		bp->b_flags |= B_CACHE;
40226271Skarels 		return (bp);
4038Sbill 	}
	/* Cache miss: take a free buffer and bind it to (vp, blkno). */
4048670S 	bp = getnewbuf();
4057015Smckusick 	bremhash(bp);
40639668Smckusick 	bgetvp(vp, bp);
40745116Smckusick 	bp->b_bcount = 0;
40839668Smckusick 	bp->b_lblkno = blkno;
	/* b_blkno starts as the logical block; the fs may translate later. */
4096563Smckusic 	bp->b_blkno = blkno;
4108670S 	bp->b_error = 0;
41137736Smckusick 	bp->b_resid = 0;
41237736Smckusick 	binshash(bp, dp);
41345116Smckusick 	allocbuf(bp, size);
41426271Skarels 	return (bp);
4158Sbill }
4168Sbill 
4178Sbill /*
41846151Smckusick  * Allocate a buffer.
41946151Smckusick  * The caller will assign it to a block.
4208Sbill  */
4218Sbill struct buf *
4226563Smckusic geteblk(size)
4236563Smckusic 	int size;
4248Sbill {
4258670S 	register struct buf *bp, *flist;
4268Sbill 
42725255Smckusick 	if (size > MAXBSIZE)
42825255Smckusick 		panic("geteblk: size too big");
	/* Grab a free buffer; it belongs to no block, so mark it invalid. */
4298670S 	bp = getnewbuf();
4308670S 	bp->b_flags |= B_INVAL;
4317015Smckusick 	bremhash(bp);
4328670S 	flist = &bfreelist[BQ_AGE];
43345116Smckusick 	bp->b_bcount = 0;
43437736Smckusick 	bp->b_error = 0;
43537736Smckusick 	bp->b_resid = 0;
	/* Hash it onto the age list header so bremhash works at release. */
4368670S 	binshash(bp, flist);
43745116Smckusick 	allocbuf(bp, size);
43826271Skarels 	return (bp);
4398Sbill }
4408Sbill 
4418Sbill /*
44245116Smckusick  * Expand or contract the actual memory allocated to a buffer.
44346151Smckusick  * If no memory is available, release buffer and take error exit.
4446563Smckusic  */
44545116Smckusick allocbuf(tp, size)
44645116Smckusick 	register struct buf *tp;
4476563Smckusic 	int size;
4486563Smckusic {
44945116Smckusick 	register struct buf *bp, *ep;
45045116Smckusick 	int sizealloc, take, s;
4516563Smckusic 
	/* Buffer memory is managed in whole clusters. */
45245116Smckusick 	sizealloc = roundup(size, CLBYTES);
45345116Smckusick 	/*
45445116Smckusick 	 * Buffer size does not change
45545116Smckusick 	 */
45645116Smckusick 	if (sizealloc == tp->b_bufsize)
45745116Smckusick 		goto out;
45845116Smckusick 	/*
45945116Smckusick 	 * Buffer size is shrinking.
46045116Smckusick 	 * Place excess space in a buffer header taken from the
46145116Smckusick 	 * BQ_EMPTY buffer list and placed on the "most free" list.
46245116Smckusick 	 * If no extra buffer headers are available, leave the
46345116Smckusick 	 * extra space in the present buffer.
46445116Smckusick 	 */
46545116Smckusick 	if (sizealloc < tp->b_bufsize) {
46645116Smckusick 		ep = bfreelist[BQ_EMPTY].av_forw;
46745116Smckusick 		if (ep == &bfreelist[BQ_EMPTY])
46845116Smckusick 			goto out;
46945116Smckusick 		s = splbio();
47045116Smckusick 		bremfree(ep);
47145116Smckusick 		ep->b_flags |= B_BUSY;
47245116Smckusick 		splx(s);
		/* Move the surplus pages into the spare header's slot. */
47345116Smckusick 		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
47445116Smckusick 		    (int)tp->b_bufsize - sizealloc);
47545116Smckusick 		ep->b_bufsize = tp->b_bufsize - sizealloc;
47645116Smckusick 		tp->b_bufsize = sizealloc;
47745116Smckusick 		ep->b_flags |= B_INVAL;
47845116Smckusick 		ep->b_bcount = 0;
47945116Smckusick 		brelse(ep);
48045116Smckusick 		goto out;
48145116Smckusick 	}
48245116Smckusick 	/*
48345116Smckusick 	 * More buffer space is needed. Get it out of buffers on
48445116Smckusick 	 * the "most free" list, placing the empty headers on the
48545116Smckusick 	 * BQ_EMPTY buffer header list.
48645116Smckusick 	 */
48745116Smckusick 	while (tp->b_bufsize < sizealloc) {
48845116Smckusick 		take = sizealloc - tp->b_bufsize;
48945116Smckusick 		bp = getnewbuf();
49045116Smckusick 		if (take >= bp->b_bufsize)
49145116Smckusick 			take = bp->b_bufsize;
		/* Steal `take' bytes of pages from the tail of the donor. */
49245116Smckusick 		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
49345116Smckusick 		    &tp->b_un.b_addr[tp->b_bufsize], take);
49445116Smckusick 		tp->b_bufsize += take;
49545116Smckusick 		bp->b_bufsize = bp->b_bufsize - take;
49645116Smckusick 		if (bp->b_bcount > bp->b_bufsize)
49745116Smckusick 			bp->b_bcount = bp->b_bufsize;
		/* A fully-drained donor becomes an empty header. */
49845116Smckusick 		if (bp->b_bufsize <= 0) {
49945116Smckusick 			bremhash(bp);
50045116Smckusick 			binshash(bp, &bfreelist[BQ_EMPTY]);
50146151Smckusick 			bp->b_dev = NODEV;
50245116Smckusick 			bp->b_error = 0;
50345116Smckusick 			bp->b_flags |= B_INVAL;
50445116Smckusick 		}
50545116Smckusick 		brelse(bp);
50645116Smckusick 	}
50745116Smckusick out:
50845116Smckusick 	tp->b_bcount = size;
50945116Smckusick 	return (1);
5108670S }
5118670S 
5128670S /*
5138670S  * Find a buffer which is available for use.
5148670S  * Select something from a free list.
5158670S  * Preference is to AGE list, then LRU list.
5168670S  */
5178670S struct buf *
5188670S getnewbuf()
5198670S {
5208670S 	register struct buf *bp, *dp;
52138776Smckusick 	register struct ucred *cred;
5218670S 	int s;
5228670S 
52452096Sbostic #ifdef LFS
52452096Sbostic 	lfs_flush();
52552096Sbostic #endif
5268670S loop:
52726271Skarels 	s = splbio();
	/* Scan free lists from AGE down toward LRU for a non-empty queue. */
5288670S 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
5298670S 		if (dp->av_forw != dp)
5308670S 			break;
5318670S 	if (dp == bfreelist) {		/* no free blocks */
5328670S 		dp->b_flags |= B_WANTED;
53346151Smckusick 		sleep((caddr_t)dp, PRIBIO + 1);
53412170Ssam 		splx(s);
5358670S 		goto loop;
5368670S 	}
53739882Smckusick 	bp = dp->av_forw;
53839882Smckusick 	bremfree(bp);
53939882Smckusick 	bp->b_flags |= B_BUSY;
5408670S 	splx(s);
	/* Delayed-write buffers must be pushed out before reuse. */
5418670S 	if (bp->b_flags & B_DELWRI) {
54238614Smckusick 		(void) bawrite(bp);
5438670S 		goto loop;
5448670S 	}
54540341Smckusick 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
54639668Smckusick 	if (bp->b_vp)
54739668Smckusick 		brelvp(bp);
	/* Drop any credentials still referenced by the old identity. */
54838776Smckusick 	if (bp->b_rcred != NOCRED) {
54938776Smckusick 		cred = bp->b_rcred;
55038776Smckusick 		bp->b_rcred = NOCRED;
55138776Smckusick 		crfree(cred);
55238776Smckusick 	}
55338776Smckusick 	if (bp->b_wcred != NOCRED) {
55438776Smckusick 		cred = bp->b_wcred;
55538776Smckusick 		bp->b_wcred = NOCRED;
55638776Smckusick 		crfree(cred);
55738776Smckusick 	}
5588670S 	bp->b_flags = B_BUSY;
55946989Smckusick 	bp->b_dirtyoff = bp->b_dirtyend = 0;
56052189Smckusick 	bp->b_validoff = bp->b_validend = 0;
5618670S 	return (bp);
5628670S }
5648670S 
5658670S /*
56646151Smckusick  * Wait for I/O to complete.
56746151Smckusick  *
56846151Smckusick  * Extract and return any errors associated with the I/O.
56946151Smckusick  * If the error flag is set, but no specific error is
57046151Smckusick  * given, return EIO.
5718Sbill  */
5727015Smckusick biowait(bp)
5736563Smckusic 	register struct buf *bp;
5748Sbill {
5755431Sroot 	int s;
5768Sbill 
	/* Sleep at splbio until biodone sets B_DONE and wakes us. */
57726271Skarels 	s = splbio();
57838776Smckusick 	while ((bp->b_flags & B_DONE) == 0)
5798Sbill 		sleep((caddr_t)bp, PRIBIO);
5805431Sroot 	splx(s);
58137736Smckusick 	if ((bp->b_flags & B_ERROR) == 0)
58237736Smckusick 		return (0);
58337736Smckusick 	if (bp->b_error)
58437736Smckusick 		return (bp->b_error);
	/* Error flag set but no specific code recorded: default to EIO. */
58537736Smckusick 	return (EIO);
5868Sbill }
5878Sbill 
5888Sbill /*
58913128Ssam  * Mark I/O complete on a buffer.
59046151Smckusick  *
59146151Smckusick  * If a callback has been requested, e.g. the pageout
59246151Smckusick  * daemon, do so. Otherwise, awaken waiting processes.
5938Sbill  */
59451455Sbostic void
5957015Smckusick biodone(bp)
5967015Smckusick 	register struct buf *bp;
5978Sbill {
5988Sbill 
	/* Completing the same I/O twice indicates driver/fs corruption. */
599420Sbill 	if (bp->b_flags & B_DONE)
6007015Smckusick 		panic("dup biodone");
6018Sbill 	bp->b_flags |= B_DONE;
	/* A completed write reduces the vnode's pending-output count. */
60249232Smckusick 	if ((bp->b_flags & B_READ) == 0)
60349232Smckusick 		vwakeup(bp);
	/* Callback-style completion (e.g. pageout daemon) owns the buffer. */
6049763Ssam 	if (bp->b_flags & B_CALL) {
6059763Ssam 		bp->b_flags &= ~B_CALL;
6069763Ssam 		(*bp->b_iodone)(bp);
6079763Ssam 		return;
6089763Ssam 	}
	/* Async I/O: release the buffer; sync: wake the waiter in biowait. */
60946151Smckusick 	if (bp->b_flags & B_ASYNC)
6108Sbill 		brelse(bp);
6118Sbill 	else {
6128Sbill 		bp->b_flags &= ~B_WANTED;
6138Sbill 		wakeup((caddr_t)bp);
6148Sbill 	}
6158Sbill }
616