xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 39808)
123395Smckusick /*
237736Smckusick  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
337736Smckusick  * All rights reserved.
423395Smckusick  *
537736Smckusick  * Redistribution and use in source and binary forms are permitted
637736Smckusick  * provided that the above copyright notice and this paragraph are
737736Smckusick  * duplicated in all such forms and that any documentation,
837736Smckusick  * advertising materials, and other materials related to such
937736Smckusick  * distribution and use acknowledge that the software was developed
1037736Smckusick  * by the University of California, Berkeley.  The name of the
1137736Smckusick  * University may not be used to endorse or promote products derived
1237736Smckusick  * from this software without specific prior written permission.
1337736Smckusick  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
1437736Smckusick  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
1537736Smckusick  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
1637736Smckusick  *
17*39808Smckusick  *	@(#)vfs_cluster.c	7.18 (Berkeley) 12/30/89
1823395Smckusick  */
198Sbill 
2017098Sbloom #include "param.h"
2117098Sbloom #include "user.h"
2217098Sbloom #include "buf.h"
2337736Smckusick #include "vnode.h"
2439668Smckusick #include "mount.h"
2517098Sbloom #include "trace.h"
2638776Smckusick #include "ucred.h"
278Sbill 
2891Sbill /*
298Sbill  * Read in (if necessary) the block and return a buffer pointer.
308Sbill  */
3138776Smckusick bread(vp, blkno, size, cred, bpp)
3237736Smckusick 	struct vnode *vp;
336563Smckusic 	daddr_t blkno;
346563Smckusic 	int size;
3538776Smckusick 	struct ucred *cred;
3637736Smckusick 	struct buf **bpp;
378Sbill {
388Sbill 	register struct buf *bp;
398Sbill 
408670S 	if (size == 0)
418670S 		panic("bread: size 0");
4237736Smckusick 	*bpp = bp = getblk(vp, blkno, size);
4332608Smckusick 	if (bp->b_flags&(B_DONE|B_DELWRI)) {
4437736Smckusick 		trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size), blkno);
4537736Smckusick 		return (0);
468Sbill 	}
478Sbill 	bp->b_flags |= B_READ;
488670S 	if (bp->b_bcount > bp->b_bufsize)
498670S 		panic("bread");
5038776Smckusick 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
5138776Smckusick 		crhold(cred);
5238776Smckusick 		bp->b_rcred = cred;
5338776Smckusick 	}
5437736Smckusick 	VOP_STRATEGY(bp);
5537736Smckusick 	trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size), blkno);
568039Sroot 	u.u_ru.ru_inblock++;		/* pay for read */
5737736Smckusick 	return (biowait(bp));
588Sbill }
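
/*
 * Illustrative sketch, not part of the original source: a typical
 * caller reads a block, checks for errors, and releases the buffer
 * when finished.  The vnode "vp", logical block "lbn" and block size
 * "bsize" are assumed to be supplied by the filesystem.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine or copy bp->b_un.b_addr ...
 *	brelse(bp);
 */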
598Sbill 
608Sbill /*
618Sbill  * Read in the block, like bread, but also start I/O on the
628Sbill  * read-ahead block (which is not allocated to the caller).
638Sbill  */
6438776Smckusick breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
6537736Smckusick 	struct vnode *vp;
667114Smckusick 	daddr_t blkno; int size;
678592Sroot 	daddr_t rablkno; int rabsize;
6838776Smckusick 	struct ucred *cred;
6937736Smckusick 	struct buf **bpp;
708Sbill {
718Sbill 	register struct buf *bp, *rabp;
728Sbill 
738Sbill 	bp = NULL;
747015Smckusick 	/*
757015Smckusick 	 * If the block isn't in core, then allocate
767015Smckusick 	 * a buffer and initiate i/o (getblk checks
777015Smckusick 	 * for a cache hit).
787015Smckusick 	 */
7937736Smckusick 	if (!incore(vp, blkno)) {
8037736Smckusick 		*bpp = bp = getblk(vp, blkno, size);
8132608Smckusick 		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
828Sbill 			bp->b_flags |= B_READ;
838670S 			if (bp->b_bcount > bp->b_bufsize)
848670S 				panic("breada");
8538776Smckusick 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
8638776Smckusick 				crhold(cred);
8738776Smckusick 				bp->b_rcred = cred;
8838776Smckusick 			}
8937736Smckusick 			VOP_STRATEGY(bp);
9037736Smckusick 			trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size),
9137736Smckusick 			    blkno);
928039Sroot 			u.u_ru.ru_inblock++;		/* pay for read */
937015Smckusick 		} else
9437736Smckusick 			trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size),
9537736Smckusick 			    blkno);
968Sbill 	}
977015Smckusick 
987015Smckusick 	/*
997015Smckusick 	 * If there's a read-ahead block, start i/o
1007015Smckusick 	 * on it also (as above).
1017015Smckusick 	 */
10237736Smckusick 	if (rablkno && !incore(vp, rablkno)) {
10337736Smckusick 		rabp = getblk(vp, rablkno, rabsize);
10432608Smckusick 		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
1058Sbill 			brelse(rabp);
10637736Smckusick 			trace(TR_BREADHITRA,
10738880Smckusick 			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
1082045Swnj 		} else {
1098Sbill 			rabp->b_flags |= B_READ|B_ASYNC;
1108670S 			if (rabp->b_bcount > rabp->b_bufsize)
1118670S 				panic("breadrabp");
11238880Smckusick 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
11338776Smckusick 				crhold(cred);
11438880Smckusick 				rabp->b_rcred = cred;
11538776Smckusick 			}
11637736Smckusick 			VOP_STRATEGY(rabp);
11737736Smckusick 			trace(TR_BREADMISSRA,
11838880Smckusick 			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
1198039Sroot 			u.u_ru.ru_inblock++;		/* pay in advance */
1208Sbill 		}
1218Sbill 	}
1227015Smckusick 
1237015Smckusick 	/*
1247114Smckusick 	 * If block was in core, let bread get it.
1257114Smckusick 	 * If block wasn't in core, then the read was started
1267114Smckusick 	 * above, and just wait for it.
1277015Smckusick 	 */
1287114Smckusick 	if (bp == NULL)
12938776Smckusick 		return (bread(vp, blkno, size, cred, bpp));
13037736Smckusick 	return (biowait(bp));
1318Sbill }
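
/*
 * Illustrative sketch, not part of the original source: a sequential
 * reader can overlap the transfer of the next block with use of the
 * current one by naming it as the read-ahead block ("lbn" and "bsize"
 * are assumed caller-supplied).
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 */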
1328Sbill 
1338Sbill /*
1348Sbill  * Write the buffer, waiting for completion.
1358Sbill  * Then release the buffer.
1368Sbill  */
1378Sbill bwrite(bp)
1387015Smckusick 	register struct buf *bp;
1398Sbill {
14037736Smckusick 	register int flag;
14137736Smckusick 	int error;
1428Sbill 
1438Sbill 	flag = bp->b_flags;
1449857Ssam 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
1458Sbill 	if ((flag&B_DELWRI) == 0)
1468039Sroot 	u.u_ru.ru_oublock++;		/* no one has paid yet */
14737736Smckusick 	trace(TR_BWRITE,
14839668Smckusick 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bcount), bp->b_lblkno);
1498670S 	if (bp->b_bcount > bp->b_bufsize)
1508670S 		panic("bwrite");
15137736Smckusick 	VOP_STRATEGY(bp);
1527015Smckusick 
1537015Smckusick 	/*
1547015Smckusick 	 * If the write was synchronous, then await i/o completion.
1557015Smckusick 	 * If the write was "delayed" and is now being pushed asynchronously,
1567015Smckusick 	 * mark the buffer B_AGE so that it is reclaimed ahead of other buffers.
1577015Smckusick 	 */
1588Sbill 	if ((flag&B_ASYNC) == 0) {
15937736Smckusick 		error = biowait(bp);
1608Sbill 		brelse(bp);
16137736Smckusick 	} else if (flag & B_DELWRI) {
1628Sbill 		bp->b_flags |= B_AGE;
16337736Smckusick 		error = 0;
16437736Smckusick 	}
16537736Smckusick 	return (error);
1668Sbill }
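
/*
 * Illustrative sketch, not part of the original source: a caller that
 * must know the data is on disk (metadata, for example) modifies the
 * buffer and writes it synchronously; bwrite releases the buffer
 * itself, so it must not be touched afterwards.
 *
 *	... modify bp->b_un.b_addr ...
 *	if (error = bwrite(bp))
 *		return (error);
 */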
1678Sbill 
1688Sbill /*
1698Sbill  * Release the buffer, marking it so that if it is grabbed
1708Sbill  * for another purpose it will be written out before being
1718Sbill  * given up (e.g. when writing a partial block where it is
1728Sbill  * assumed that another write for the same block will soon follow).
1738Sbill  * This can't be done for magtape, since writes must be done
1748Sbill  * in the same order as requested.
1758Sbill  */
1768Sbill bdwrite(bp)
1777015Smckusick 	register struct buf *bp;
1788Sbill {
1798Sbill 
1808Sbill 	if ((bp->b_flags&B_DELWRI) == 0)
1818039Sroot 	u.u_ru.ru_oublock++;		/* no one has paid yet */
18237736Smckusick 	/*
18339668Smckusick 	 * If this is a tape drive, the write must be initiated.
18437736Smckusick 	 */
18539668Smckusick 	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
1868Sbill 		bawrite(bp);
18739668Smckusick 	} else {
1888Sbill 		bp->b_flags |= B_DELWRI | B_DONE;
1898Sbill 		brelse(bp);
1908Sbill 	}
1918Sbill }
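
/*
 * Illustrative sketch, not part of the original source: when only part
 * of a block has been filled and more data for the same block is
 * expected soon, the write is deferred and the buffer simply marked
 * dirty; it reaches the disk later when reclaimed or flushed.
 *
 *	... copy the new data into bp->b_un.b_addr ...
 *	bdwrite(bp);
 */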
1928Sbill 
1938Sbill /*
1948Sbill  * Release the buffer, start I/O on it, but don't wait for completion.
1958Sbill  */
1968Sbill bawrite(bp)
1977015Smckusick 	register struct buf *bp;
1988Sbill {
1998Sbill 
2008Sbill 	bp->b_flags |= B_ASYNC;
20137736Smckusick 	(void) bwrite(bp);
2028Sbill }
2038Sbill 
2048Sbill /*
2057015Smckusick  * Release the buffer, with no I/O implied.
2068Sbill  */
2078Sbill brelse(bp)
2087015Smckusick 	register struct buf *bp;
2098Sbill {
2102325Swnj 	register struct buf *flist;
2118Sbill 	register s;
2128Sbill 
21337736Smckusick 	trace(TR_BRELSE,
21439668Smckusick 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_lblkno);
2157015Smckusick 	/*
21639668Smckusick 	 * If a process is waiting for the buffer, or
21739668Smckusick 	 * is waiting for a free buffer, awaken it.
2187015Smckusick 	 */
2198Sbill 	if (bp->b_flags&B_WANTED)
2208Sbill 		wakeup((caddr_t)bp);
2212325Swnj 	if (bfreelist[0].b_flags&B_WANTED) {
2222325Swnj 		bfreelist[0].b_flags &= ~B_WANTED;
2232325Swnj 		wakeup((caddr_t)bfreelist);
2248Sbill 	}
22539668Smckusick 	/*
22639668Smckusick 	 * Retry I/O for locked buffers rather than invalidating them.
22739668Smckusick 	 */
22839668Smckusick 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
22939668Smckusick 		bp->b_flags &= ~B_ERROR;
23039668Smckusick 
23139668Smckusick 	/*
23239668Smckusick 	 * Disassociate buffers that are no longer valid.
23339668Smckusick 	 */
23439668Smckusick 	if (bp->b_flags & (B_NOCACHE|B_ERROR))
23537736Smckusick 		bp->b_flags |= B_INVAL;
23639668Smckusick 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
23739668Smckusick 		if (bp->b_vp)
23839668Smckusick 			brelvp(bp);
23939668Smckusick 		bp->b_flags &= ~B_DELWRI;
24037736Smckusick 	}
2417015Smckusick 	/*
2427015Smckusick 	 * Stick the buffer back on a free list.
2437015Smckusick 	 */
24426271Skarels 	s = splbio();
2458670S 	if (bp->b_bufsize <= 0) {
2468670S 		/* block has no buffer ... put at front of unused buffer list */
2478670S 		flist = &bfreelist[BQ_EMPTY];
2488670S 		binsheadfree(bp, flist);
2498670S 	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
2502325Swnj 		/* block has no info ... put at front of most free list */
2518670S 		flist = &bfreelist[BQ_AGE];
2527015Smckusick 		binsheadfree(bp, flist);
2538Sbill 	} else {
2542325Swnj 		if (bp->b_flags & B_LOCKED)
2552325Swnj 			flist = &bfreelist[BQ_LOCKED];
2562325Swnj 		else if (bp->b_flags & B_AGE)
2572325Swnj 			flist = &bfreelist[BQ_AGE];
2582325Swnj 		else
2592325Swnj 			flist = &bfreelist[BQ_LRU];
2607015Smckusick 		binstailfree(bp, flist);
2618Sbill 	}
26237736Smckusick 	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
2638Sbill 	splx(s);
2648Sbill }
2658Sbill 
2668Sbill /*
2678Sbill  * See if the block is associated with some buffer
2688Sbill  * (mainly to avoid getting hung up on a wait in breada)
2698Sbill  */
27037736Smckusick incore(vp, blkno)
27137736Smckusick 	struct vnode *vp;
2727015Smckusick 	daddr_t blkno;
2738Sbill {
2748Sbill 	register struct buf *bp;
2752325Swnj 	register struct buf *dp;
2768Sbill 
27738225Smckusick 	dp = BUFHASH(vp, blkno);
2782325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
27939668Smckusick 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
2807015Smckusick 		    (bp->b_flags & B_INVAL) == 0)
28191Sbill 			return (1);
28291Sbill 	return (0);
2838Sbill }
2848Sbill 
28539668Smckusick /*
28639668Smckusick  * Return a block if it is in memory.
28739668Smckusick  */
28838776Smckusick baddr(vp, blkno, size, cred, bpp)
28937736Smckusick 	struct vnode *vp;
2906563Smckusic 	daddr_t blkno;
2916563Smckusic 	int size;
29238776Smckusick 	struct ucred *cred;
29337736Smckusick 	struct buf **bpp;
2948Sbill {
2958Sbill 
29637736Smckusick 	if (incore(vp, blkno))
29738776Smckusick 		return (bread(vp, blkno, size, cred, bpp));
29837736Smckusick 	*bpp = 0;
2998Sbill 	return (0);
3008Sbill }
3018Sbill 
3028Sbill /*
3038Sbill  * Assign a buffer for the given block.  If the appropriate
3048Sbill  * block is already associated, return it; otherwise search
3058Sbill  * for the oldest non-busy buffer and reassign it.
3065424Swnj  *
3075424Swnj  * We use splx here because this routine may be called
3085424Swnj  * on the interrupt stack during a dump, and we don't
3095424Swnj  * want to lower the ipl back to 0.
3108Sbill  */
3118Sbill struct buf *
31237736Smckusick getblk(vp, blkno, size)
31337736Smckusick 	register struct vnode *vp;
3146563Smckusic 	daddr_t blkno;
3156563Smckusic 	int size;
3168Sbill {
3178670S 	register struct buf *bp, *dp;
3185424Swnj 	int s;
3198Sbill 
32025255Smckusick 	if (size > MAXBSIZE)
32125255Smckusick 		panic("getblk: size too big");
3227015Smckusick 	/*
32324730Smckusick 	 * To prevent overflow of 32-bit ints when converting block
32424730Smckusick 	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
32524730Smckusick 	 * to the maximum number that can be converted to a byte offset
32624730Smckusick 	 * without overflow. This is historic code; what bug it fixed,
32724730Smckusick 	 * or whether it is still a reasonable thing to do is open to
32824730Smckusick 	 * dispute. mkm 9/85
32939668Smckusick 	 *
33039668Smckusick 	 * Make it a panic to see if it ever really happens. mkm 11/89
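	 *
	 * (With 32-bit ints and a 512 byte DEV_BSIZE the cutoff below
	 * is 1 << 23 blocks, i.e. a byte offset of 4 gigabytes.)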
33124730Smckusick 	 */
33239668Smckusick 	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
33339668Smckusick 		panic("getblk: blkno too big");
33424730Smckusick 		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
33539668Smckusick 	}
33624730Smckusick 	/*
3377015Smckusick 	 * Search the cache for the block.  If we hit, but
3387015Smckusick 	 * the buffer is in use for i/o, then we wait until
3397015Smckusick 	 * the i/o has completed.
3407015Smckusick 	 */
34137736Smckusick 	dp = BUFHASH(vp, blkno);
3427015Smckusick loop:
3432325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
34439668Smckusick 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
3452325Swnj 		    bp->b_flags&B_INVAL)
3468Sbill 			continue;
34726271Skarels 		s = splbio();
3488Sbill 		if (bp->b_flags&B_BUSY) {
3498Sbill 			bp->b_flags |= B_WANTED;
3508Sbill 			sleep((caddr_t)bp, PRIBIO+1);
3515424Swnj 			splx(s);
3528Sbill 			goto loop;
3538Sbill 		}
3545424Swnj 		splx(s);
3558Sbill 		notavail(bp);
35632608Smckusick 		if (bp->b_bcount != size) {
35739668Smckusick 		printf("getblk: stray size\n");
35839668Smckusick 			bp->b_flags |= B_INVAL;
35939668Smckusick 			bwrite(bp);
36039668Smckusick 			goto loop;
36132608Smckusick 		}
3628Sbill 		bp->b_flags |= B_CACHE;
36326271Skarels 		return (bp);
3648Sbill 	}
3658670S 	bp = getnewbuf();
3666563Smckusic 	bfree(bp);
3677015Smckusick 	bremhash(bp);
36839668Smckusick 	bgetvp(vp, bp);
36939668Smckusick 	bp->b_lblkno = blkno;
3706563Smckusic 	bp->b_blkno = blkno;
3718670S 	bp->b_error = 0;
37237736Smckusick 	bp->b_resid = 0;
37337736Smckusick 	binshash(bp, dp);
37439668Smckusick 	brealloc(bp, size);
37526271Skarels 	return (bp);
3768Sbill }
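
/*
 * Illustrative sketch, not part of the original source: when an entire
 * block is about to be overwritten there is no reason to read the old
 * contents, so getblk is used directly instead of bread ("vp", "lbn"
 * and "bsize" are assumed caller-supplied; clrbuf() is the buf.h macro
 * that zeros the buffer's data area).
 *
 *	bp = getblk(vp, lbn, bsize);
 *	clrbuf(bp);
 *	... fill in bp->b_un.b_addr ...
 *	bdwrite(bp);		(or bwrite(bp) if it must be synchronous)
 */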
3778Sbill 
3788Sbill /*
3798Sbill  * Get an empty block,
3808Sbill  * not assigned to any particular device.
3818Sbill  */
3828Sbill struct buf *
3836563Smckusic geteblk(size)
3846563Smckusic 	int size;
3858Sbill {
3868670S 	register struct buf *bp, *flist;
3878Sbill 
38825255Smckusick 	if (size > MAXBSIZE)
38925255Smckusick 		panic("geteblk: size too big");
3908670S 	bp = getnewbuf();
3918670S 	bp->b_flags |= B_INVAL;
3927015Smckusick 	bfree(bp);
3937015Smckusick 	bremhash(bp);
3948670S 	flist = &bfreelist[BQ_AGE];
39537736Smckusick 	bp->b_error = 0;
39637736Smckusick 	bp->b_resid = 0;
3978670S 	binshash(bp, flist);
39839668Smckusick 	brealloc(bp, size);
39926271Skarels 	return (bp);
4008Sbill }
4018Sbill 
4028Sbill /*
4036563Smckusic  * Allocate space associated with a buffer.
4046563Smckusic  */
4056563Smckusic brealloc(bp, size)
4066563Smckusic 	register struct buf *bp;
4076563Smckusic 	int size;
4086563Smckusic {
4146563Smckusic 	if (size == bp->b_bcount)
41539668Smckusick 		return;
41639668Smckusick 	allocbuf(bp, size);
4178670S }
4188670S 
4198670S /*
4208670S  * Find a buffer which is available for use.
4218670S  * Select something from a free list.
4228670S  * Preference is to AGE list, then LRU list.
4238670S  */
4248670S struct buf *
4258670S getnewbuf()
4268670S {
4278670S 	register struct buf *bp, *dp;
42838776Smckusick 	register struct ucred *cred;
4298670S 	int s;
4308670S 
4318670S loop:
43226271Skarels 	s = splbio();
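	/*
	 * Scan the AGE queue first, then LRU.  The loop stops before
	 * bfreelist[BQ_LOCKED], and BQ_EMPTY lies above BQ_AGE, so
	 * locked buffers and empty headers are never taken here.
	 */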
4338670S 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
4348670S 		if (dp->av_forw != dp)
4358670S 			break;
4368670S 	if (dp == bfreelist) {		/* no free blocks */
4378670S 		dp->b_flags |= B_WANTED;
4388670S 		sleep((caddr_t)dp, PRIBIO+1);
43912170Ssam 		splx(s);
4408670S 		goto loop;
4418670S 	}
4428670S 	splx(s);
4438670S 	bp = dp->av_forw;
4448670S 	notavail(bp);
4458670S 	if (bp->b_flags & B_DELWRI) {
44638614Smckusick 		(void) bawrite(bp);
4478670S 		goto loop;
4488670S 	}
44937736Smckusick 	trace(TR_BRELSE,
45039668Smckusick 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_lblkno);
45139668Smckusick 	if (bp->b_vp)
45239668Smckusick 		brelvp(bp);
45338776Smckusick 	if (bp->b_rcred != NOCRED) {
45438776Smckusick 		cred = bp->b_rcred;
45538776Smckusick 		bp->b_rcred = NOCRED;
45638776Smckusick 		crfree(cred);
45738776Smckusick 	}
45838776Smckusick 	if (bp->b_wcred != NOCRED) {
45938776Smckusick 		cred = bp->b_wcred;
46038776Smckusick 		bp->b_wcred = NOCRED;
46138776Smckusick 		crfree(cred);
46238776Smckusick 	}
4638670S 	bp->b_flags = B_BUSY;
4648670S 	return (bp);
4658670S }
4668670S 
4678670S /*
4688Sbill  * Wait for I/O completion on the buffer; return errors
4698Sbill  * to the user.
4708Sbill  */
4717015Smckusick biowait(bp)
4726563Smckusic 	register struct buf *bp;
4738Sbill {
4745431Sroot 	int s;
4758Sbill 
47626271Skarels 	s = splbio();
47738776Smckusick 	while ((bp->b_flags & B_DONE) == 0)
4788Sbill 		sleep((caddr_t)bp, PRIBIO);
4795431Sroot 	splx(s);
48037736Smckusick 	/*
48137736Smckusick 	 * Pick up the device's error number and pass it to the user;
48237736Smckusick 	 * if there is an error but the number is 0 set a generalized code.
48337736Smckusick 	 * if there is an error but the number is 0, set a generalized code.
48437736Smckusick 	if ((bp->b_flags & B_ERROR) == 0)
48537736Smckusick 		return (0);
48637736Smckusick 	if (bp->b_error)
48737736Smckusick 		return (bp->b_error);
48837736Smckusick 	return (EIO);
4898Sbill }
4908Sbill 
4918Sbill /*
49213128Ssam  * Mark I/O complete on a buffer.
49313128Ssam  * If someone should be called, e.g. the pageout
49413128Ssam  * daemon, do so.  Otherwise, wake up anyone
49513128Ssam  * waiting for it.
4968Sbill  */
4977015Smckusick biodone(bp)
4987015Smckusick 	register struct buf *bp;
4998Sbill {
5008Sbill 
501420Sbill 	if (bp->b_flags & B_DONE)
5027015Smckusick 		panic("dup biodone");
5038Sbill 	bp->b_flags |= B_DONE;
50438776Smckusick 	if ((bp->b_flags & B_READ) == 0)
50538776Smckusick 		bp->b_dirtyoff = bp->b_dirtyend = 0;
5069763Ssam 	if (bp->b_flags & B_CALL) {
5079763Ssam 		bp->b_flags &= ~B_CALL;
5089763Ssam 		(*bp->b_iodone)(bp);
5099763Ssam 		return;
5109763Ssam 	}
5118Sbill 	if (bp->b_flags&B_ASYNC)
5128Sbill 		brelse(bp);
5138Sbill 	else {
5148Sbill 		bp->b_flags &= ~B_WANTED;
5158Sbill 		wakeup((caddr_t)bp);
5168Sbill 	}
5178Sbill }
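
/*
 * Illustrative sketch, not part of the original source: a driver's
 * strategy routine calls biodone(bp) when the transfer finishes.  A
 * caller that wants a callback instead of sleeping in biowait can set,
 * before issuing the i/o,
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;		("mydone" is a hypothetical handler)
 *
 * and biodone will call (*b_iodone)(bp) rather than waking a sleeper.
 */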
5188Sbill 
5198Sbill /*
52037736Smckusick  * Make sure all write-behind blocks associated
52138776Smckusick  * with mount point are flushed out (from sync).
5228Sbill  */
52339668Smckusick mntflushbuf(mountp, flags)
52438776Smckusick 	struct mount *mountp;
52539668Smckusick 	int flags;
5268Sbill {
52739668Smckusick 	register struct vnode *vp;
52839764Smckusick 	struct vnode *nvp;
52939668Smckusick 
53039668Smckusick loop:
53139764Smckusick 	for (vp = mountp->m_mounth; vp; vp = nvp) {
53239764Smckusick 		nvp = vp->v_mountf;
53339668Smckusick 		if (vget(vp))
53439668Smckusick 			goto loop;
53539668Smckusick 		vflushbuf(vp, flags);
53639668Smckusick 		vput(vp);
53739668Smckusick 	}
53839668Smckusick }
53939668Smckusick 
54039668Smckusick /*
54139668Smckusick  * Flush all dirty buffers associated with a vnode.
54239668Smckusick  */
54339668Smckusick vflushbuf(vp, flags)
54439668Smckusick 	register struct vnode *vp;
54539668Smckusick 	int flags;
54639668Smckusick {
5478Sbill 	register struct buf *bp;
54839668Smckusick 	struct buf *nbp;
5495431Sroot 	int s;
5508Sbill 
5518Sbill loop:
55226271Skarels 	s = splbio();
55339668Smckusick 	for (bp = vp->v_blockh; bp; bp = nbp) {
55439668Smckusick 		nbp = bp->b_blockf;
55539668Smckusick 		if ((bp->b_flags & B_BUSY))
55639668Smckusick 			continue;
55739668Smckusick 		if ((bp->b_flags & B_DELWRI) == 0)
55839668Smckusick 			continue;
55939668Smckusick 		splx(s);
56039668Smckusick 		notavail(bp);
56139668Smckusick 		(void) bawrite(bp);
56239668Smckusick 		goto loop;
56339668Smckusick 	}
56439738Smckusick 	splx(s);
56539668Smckusick 	if ((flags & B_SYNC) == 0)
56639668Smckusick 		return;
56739668Smckusick wloop:
56839668Smckusick 	s = splbio();
56939668Smckusick 	for (bp = vp->v_blockh; bp; bp = nbp) {
57039668Smckusick 		nbp = bp->b_blockf;
57139668Smckusick 		if (bp->b_flags & B_BUSY) {
57239668Smckusick 			bp->b_flags |= B_WANTED;
57339668Smckusick 			sleep((caddr_t)bp, PRIBIO+1);
57439668Smckusick 			splx(s);
57539668Smckusick 			goto wloop;
57637736Smckusick 		}
57739738Smckusick 		if ((bp->b_flags & B_DELWRI)) {
57839738Smckusick 			splx(s);
57939668Smckusick 			goto loop;
58039738Smckusick 		}
58137736Smckusick 	}
58239738Smckusick 	splx(s);
5838Sbill }
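
/*
 * Illustrative sketch, not part of the original source: a filesystem's
 * fsync might push a single vnode's dirty buffers and wait for them,
 *
 *	vflushbuf(vp, B_SYNC);
 *
 * while periodic sync processing pushes a whole mount point without
 * waiting,
 *
 *	mntflushbuf(mountp, 0);
 */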
5842299Skre 
5852299Skre /*
5862299Skre  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
5872299Skre  *
58839668Smckusick  * Go through the list of vnodes associated with the file system;
58939668Smckusick  * for each vnode invalidate any buffers that it holds. Normally
59039668Smckusick  * this routine is preceded by a call to mntflushbuf, so that on a quiescent
59139668Smckusick  * filesystem there will be no dirty buffers when we are done. Mntinvalbuf
59239668Smckusick  * returns the count of dirty buffers when it is finished.
5932299Skre  */
59439668Smckusick mntinvalbuf(mountp)
59538776Smckusick 	struct mount *mountp;
5962299Skre {
59739668Smckusick 	register struct vnode *vp;
59839764Smckusick 	struct vnode *nvp;
59939668Smckusick 	int dirty = 0;
60039668Smckusick 
60139668Smckusick loop:
60239764Smckusick 	for (vp = mountp->m_mounth; vp; vp = nvp) {
60339764Smckusick 		nvp = vp->v_mountf;
60439668Smckusick 		if (vget(vp))
60539668Smckusick 			goto loop;
60639668Smckusick 		dirty += vinvalbuf(vp, 1);
60739668Smckusick 		vput(vp);
60839668Smckusick 	}
60939668Smckusick 	return (dirty);
61039668Smckusick }
61139668Smckusick 
61239668Smckusick /*
61339668Smckusick  * Flush out and invalidate all buffers associated with a vnode.
61439668Smckusick  * Called with the underlying object locked.
61539668Smckusick  */
61639668Smckusick vinvalbuf(vp, save)
61739668Smckusick 	register struct vnode *vp;
61839668Smckusick 	int save;
61939668Smckusick {
6202361Skre 	register struct buf *bp;
62139668Smckusick 	struct buf *nbp;
62238633Smckusick 	int s, dirty = 0;
6232299Skre 
62438776Smckusick loop:
62539668Smckusick 	for (bp = vp->v_blockh; bp; bp = nbp) {
62639668Smckusick 		nbp = bp->b_blockf;
62739668Smckusick 		s = splbio();
62839668Smckusick 		if (bp->b_flags & B_BUSY) {
62939668Smckusick 			bp->b_flags |= B_WANTED;
63039668Smckusick 			sleep((caddr_t)bp, PRIBIO+1);
63138808Smckusick 			splx(s);
63239668Smckusick 			goto loop;
63339668Smckusick 		}
63439668Smckusick 		splx(s);
63539668Smckusick 		notavail(bp);
63639668Smckusick 		if (save) {
63738614Smckusick 			if (bp->b_flags & B_DELWRI) {
63838614Smckusick 				dirty++;
63939668Smckusick 				(void) bwrite(bp);
64039668Smckusick 				goto loop;
64137736Smckusick 			}
64238614Smckusick 		}
64339668Smckusick 		bp->b_flags |= B_INVAL;
64439668Smckusick 		brelse(bp);
64538614Smckusick 	}
64639668Smckusick 	if (vp->v_blockh != 0)
64739668Smckusick 		panic("vinvalbuf: flush failed");
64838614Smckusick 	return (dirty);
6492299Skre }
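
/*
 * Illustrative sketch, not part of the original source: a caller that
 * is about to discard a vnode, and holds it locked, can write back and
 * then toss its buffers,
 *
 *	dirty = vinvalbuf(vp, 1);	(1 means save modified data first)
 *
 * while passing 0 for "save" discards even the dirty buffers.
 */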
65037736Smckusick 
65139668Smckusick /*
65239668Smckusick  * Associate a buffer with a vnode.
65339668Smckusick  */
65439668Smckusick bgetvp(vp, bp)
65539668Smckusick 	register struct vnode *vp;
65639668Smckusick 	register struct buf *bp;
65739668Smckusick {
65839668Smckusick 
65939668Smckusick 	if (bp->b_vp)
66039668Smckusick 		panic("bgetvp: not free");
661*39808Smckusick 	VHOLD(vp);
66239668Smckusick 	bp->b_vp = vp;
66339668Smckusick 	if (vp->v_type == VBLK || vp->v_type == VCHR)
66439668Smckusick 		bp->b_dev = vp->v_rdev;
66539668Smckusick 	else
66639668Smckusick 		bp->b_dev = NODEV;
66739668Smckusick 	/*
66839668Smckusick 	 * Insert onto list for new vnode.
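	 * The back link b_blockb addresses the pointer that references
	 * this buffer (either v_blockh or the previous buffer's
	 * b_blockf), so removal in brelvp needs no special case for
	 * the head of the list.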
66939668Smckusick 	 */
67039668Smckusick 	if (vp->v_blockh) {
67139668Smckusick 		bp->b_blockf = vp->v_blockh;
67239668Smckusick 		bp->b_blockb = &vp->v_blockh;
67339668Smckusick 		vp->v_blockh->b_blockb = &bp->b_blockf;
67439668Smckusick 		vp->v_blockh = bp;
67539668Smckusick 	} else {
67639668Smckusick 		vp->v_blockh = bp;
67739668Smckusick 		bp->b_blockb = &vp->v_blockh;
67839668Smckusick 		bp->b_blockf = NULL;
67939668Smckusick 	}
68039668Smckusick }
68139668Smckusick 
68239668Smckusick /*
68339668Smckusick  * Disassociate a buffer from a vnode.
68439668Smckusick  */
68537736Smckusick brelvp(bp)
68639668Smckusick 	register struct buf *bp;
68737736Smckusick {
68839668Smckusick 	struct buf *bq;
68937736Smckusick 	struct vnode *vp;
69037736Smckusick 
69137736Smckusick 	if (bp->b_vp == (struct vnode *) 0)
69239668Smckusick 		panic("brelvp: NULL");
69339668Smckusick 	/*
69439668Smckusick 	 * Delete from old vnode list, if on one.
69539668Smckusick 	 */
69639668Smckusick 	if (bp->b_blockb) {
69739668Smckusick 		if (bq = bp->b_blockf)
69839668Smckusick 			bq->b_blockb = bp->b_blockb;
69939668Smckusick 		*bp->b_blockb = bq;
70039668Smckusick 		bp->b_blockf = NULL;
70139668Smckusick 		bp->b_blockb = NULL;
70239668Smckusick 	}
70337736Smckusick 	vp = bp->b_vp;
70437736Smckusick 	bp->b_vp = (struct vnode *) 0;
705*39808Smckusick 	HOLDRELE(vp);
70637736Smckusick }
70739668Smckusick 
70839668Smckusick /*
70939668Smckusick  * Reassign a buffer from one vnode to another.
71039668Smckusick  * Used to assign file specific control information
71139668Smckusick  * (indirect blocks) to the vnode to which they belong.
71239668Smckusick  */
71339668Smckusick reassignbuf(bp, newvp)
71439668Smckusick 	register struct buf *bp;
71539668Smckusick 	register struct vnode *newvp;
71639668Smckusick {
71739668Smckusick 	register struct buf *bq;
71839668Smckusick 
71939668Smckusick 	/*
72039668Smckusick 	 * Delete from old vnode list, if on one.
72139668Smckusick 	 */
72239668Smckusick 	if (bp->b_blockb) {
72339668Smckusick 		if (bq = bp->b_blockf)
72439668Smckusick 			bq->b_blockb = bp->b_blockb;
72539668Smckusick 		*bp->b_blockb = bq;
72639668Smckusick 	}
72739668Smckusick 	/*
72839668Smckusick 	 * Insert onto list for new vnode.
72939668Smckusick 	 */
73039668Smckusick 	if (newvp->v_blockh) {
73139668Smckusick 		bp->b_blockf = newvp->v_blockh;
73239668Smckusick 		bp->b_blockb = &newvp->v_blockh;
73339668Smckusick 		newvp->v_blockh->b_blockb = &bp->b_blockf;
73439668Smckusick 		newvp->v_blockh = bp;
73539668Smckusick 	} else {
73639668Smckusick 		newvp->v_blockh = bp;
73739668Smckusick 		bp->b_blockb = &newvp->v_blockh;
73839668Smckusick 		bp->b_blockf = NULL;
73939668Smckusick 	}
74039668Smckusick }
741