xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 16855)
1*16855Smckusick /*	vfs_cluster.c	6.3	84/08/07	*/
28Sbill 
39763Ssam #include "../machine/pte.h"
49763Ssam 
58Sbill #include "../h/param.h"
68Sbill #include "../h/systm.h"
78Sbill #include "../h/dir.h"
88Sbill #include "../h/user.h"
98Sbill #include "../h/buf.h"
108Sbill #include "../h/conf.h"
118Sbill #include "../h/proc.h"
128Sbill #include "../h/seg.h"
138Sbill #include "../h/vm.h"
142045Swnj #include "../h/trace.h"
158Sbill 
1691Sbill /*
178Sbill  * Read in (if necessary) the block and return a buffer pointer.
188Sbill  */
198Sbill struct buf *
206563Smckusic bread(dev, blkno, size)
216563Smckusic 	dev_t dev;
226563Smckusic 	daddr_t blkno;
236563Smckusic 	int size;
248Sbill {
258Sbill 	register struct buf *bp;
268Sbill 
	/* A zero-length request is a caller bug; catch it early. */
278670S 	if (size == 0)
288670S 		panic("bread: size 0");
296563Smckusic 	bp = getblk(dev, blkno, size);
	/* Cache hit: the buffer already holds valid data. */
308Sbill 	if (bp->b_flags&B_DONE) {
3115795Ssam 		trace(TR_BREADHIT, pack(dev, size), blkno);
328Sbill 		return(bp);
338Sbill 	}
	/* Cache miss: sanity-check the buffer and start the read. */
348Sbill 	bp->b_flags |= B_READ;
358670S 	if (bp->b_bcount > bp->b_bufsize)
368670S 		panic("bread");
378Sbill 	(*bdevsw[major(dev)].d_strategy)(bp);
3815795Ssam 	trace(TR_BREADMISS, pack(dev, size), blkno);
398039Sroot 	u.u_ru.ru_inblock++;		/* pay for read */
	/* Synchronous read: wait for i/o completion before returning. */
407015Smckusick 	biowait(bp);
418Sbill 	return(bp);
428Sbill }
438Sbill 
448Sbill /*
458Sbill  * Read in the block, like bread, but also start I/O on the
468Sbill  * read-ahead block (which is not allocated to the caller)
478Sbill  */
488Sbill struct buf *
498592Sroot breada(dev, blkno, size, rablkno, rabsize)
506563Smckusic 	dev_t dev;
517114Smckusick 	daddr_t blkno; int size;
528592Sroot 	daddr_t rablkno; int rabsize;
538Sbill {
548Sbill 	register struct buf *bp, *rabp;
558Sbill 
568Sbill 	bp = NULL;
577015Smckusick 	/*
587015Smckusick 	 * If the block isn't in core, then allocate
597015Smckusick 	 * a buffer and initiate i/o (getblk checks
607015Smckusick 	 * for a cache hit).
617015Smckusick 	 */
628Sbill 	if (!incore(dev, blkno)) {
636563Smckusic 		bp = getblk(dev, blkno, size);
648Sbill 		if ((bp->b_flags&B_DONE) == 0) {
658Sbill 			bp->b_flags |= B_READ;
668670S 			if (bp->b_bcount > bp->b_bufsize)
678670S 				panic("breada");
688Sbill 			(*bdevsw[major(dev)].d_strategy)(bp);
6915795Ssam 			trace(TR_BREADMISS, pack(dev, size), blkno);
708039Sroot 			u.u_ru.ru_inblock++;		/* pay for read */
717015Smckusick 		} else
7215795Ssam 			trace(TR_BREADHIT, pack(dev, size), blkno);
738Sbill 	}
747015Smckusick 
757015Smckusick 	/*
767015Smckusick 	 * If there's a read-ahead block, start i/o
777015Smckusick 	 * on it also (as above).
787015Smckusick 	 */
798Sbill 	if (rablkno && !incore(dev, rablkno)) {
808592Sroot 		rabp = getblk(dev, rablkno, rabsize);
812045Swnj 		if (rabp->b_flags & B_DONE) {
828Sbill 			brelse(rabp);
			/* NOTE(review): this logs blkno, not rablkno, for a
			 * read-ahead hit -- presumably rablkno was intended;
			 * verify against the trace consumers before changing. */
8315795Ssam 			trace(TR_BREADHITRA, pack(dev, rabsize), blkno);
842045Swnj 		} else {
858Sbill 			rabp->b_flags |= B_READ|B_ASYNC;
868670S 			if (rabp->b_bcount > rabp->b_bufsize)
878670S 				panic("breadrabp");
888Sbill 			(*bdevsw[major(dev)].d_strategy)(rabp);
			/* Fix: the parameter is rablkno; "rablock" (the old
			 * parameter name) is undefined in this scope. */
8915795Ssam 			trace(TR_BREADMISSRA, pack(dev, rabsize), rablkno);
908039Sroot 			u.u_ru.ru_inblock++;		/* pay in advance */
918Sbill 		}
928Sbill 	}
937015Smckusick 
947015Smckusick 	/*
957114Smckusick 	 * If block was in core, let bread get it.
967114Smckusick 	 * If block wasn't in core, then the read was started
977114Smckusick 	 * above, and just wait for it.
987015Smckusick 	 */
997114Smckusick 	if (bp == NULL)
1007114Smckusick 		return (bread(dev, blkno, size));
1017015Smckusick 	biowait(bp);
1027114Smckusick 	return (bp);
1038Sbill }
1048Sbill 
1058Sbill /*
1068Sbill  * Write the buffer, waiting for completion.
1078Sbill  * Then release the buffer.
1088Sbill  */
1098Sbill bwrite(bp)
1107015Smckusick 	register struct buf *bp;
1118Sbill {
1128Sbill 	register flag;
1138Sbill 
	/* Save the flags first: B_DELWRI and B_ASYNC are consulted
	 * below, after they have been cleared from the buffer itself. */
1148Sbill 	flag = bp->b_flags;
1159857Ssam 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	/* A delayed write was already charged when it was delayed
	 * (see bdwrite); only charge writes not previously paid for. */
1168Sbill 	if ((flag&B_DELWRI) == 0)
1178039Sroot 		u.u_ru.ru_oublock++;		/* noone paid yet */
11815795Ssam 	trace(TR_BWRITE, pack(bp->b_dev, bp->b_bcount), bp->b_blkno);
1198670S 	if (bp->b_bcount > bp->b_bufsize)
1208670S 		panic("bwrite");
1218Sbill 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
1227015Smckusick 
1237015Smckusick 	/*
1247015Smckusick 	 * If the write was synchronous, then await i/o completion.
1257015Smckusick 	 * If the write was "delayed", then we put the buffer on
1267015Smckusick 	 * the q of blocks awaiting i/o completion status.
1277015Smckusick 	 */
1288Sbill 	if ((flag&B_ASYNC) == 0) {
1297015Smckusick 		biowait(bp);
1308Sbill 		brelse(bp);
1318Sbill 	} else if (flag & B_DELWRI)
1328Sbill 		bp->b_flags |= B_AGE;
1338Sbill }
1348Sbill 
1358Sbill /*
1368Sbill  * Release the buffer, marking it so that if it is grabbed
1378Sbill  * for another purpose it will be written out before being
1388Sbill  * given up (e.g. when writing a partial block where it is
1398Sbill  * assumed that another write for the same block will soon follow).
1408Sbill  * This can't be done for magtape, since writes must be done
1418Sbill  * in the same order as requested.
1428Sbill  */
1438Sbill bdwrite(bp)
1447015Smckusick 	register struct buf *bp;
1458Sbill {
1462403Skre 	register int flags;
1478Sbill 
	/* Charge the write now, on the first delay; bwrite will then
	 * see B_DELWRI set and not charge it a second time. */
1488Sbill 	if ((bp->b_flags&B_DELWRI) == 0)
1498039Sroot 		u.u_ru.ru_oublock++;		/* noone paid yet */
	/* Tape devices must write in request order, so the write
	 * cannot be delayed: start it (asynchronously) right away. */
1502403Skre 	flags = bdevsw[major(bp->b_dev)].d_flags;
1512403Skre 	if(flags & B_TAPE)
1528Sbill 		bawrite(bp);
1538Sbill 	else {
1548Sbill 		bp->b_flags |= B_DELWRI | B_DONE;
1558Sbill 		brelse(bp);
1568Sbill 	}
1578Sbill }
1588Sbill 
1598Sbill /*
1608Sbill  * Release the buffer, start I/O on it, but don't wait for completion.
1618Sbill  */
1628Sbill bawrite(bp)
1637015Smckusick 	register struct buf *bp;
1648Sbill {
1658Sbill 
	/* Mark the write asynchronous so bwrite starts the i/o and
	 * returns without sleeping in biowait. */
1668Sbill 	bp->b_flags |= B_ASYNC;
1678Sbill 	bwrite(bp);
1688Sbill }
1698Sbill 
1708Sbill /*
1717015Smckusick  * Release the buffer, with no I/O implied.
1728Sbill  */
1738Sbill brelse(bp)
1747015Smckusick 	register struct buf *bp;
1758Sbill {
1762325Swnj 	register struct buf *flist;
1778Sbill 	register s;
1788Sbill 
17915795Ssam 	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
1807015Smckusick 	/*
1817015Smckusick 	 * If someone's waiting for the buffer, or
1827015Smckusick 	 * is waiting for a buffer wake 'em up.
1837015Smckusick 	 */
1848Sbill 	if (bp->b_flags&B_WANTED)
1858Sbill 		wakeup((caddr_t)bp);
1862325Swnj 	if (bfreelist[0].b_flags&B_WANTED) {
1872325Swnj 		bfreelist[0].b_flags &= ~B_WANTED;
1882325Swnj 		wakeup((caddr_t)bfreelist);
1898Sbill 	}
	/* On error, a locked buffer gets another chance later;
	 * otherwise dissociate the buffer from its device (NODEV)
	 * so later cache lookups cannot match the bad contents. */
1902683Swnj 	if (bp->b_flags&B_ERROR)
1912683Swnj 		if (bp->b_flags & B_LOCKED)
1922683Swnj 			bp->b_flags &= ~B_ERROR;	/* try again later */
1932683Swnj 		else
1942683Swnj 			bp->b_dev = NODEV;  		/* no assoc */
1957015Smckusick 
1967015Smckusick 	/*
1977015Smckusick 	 * Stick the buffer back on a free list.
1987015Smckusick 	 */
	/* Raise processor priority while manipulating the free lists. */
1998Sbill 	s = spl6();
2008670S 	if (bp->b_bufsize <= 0) {
2018670S 		/* block has no buffer ... put at front of unused buffer list */
2028670S 		flist = &bfreelist[BQ_EMPTY];
2038670S 		binsheadfree(bp, flist);
2048670S 	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
2052325Swnj 		/* block has no info ... put at front of most free list */
2068670S 		flist = &bfreelist[BQ_AGE];
2077015Smckusick 		binsheadfree(bp, flist);
2088Sbill 	} else {
	/* Reusable contents: queue by class, at the tail (LRU order). */
2092325Swnj 		if (bp->b_flags & B_LOCKED)
2102325Swnj 			flist = &bfreelist[BQ_LOCKED];
2112325Swnj 		else if (bp->b_flags & B_AGE)
2122325Swnj 			flist = &bfreelist[BQ_AGE];
2132325Swnj 		else
2142325Swnj 			flist = &bfreelist[BQ_LRU];
2152325Swnj 		binstailfree(bp, flist);
2168Sbill 	}
2178Sbill 	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
2188Sbill 	splx(s);
2198Sbill }
2208Sbill 
2218Sbill /*
2228Sbill  * See if the block is associated with some buffer
2238Sbill  * (mainly to avoid getting hung up on a wait in breada)
2248Sbill  */
2258Sbill incore(dev, blkno)
2267015Smckusick 	dev_t dev;
2277015Smckusick 	daddr_t blkno;
2288Sbill {
2298Sbill 	register struct buf *bp;
2302325Swnj 	register struct buf *dp;
2318Sbill 
	/* Returns 1 if a valid (non-B_INVAL) buffer for (dev, blkno)
	 * is on its hash chain, else 0.  Does not claim the buffer. */
2326563Smckusic 	dp = BUFHASH(dev, blkno);
2332325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
2346563Smckusic 		if (bp->b_blkno == blkno && bp->b_dev == dev &&
2357015Smckusick 		    (bp->b_flags & B_INVAL) == 0)
23691Sbill 			return (1);
23791Sbill 	return (0);
2388Sbill }
2398Sbill 
2408Sbill struct buf *
2416563Smckusic baddr(dev, blkno, size)
2426563Smckusic 	dev_t dev;
2436563Smckusic 	daddr_t blkno;
2446563Smckusic 	int size;
2458Sbill {
2468Sbill 
	/* Return the buffer for (dev, blkno) only if the block is
	 * already cached; otherwise return 0 rather than reading it. */
2478Sbill 	if (incore(dev, blkno))
2486563Smckusic 		return (bread(dev, blkno, size));
2498Sbill 	return (0);
2508Sbill }
2518Sbill 
2528Sbill /*
2538Sbill  * Assign a buffer for the given block.  If the appropriate
2548Sbill  * block is already associated, return it; otherwise search
2558Sbill  * for the oldest non-busy buffer and reassign it.
2565424Swnj  *
2575424Swnj  * We use splx here because this routine may be called
2585424Swnj  * on the interrupt stack during a dump, and we don't
2595424Swnj  * want to lower the ipl back to 0.
2608Sbill  */
2618Sbill struct buf *
2626563Smckusic getblk(dev, blkno, size)
2636563Smckusic 	dev_t dev;
2646563Smckusic 	daddr_t blkno;
2656563Smckusic 	int size;
2668Sbill {
2678670S 	register struct buf *bp, *dp;
2685424Swnj 	int s;
2698Sbill 
	/* Clamp wild block numbers to a fixed out-of-range value
	 * before hashing with them (historical XXX hack). */
2709763Ssam 	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))	/* XXX */
2711831Sbill 		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
2727015Smckusick 	/*
2737015Smckusick 	 * Search the cache for the block.  If we hit, but
2747015Smckusick 	 * the buffer is in use for i/o, then we wait until
2757015Smckusick 	 * the i/o has completed.
2767015Smckusick 	 */
2776563Smckusic 	dp = BUFHASH(dev, blkno);
2787015Smckusick loop:
2792325Swnj 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
2806563Smckusic 		if (bp->b_blkno != blkno || bp->b_dev != dev ||
2812325Swnj 		    bp->b_flags&B_INVAL)
2828Sbill 			continue;
2835424Swnj 		s = spl6();
2848Sbill 		if (bp->b_flags&B_BUSY) {
2858Sbill 			bp->b_flags |= B_WANTED;
2868Sbill 			sleep((caddr_t)bp, PRIBIO+1);
2875424Swnj 			splx(s);
2888Sbill 			goto loop;
2898Sbill 		}
2905424Swnj 		splx(s);
2918Sbill 		notavail(bp);
		/* Hit: resize if needed.  brealloc returning 0 means it
		 * released the buffer -- start the search over. */
292*16855Smckusick 		if (bp->b_bcount != size && brealloc(bp, size) == 0)
2937188Sroot 			goto loop;
2948Sbill 		bp->b_flags |= B_CACHE;
2958Sbill 		return(bp);
2968Sbill 	}
	/* Miss: sanity-check the device, then take a free buffer
	 * and re-key it onto this hash chain for (dev, blkno). */
29791Sbill 	if (major(dev) >= nblkdev)
29891Sbill 		panic("blkdev");
2998670S 	bp = getnewbuf();
3006563Smckusic 	bfree(bp);
3017015Smckusick 	bremhash(bp);
3027015Smckusick 	binshash(bp, dp);
3038Sbill 	bp->b_dev = dev;
3046563Smckusic 	bp->b_blkno = blkno;
3058670S 	bp->b_error = 0;
3067188Sroot 	if (brealloc(bp, size) == 0)
3077188Sroot 		goto loop;
3088Sbill 	return(bp);
3098Sbill }
3108Sbill 
3118Sbill /*
3128Sbill  * get an empty block,
3138Sbill  * not assigned to any particular device
3148Sbill  */
3158Sbill struct buf *
3166563Smckusic geteblk(size)
3176563Smckusic 	int size;
3188Sbill {
3198670S 	register struct buf *bp, *flist;
3208Sbill 
3218Sbill loop:
	/* Take any free buffer, mark its old contents invalid, and
	 * re-key it to NODEV on the age queue's hash chain. */
3228670S 	bp = getnewbuf();
3238670S 	bp->b_flags |= B_INVAL;
3247015Smckusick 	bfree(bp);
3257015Smckusick 	bremhash(bp);
3268670S 	flist = &bfreelist[BQ_AGE];
3278670S 	binshash(bp, flist);
3288Sbill 	bp->b_dev = (dev_t)NODEV;
3298670S 	bp->b_error = 0;
	/* brealloc returning 0 means the buffer was released; retry. */
3307188Sroot 	if (brealloc(bp, size) == 0)
3317188Sroot 		goto loop;
3328Sbill 	return(bp);
3338Sbill }
3348Sbill 
3358Sbill /*
3366563Smckusic  * Allocate space associated with a buffer.
3379763Ssam  * If can't get space, buffer is released
3386563Smckusic  */
3396563Smckusic brealloc(bp, size)
3406563Smckusic 	register struct buf *bp;
3416563Smckusic 	int size;
3426563Smckusic {
3436563Smckusic 	daddr_t start, last;
3446563Smckusic 	register struct buf *ep;
3456563Smckusic 	struct buf *dp;
3466563Smckusic 	int s;
3476563Smckusic 
	/* Returns 1 with bp resized on success; returns 0 when bp had
	 * to be written out (and thereby released), in which case the
	 * caller must start over -- bp is no longer theirs. */
3486563Smckusic 	/*
3496563Smckusic 	 * First need to make sure that all overlaping previous I/O
3506563Smckusic 	 * is dispatched with.
3516563Smckusic 	 */
3526563Smckusic 	if (size == bp->b_bcount)
3537188Sroot 		return (1);
	/* Shrinking: a dirty buffer must be flushed first (this
	 * releases it, hence the 0 return). */
3547188Sroot 	if (size < bp->b_bcount) {
3557188Sroot 		if (bp->b_flags & B_DELWRI) {
3567188Sroot 			bwrite(bp);
3577188Sroot 			return (0);
3587188Sroot 		}
3597188Sroot 		if (bp->b_flags & B_LOCKED)
3607188Sroot 			panic("brealloc");
3619763Ssam 		return (allocbuf(bp, size));
3627188Sroot 	}
	/* Growing: the data is no longer complete for the new size. */
3637188Sroot 	bp->b_flags &= ~B_DONE;
3649763Ssam 	if (bp->b_dev == NODEV)
3659763Ssam 		return (allocbuf(bp, size));
3667016Smckusick 
36715795Ssam 	trace(TR_BREALLOC, pack(bp->b_dev, size), bp->b_blkno);
3687188Sroot 	/*
3697188Sroot 	 * Search cache for any buffers that overlap the one that we
3707188Sroot 	 * are trying to allocate. Overlapping buffers must be marked
3717188Sroot 	 * invalid, after being written out if they are dirty. (indicated
3727188Sroot 	 * by B_DELWRI) A disk block must be mapped by at most one buffer
3737188Sroot 	 * at any point in time. Care must be taken to avoid deadlocking
3747188Sroot 	 * when two buffer are trying to get the same set of disk blocks.
3757188Sroot 	 */
3767188Sroot 	start = bp->b_blkno;
37712644Ssam 	last = start + btodb(size) - 1;
3786563Smckusic 	dp = BUFHASH(bp->b_dev, bp->b_blkno);
3796563Smckusic loop:
3806563Smckusic 	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
3817188Sroot 		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
3826563Smckusic 			continue;
3837188Sroot 		/* look for overlap */
3847188Sroot 		if (ep->b_bcount == 0 || ep->b_blkno > last ||
38512644Ssam 		    ep->b_blkno + btodb(ep->b_bcount) <= start)
3867188Sroot 			continue;
	/* Overlapping buffer found: wait for it if busy, flush it
	 * if dirty, then invalidate it; each sleep or write forces
	 * a rescan of the chain from the top. */
3876563Smckusic 		s = spl6();
3886563Smckusic 		if (ep->b_flags&B_BUSY) {
3896563Smckusic 			ep->b_flags |= B_WANTED;
3906563Smckusic 			sleep((caddr_t)ep, PRIBIO+1);
3918670S 			splx(s);
3926563Smckusic 			goto loop;
3936563Smckusic 		}
3948670S 		splx(s);
3957188Sroot 		notavail(ep);
3966563Smckusic 		if (ep->b_flags & B_DELWRI) {
3976563Smckusic 			bwrite(ep);
3986563Smckusic 			goto loop;
3996563Smckusic 		}
4007188Sroot 		ep->b_flags |= B_INVAL;
4017188Sroot 		brelse(ep);
4026563Smckusic 	}
4039763Ssam 	return (allocbuf(bp, size));
4048670S }
4058670S 
4068670S /*
4078670S  * Find a buffer which is available for use.
4088670S  * Select something from a free list.
4098670S  * Preference is to AGE list, then LRU list.
4108670S  */
4118670S struct buf *
4128670S getnewbuf()
4138670S {
4148670S 	register struct buf *bp, *dp;
4158670S 	int s;
4168670S 
4178670S loop:
4188670S 	s = spl6();
	/* Scan queues from BQ_AGE downward; the > test stops before
	 * bfreelist[0], so the lowest queue is never raided --
	 * presumably the locked queue; confirm against buf.h. */
4198670S 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
4208670S 		if (dp->av_forw != dp)
4218670S 			break;
4228670S 	if (dp == bfreelist) {		/* no free blocks */
4238670S 		dp->b_flags |= B_WANTED;
4248670S 		sleep((caddr_t)dp, PRIBIO+1);
42512170Ssam 		splx(s);
4268670S 		goto loop;
4278670S 	}
4288670S 	splx(s);
4298670S 	bp = dp->av_forw;
4308670S 	notavail(bp);
	/* A delayed-write victim must be flushed (asynchronously)
	 * before it can be recycled; then pick another buffer. */
4318670S 	if (bp->b_flags & B_DELWRI) {
4328670S 		bp->b_flags |= B_ASYNC;
4338670S 		bwrite(bp);
4348670S 		goto loop;
4358670S 	}
	/* NOTE(review): trace tag is TR_BRELSE, the same tag brelse
	 * uses -- presumably reused for recycle events; confirm. */
43615795Ssam 	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
4378670S 	bp->b_flags = B_BUSY;
4388670S 	return (bp);
4398670S }
4408670S 
4418670S /*
4428Sbill  * Wait for I/O completion on the buffer; return errors
4438Sbill  * to the user.
4448Sbill  */
4457015Smckusick biowait(bp)
4466563Smckusic 	register struct buf *bp;
4478Sbill {
4485431Sroot 	int s;
4498Sbill 
	/* Sleep until biodone sets B_DONE; the flag is tested at
	 * raised priority so the wakeup cannot slip in between
	 * the test and the sleep. */
4505431Sroot 	s = spl6();
4518Sbill 	while ((bp->b_flags&B_DONE)==0)
4528Sbill 		sleep((caddr_t)bp, PRIBIO);
4535431Sroot 	splx(s);
	/* Post any device error, but never overwrite an error
	 * already recorded in u.u_error. */
45411841Ssam 	if (u.u_error == 0)			/* XXX */
45511841Ssam 		u.u_error = geterror(bp);
4568Sbill }
4578Sbill 
4588Sbill /*
45913128Ssam  * Mark I/O complete on a buffer.
46013128Ssam  * If someone should be called, e.g. the pageout
46113128Ssam  * daemon, do so.  Otherwise, wake up anyone
46213128Ssam  * waiting for it.
4638Sbill  */
4647015Smckusick biodone(bp)
4657015Smckusick 	register struct buf *bp;
4668Sbill {
4678Sbill 
	/* Completing the same buffer twice indicates a driver bug. */
468420Sbill 	if (bp->b_flags & B_DONE)
4697015Smckusick 		panic("dup biodone");
4708Sbill 	bp->b_flags |= B_DONE;
	/* B_CALL: deliver completion via the b_iodone callback
	 * (e.g. for the pageout daemon) instead of a wakeup. */
4719763Ssam 	if (bp->b_flags & B_CALL) {
4729763Ssam 		bp->b_flags &= ~B_CALL;
4739763Ssam 		(*bp->b_iodone)(bp);
4749763Ssam 		return;
4759763Ssam 	}
	/* Async i/o has no waiter: just release the buffer.
	 * Otherwise wake the process sleeping in biowait. */
4768Sbill 	if (bp->b_flags&B_ASYNC)
4778Sbill 		brelse(bp);
4788Sbill 	else {
4798Sbill 		bp->b_flags &= ~B_WANTED;
4808Sbill 		wakeup((caddr_t)bp);
4818Sbill 	}
4828Sbill }
4838Sbill 
4848Sbill /*
4858670S  * Insure that no part of a specified block is in an incore buffer.
4868670S  */
4878670S blkflush(dev, blkno, size)
4888670S 	dev_t dev;
4898670S 	daddr_t blkno;
4908670S 	long size;
4918670S {
4928670S 	register struct buf *ep;
4938670S 	struct buf *dp;
4948670S 	daddr_t start, last;
4958670S 	int s;
4968670S 
	/* Write out every dirty (B_DELWRI) buffer overlapping the
	 * disk-block range [blkno, blkno + btodb(size) - 1] on dev,
	 * sleeping on busy buffers until the range is clean. */
4978670S 	start = blkno;
49812644Ssam 	last = start + btodb(size) - 1;
4998670S 	dp = BUFHASH(dev, blkno);
5008670S loop:
5018670S 	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
5028670S 		if (ep->b_dev != dev || (ep->b_flags&B_INVAL))
5038670S 			continue;
5048670S 		/* look for overlap */
5058670S 		if (ep->b_bcount == 0 || ep->b_blkno > last ||
50612644Ssam 		    ep->b_blkno + btodb(ep->b_bcount) <= start)
5078670S 			continue;
5088670S 		s = spl6();
5098670S 		if (ep->b_flags&B_BUSY) {
5108670S 			ep->b_flags |= B_WANTED;
5118670S 			sleep((caddr_t)ep, PRIBIO+1);
5128670S 			splx(s);
5138670S 			goto loop;
5148670S 		}
		/* Dirty: claim the buffer, write it synchronously,
		 * then rescan the chain from the top. */
5158670S 		if (ep->b_flags & B_DELWRI) {
5168670S 			splx(s);
5178670S 			notavail(ep);
5188670S 			bwrite(ep);
5198670S 			goto loop;
5208670S 		}
5218670S 		splx(s);
5228670S 	}
5238670S }
5248670S 
5258670S /*
52613128Ssam  * Make sure all write-behind blocks
5278Sbill  * on dev (or NODEV for all)
5288Sbill  * are flushed out.
5298Sbill  * (from umount and update)
5308Sbill  */
5318Sbill bflush(dev)
5327015Smckusick 	dev_t dev;
5338Sbill {
5348Sbill 	register struct buf *bp;
5352325Swnj 	register struct buf *flist;
5365431Sroot 	int s;
5378Sbill 
5388Sbill loop:
5395431Sroot 	s = spl6();
	/* Scan every free list up to (not including) BQ_EMPTY for
	 * delayed-write buffers; after starting one write, restore
	 * the spl and restart the whole scan. */
5408670S 	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
5412325Swnj 	for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
5427015Smckusick 		if ((bp->b_flags & B_DELWRI) == 0)
5437015Smckusick 			continue;
		/* NODEV means flush delayed writes for all devices. */
5447015Smckusick 		if (dev == NODEV || dev == bp->b_dev) {
5458Sbill 			bp->b_flags |= B_ASYNC;
5468Sbill 			notavail(bp);
5478Sbill 			bwrite(bp);
54812173Ssam 			splx(s);
5498Sbill 			goto loop;
5508Sbill 		}
5518Sbill 	}
5525431Sroot 	splx(s);
5538Sbill }
5548Sbill 
5558Sbill /*
5568Sbill  * Pick up the device's error number and pass it to the user;
5578Sbill  * if there is an error but the number is 0 set a generalized
5588Sbill  * code.  Actually the latter is always true because devices
5598Sbill  * don't yet return specific errors.
5608Sbill  */
5618Sbill geterror(bp)
5627015Smckusick 	register struct buf *bp;
5638Sbill {
5647723Swnj 	int error = 0;
5658Sbill 
	/* Return the buffer's error number, substituting the generic
	 * EIO when B_ERROR is set but b_error is 0; 0 if no error. */
5668Sbill 	if (bp->b_flags&B_ERROR)
5677723Swnj 		if ((error = bp->b_error)==0)
5687723Swnj 			return (EIO);
5697723Swnj 	return (error);
5708Sbill }
5712299Skre 
5722299Skre /*
5732299Skre  * Invalidate in core blocks belonging to closed or umounted filesystem
5742299Skre  *
5752299Skre  * This is not nicely done at all - the buffer ought to be removed from the
5762299Skre  * hash chains & have its dev/blkno fields clobbered, but unfortunately we
5772299Skre  * can't do that here, as it is quite possible that the block is still
5782299Skre  * being used for i/o. Eventually, all disc drivers should be forced to
5792299Skre  * have a close routine, which ought ensure that the queue is empty, then
5802299Skre  * properly flush the queues. Until that happy day, this suffices for
5812299Skre  * correctness.						... kre
5822299Skre  */
5832299Skre binval(dev)
5847015Smckusick 	dev_t dev;
5852299Skre {
5862361Skre 	register struct buf *bp;
5872361Skre 	register struct bufhd *hp;
	/* Treat a hash-chain head as a struct buf so its b_forw
	 * links can be followed like any chain member. */
5882361Skre #define dp ((struct buf *)hp)
5892299Skre 
	/* Walk every hash chain and flag this device's buffers
	 * B_INVAL; they are deliberately not unhashed here (see
	 * the caveats in the comment above this function). */
5902361Skre 	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
5912361Skre 		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
5922361Skre 			if (bp->b_dev == dev)
5932361Skre 				bp->b_flags |= B_INVAL;
5942299Skre }
595