/*	vfs_cluster.c	4.35	82/08/13	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"

int bioprintfs = 0;

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	bp = getblk(dev, blkno, size);
	if (bp->b_flags&B_DONE) {
		trace(TR_BREADHIT, dev, blkno);
		return (bp);
	}
	bp->b_flags |= B_READ;
	(*bdevsw[major(dev)].d_strategy)(bp);
	trace(TR_BREADMISS, dev, blkno);
	u.u_vm.vm_inblk++;		/* pay for read */
	biowait(bp);
	return (bp);
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, size, rablkno, rasize)
	dev_t dev;
	daddr_t blkno; int size;
	daddr_t rablkno; int rasize;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(dev, blkno)) {
		bp = getblk(dev, blkno, size);
		if ((bp->b_flags&B_DONE) == 0) {
			bp->b_flags |= B_READ;
			(*bdevsw[major(dev)].d_strategy)(bp);
			trace(TR_BREADMISS, dev, blkno);
			u.u_vm.vm_inblk++;	/* pay for read */
		} else
			trace(TR_BREADHIT, dev, blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (rablkno && !incore(dev, rablkno)) {
		rabp = getblk(dev, rablkno, rasize);
		if (rabp->b_flags & B_DONE) {
			brelse(rabp);
			trace(TR_BREADHITRA, dev, rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			(*bdevsw[major(dev)].d_strategy)(rabp);
			trace(TR_BREADMISSRA, dev, rablkno);
			u.u_vm.vm_inblk++;	/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, so just wait for it.
	 */
	if (bp == NULL)
		return (bread(dev, blkno, size));
	biowait(bp);
	return (bp);
}
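/*
 * A minimal usage sketch, assuming kernel context: how a hypothetical
 * filesystem read path might drive bread/breada/brelse.  The routine
 * name and its arguments are illustrative only, so the sketch is kept
 * under #ifdef notdef and is never compiled.
 */
#ifdef notdef
example_read(dev, bn, rabn, size)
	dev_t dev;
	daddr_t bn, rabn;
	int size;
{
	register struct buf *bp;

	if (rabn)
		bp = breada(dev, bn, size, rabn, size);	/* fire read-ahead */
	else
		bp = bread(dev, bn, size);
	if (u.u_error == 0) {
		/* data is valid at bp->b_un.b_addr for bp->b_bcount bytes */
	}
	brelse(bp);		/* hand the buffer back to a free list */
}
#endif /* notdef */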
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE);
	if ((flag&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;	/* no one paid yet */
	trace(TR_BWRITE, bp->b_dev, bp->b_blkno);
	if (bioprintfs)
		printf("write %x blk %d count %d\n",
			bp->b_dev, bp->b_blkno, bp->b_bcount);
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting i/o completion status.
	 * Otherwise, the i/o must be finished and we check for
	 * an error.
	 */
	if ((flag&B_ASYNC) == 0) {
		biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
	else
		u.u_error = geterror(bp);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	register int flags;

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;	/* no one paid yet */
	flags = bdevsw[major(bp->b_dev)].d_flags;
	if (flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}
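/*
 * A minimal sketch, assuming kernel context, of how a hypothetical
 * caller chooses among the three write disciplines above once it has
 * finished modifying a buffer.  The routine and its "how" argument
 * are illustrative only; never compiled.
 */
#ifdef notdef
example_write(bp, how)
	register struct buf *bp;
	int how;
{

	switch (how) {

	case 0:			/* must reach the disk before we proceed */
		bwrite(bp);	/* starts i/o, biowaits, then brelses */
		break;

	case 1:			/* start the i/o now, don't wait for it */
		bawrite(bp);
		break;

	default:		/* partial block; another write coming soon */
		bdwrite(bp);	/* just mark B_DELWRI|B_DONE and release */
	}
}
#endif /* notdef */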
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	/*
	 * If someone's waiting for the buffer, or
	 * is waiting for a buffer, wake 'em up.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */

	/*
	 * Stick the buffer back on a free list.
	 */
	s = spl6();
	if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQUEUES-1];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(dev, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == blkno && bp->b_dev == dev &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return the block if it is already in the cache;
 * return 0, without doing any I/O, if it is not.
 */
struct buf *
baddr(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{

	if (incore(dev, blkno))
		return (bread(dev, blkno, size));
	return (0);
}
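/*
 * A minimal sketch, assuming kernel context: baddr lets a hypothetical
 * caller consume a block only when it is already cached, without ever
 * forcing a disk read.  Illustrative only; never compiled.
 */
#ifdef notdef
example_peek(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	bp = baddr(dev, blkno, size);	/* 0 unless the block was incore */
	if (bp == 0)
		return (0);	/* caller may decide a read isn't worth it */
	/* use bp->b_un.b_addr here */
	brelse(bp);
	return (1);
}
#endif /* notdef */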
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp, *ep;
	int s;

	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(dev, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
		s = spl6();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(bp);
		if (brealloc(bp, size) == 0)
			goto loop;
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	/*
	 * Not found in the cache, select something from
	 * a free list.  Preference is to LRU list, then AGE list.
	 */
	s = spl6();
	for (ep = &bfreelist[BQUEUES-1]; ep > bfreelist; ep--)
		if (ep->av_forw != ep)
			break;
	if (ep == bfreelist) {		/* no free blocks at all */
		ep->b_flags |= B_WANTED;
		sleep((caddr_t)ep, PRIBIO+1);
		splx(s);
		goto loop;
	}
	splx(s);
	bp = ep->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, bp->b_dev, bp->b_blkno);
	bp->b_flags = B_BUSY;
	bfree(bp);
	bremhash(bp);
	binshash(bp, dp);
	bp->b_dev = dev;
	bp->b_blkno = blkno;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *dp;
	int s;

loop:
	s = spl6();
	for (dp = &bfreelist[BQUEUES-1]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		goto loop;
	}
	splx(s);
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, bp->b_dev, bp->b_blkno);
	bp->b_flags = B_BUSY|B_INVAL;
	bfree(bp);
	bremhash(bp);
	binshash(bp, dp);
	bp->b_dev = (dev_t)NODEV;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}
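/*
 * A minimal sketch, assuming kernel context: geteblk hands back a
 * B_INVAL buffer bound to NODEV, usable as scratch storage that can
 * never be mistaken for a cached disk block.  The use shown is
 * hypothetical; never compiled.
 */
#ifdef notdef
example_scratch(size)
	int size;
{
	register struct buf *bp;

	bp = geteblk(size);	/* may sleep waiting for a free buffer */
	/* use bp->b_un.b_addr as size bytes of temporary storage */
	brelse(bp);		/* B_INVAL sends it to the head of a free list */
}
#endif /* notdef */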
/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	/*
	 * First, make sure that any overlapping previous I/O
	 * has been disposed of.
	 */
	if (size == bp->b_bcount)
		return (1);
	if (size < bp->b_bcount) {
		if (bp->b_flags & B_DELWRI) {
			bwrite(bp);
			return (0);
		}
		if (bp->b_flags & B_LOCKED)
			panic("brealloc");
		goto allocit;
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_dev == NODEV)
		goto allocit;

	/*
	 * Search the cache for any buffers that overlap the one that we
	 * are trying to allocate.  Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI).  A disk block must be mapped by at most one buffer
	 * at any point in time.  Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
	last = start + (size / DEV_BSIZE) - 1;
	dp = BUFHASH(bp->b_dev, bp->b_blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + (ep->b_bcount / DEV_BSIZE) <= start)
			continue;
		if (bioprintfs)
			if (ep->b_flags&B_BUSY)
				printf("sleeping on:dev 0x%x, blks %d-%d, flg 0%o allocing dev 0x%x, blks %d-%d, flg 0%o\n",
					ep->b_dev, ep->b_blkno,
					ep->b_blkno + (ep->b_bcount / DEV_BSIZE) - 1,
					ep->b_flags, bp->b_dev, start, last,
					bp->b_flags);
		s = spl6();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			(void) splx(s);
			goto loop;
		}
		(void) splx(s);
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			if (bioprintfs)
				printf("DELWRI:dev 0x%x, blks %d-%d, flg 0%o allocing dev 0x%x, blks %d-%d, flg 0%o\n",
					ep->b_dev, ep->b_blkno,
					ep->b_blkno + (ep->b_bcount / DEV_BSIZE) - 1,
					ep->b_flags, bp->b_dev, start, last,
					bp->b_flags);
			bwrite(ep);
			goto loop;
		}
		ep->b_flags |= B_INVAL;
		brelse(ep);
	}
allocit:
	/*
	 * Here the buffer is already available, so all we
	 * need to do is set the size.  Someday a better memory
	 * management scheme will be implemented.
	 */
	bp->b_bcount = size;
	return (1);
}

/*
 * Release space associated with a buffer.
 */
bfree(bp)
	struct buf *bp;
{
	/*
	 * Here the buffer does not change, so all we
	 * need to do is set the size.  Someday a better memory
	 * management scheme will be implemented.
	 */
	bp->b_bcount = 0;
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = spl6();
	while ((bp->b_flags&B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	u.u_error = geterror(bp);
}
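/*
 * A minimal sketch, assuming kernel context and a buffer whose data
 * area is already set up: the raw pattern that biowait/biodone mediate.
 * The caller marks the buffer busy, hands it to the driver's strategy
 * routine, and sleeps in biowait until the driver's interrupt side
 * calls biodone.  The routine is hypothetical; never compiled.
 */
#ifdef notdef
example_rawread(bp, dev, blkno, count)
	register struct buf *bp;
	dev_t dev;
	daddr_t blkno;
	int count;
{

	bp->b_flags = B_BUSY | B_READ;
	bp->b_dev = dev;
	bp->b_blkno = blkno;
	bp->b_bcount = count;
	(*bdevsw[major(dev)].d_strategy)(bp);	/* driver will biodone(bp) */
	biowait(bp);				/* sleep until B_DONE is set */
}
#endif /* notdef */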
/*
 * Mark I/O complete on a buffer.  If the header
 * indicates a dirty page push completion, the
 * header is inserted into the ``cleaned'' list
 * to be processed by the pageout daemon.  Otherwise
 * release it if I/O is asynchronous, and wake
 * up anyone waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register int s;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_DIRTY) {
		if (bp->b_flags & B_ERROR)
			panic("IO err in push");
		s = spl6();
		bp->av_forw = bclnlist;
		bp->b_bcount = swsize[bp - swbuf];
		bp->b_pfcent = swpf[bp - swbuf];
		cnt.v_pgout++;
		cnt.v_pgpgout += bp->b_bcount / NBPG;
		bclnlist = bp;
		if (bswlist.b_flags & B_WANTED)
			wakeup((caddr_t)&proc[2]);
		splx(s);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 * (and temporarily pagein)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;
	int s;

loop:
	s = spl6();
	for (flist = bfreelist; flist < &bfreelist[BQUEUES]; flist++)
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
			if ((bp->b_flags & B_DELWRI) == 0)
				continue;
			if (dev == NODEV || dev == bp->b_dev) {
				bp->b_flags |= B_ASYNC;
				notavail(bp);
				bwrite(bp);
				goto loop;
			}
		}
	splx(s);
}

/*
 * Pick up the device's error number and return it to the caller;
 * if there is an error but the number is 0, return a generalized
 * code.  Actually the latter is always true, because devices
 * don't yet return specific errors.
 */
geterror(bp)
	register struct buf *bp;
{
	int error = 0;

	if (bp->b_flags&B_ERROR)
		if ((error = bp->b_error) == 0)
			return (EIO);
	return (error);
}

/*
 * Invalidate in-core blocks belonging to a closed or umounted filesystem.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness. ... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define	dp ((struct buf *)hp)

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}
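/*
 * A minimal sketch, assuming kernel context: the order in which
 * umount-style code would use bflush and binval - first push delayed
 * writes for the device out, then invalidate whatever remains cached
 * for it.  The routine is hypothetical; never compiled.
 */
#ifdef notdef
example_umount(dev)
	dev_t dev;
{

	bflush(dev);		/* write out B_DELWRI blocks for dev */
	binval(dev);		/* mark its remaining cached blocks B_INVAL */
}
#endif /* notdef */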