/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vfs_cluster.c	6.5 (Berkeley) 06/08/85
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "vm.h"
#include "trace.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	bp = getblk(dev, blkno, size);
	if (bp->b_flags&B_DONE) {
		trace(TR_BREADHIT, pack(dev, size), blkno);
		return (bp);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	(*bdevsw[major(dev)].d_strategy)(bp);
	trace(TR_BREADMISS, pack(dev, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	biowait(bp);
	return (bp);
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, size, rablkno, rabsize)
	dev_t dev;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(dev, blkno)) {
		bp = getblk(dev, blkno, size);
		if ((bp->b_flags&B_DONE) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			(*bdevsw[major(dev)].d_strategy)(bp);
			trace(TR_BREADMISS, pack(dev, size), blkno);
			u.u_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(dev, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (rablkno && !incore(dev, rablkno)) {
		rabp = getblk(dev, rablkno, rabsize);
		if (rabp->b_flags & B_DONE) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(dev, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			(*bdevsw[major(dev)].d_strategy)(rabp);
			trace(TR_BREADMISSRA, pack(dev, rabsize), rablkno);
			u.u_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, so just wait for it here.
	 */
	if (bp == NULL)
		return (bread(dev, blkno, size));
	biowait(bp);
	return (bp);
}
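/*
 * Illustrative sketch, not part of the original file: a typical caller
 * reading sequentially asks breada for the current block, names the
 * block that follows it as the read-ahead candidate, and releases the
 * buffer once the data has been used.  The function name here is
 * hypothetical, and the use of b_un.b_addr is an assumption about the
 * buffer layout, shown only for illustration.
 */
#ifdef notdef
exampleseqread(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	/* read blkno; start i/o on the next block in the background */
	bp = breada(dev, blkno, size, blkno + btodb(size), size);
	if (geterror(bp) == 0) {
		/* ... consume size bytes at bp->b_un.b_addr ... */
	}
	brelse(bp);
}
#endif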
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	trace(TR_BWRITE, pack(bp->b_dev, bp->b_bcount), bp->b_blkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	register int flags;

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	flags = bdevsw[major(bp->b_dev)].d_flags;
	if (flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}
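/*
 * Illustrative sketch, not part of the original file: choosing among
 * the three write flavors above.  A caller that expects to write the
 * same block again soon (e.g. after a partial-block write) uses
 * bdwrite; one that is finished with the block but need not see the
 * result uses bawrite; one that must know the outcome before going on
 * uses bwrite.  The function and argument names below, other than
 * bwrite/bdwrite/bawrite themselves, are hypothetical.
 */
#ifdef notdef
examplewrite(bp, done, mustwait)
	register struct buf *bp;
	int done, mustwait;
{

	if (!done)
		bdwrite(bp);	/* more of this block coming soon */
	else if (mustwait)
		bwrite(bp);	/* synchronous; error via geterror */
	else
		bawrite(bp);	/* fire and forget */
}
#endif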
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	/*
	 * If someone's waiting for the buffer, or
	 * is waiting for a buffer, wake 'em up.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */

	/*
	 * Stick the buffer back on a free list.
	 */
	s = spl6();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most-free (AGE) list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(dev, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == blkno && bp->b_dev == dev &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return the block only if it is already in core;
 * the caller is expected to fall back to a real read otherwise.
 */
struct buf *
baddr(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{

	if (incore(dev, blkno))
		return (bread(dev, blkno, size));
	return (0);
}
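/*
 * Illustrative sketch, not part of the original file: baddr gives a
 * non-blocking probe of the cache -- the caller gets the buffer only
 * if the block is already resident, and can choose another strategy
 * instead of sleeping in bread.  The function name is hypothetical.
 */
#ifdef notdef
exampleprobe(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	bp = baddr(dev, blkno, size);
	if (bp == 0) {
		/* not cached; caller might schedule the read for later */
		return;
	}
	/* ... use the cached block ... */
	brelse(bp);
}
#endif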
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))	/* XXX */
		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(dev, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
		s = spl6();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(bp);
		if (bp->b_bcount != size && brealloc(bp, size) == 0)
			goto loop;
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	binshash(bp, dp);
	bp->b_dev = dev;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

loop:
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	binshash(bp, flist);
	bp->b_dev = (dev_t)NODEV;
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}
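/*
 * Illustrative sketch, not part of the original file: geteblk hands
 * back a buffer with no device association, useful as temporary
 * storage that is returned to the AGE list when released.  The
 * function name is hypothetical and the use of b_un.b_addr is an
 * assumption about the buffer layout.
 */
#ifdef notdef
examplescratch(size)
	int size;
{
	register struct buf *bp;

	bp = geteblk(size);
	/* ... use size bytes at bp->b_un.b_addr as scratch space ... */
	brelse(bp);
}
#endif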
/*
 * Allocate space associated with a buffer.
 * If space cannot be obtained, the buffer is released.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	/*
	 * First we need to make sure that all overlapping previous I/O
	 * has been dealt with.
	 */
	if (size == bp->b_bcount)
		return (1);
	if (size < bp->b_bcount) {
		if (bp->b_flags & B_DELWRI) {
			bwrite(bp);
			return (0);
		}
		if (bp->b_flags & B_LOCKED)
			panic("brealloc");
		return (allocbuf(bp, size));
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_dev == NODEV)
		return (allocbuf(bp, size));

	trace(TR_BREALLOC, pack(bp->b_dev, size), bp->b_blkno);
	/*
	 * Search cache for any buffers that overlap the one that we
	 * are trying to allocate.  Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI).  A disk block must be mapped by at most one buffer
	 * at any point in time.  Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
	last = start + btodb(size) - 1;
	dp = BUFHASH(bp->b_dev, bp->b_blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		s = spl6();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			bwrite(ep);
			goto loop;
		}
		ep->b_flags |= B_INVAL;
		brelse(ep);
	}
	return (allocbuf(bp, size));
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	int s;

loop:
	s = spl6();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	splx(s);
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = spl6();
	while ((bp->b_flags&B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if (u.u_error == 0)			/* XXX */
		u.u_error = geterror(bp);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
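/*
 * Illustrative sketch, not part of the original file: an asynchronous
 * caller can have biodone invoke a completion routine instead of
 * waking a sleeper, by setting B_CALL and b_iodone before starting
 * the I/O; this is the hook used by the pageout daemon.  Note that
 * biodone returns before the B_ASYNC check in that case, so the
 * completion routine must release the buffer itself.  The routine
 * names here are hypothetical.
 */
#ifdef notdef
exampledone(bp)
	register struct buf *bp;
{

	/* runs from biodone, possibly at interrupt level; keep it short */
	brelse(bp);
}

examplestart(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_READ|B_ASYNC|B_CALL;
	bp->b_iodone = exampledone;
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
}
#endif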
/*
 * Ensure that no part of a specified block is in an incore buffer.
 */
blkflush(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	long size;
{
	register struct buf *ep;
	struct buf *dp;
	daddr_t start, last;
	int s;

	start = blkno;
	last = start + btodb(size) - 1;
	dp = BUFHASH(dev, blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep->b_dev != dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		s = spl6();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		if (ep->b_flags & B_DELWRI) {
			splx(s);
			notavail(ep);
			bwrite(ep);
			goto loop;
		}
		splx(s);
	}
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;
	int s;

loop:
	s = spl6();
	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
			if ((bp->b_flags & B_DELWRI) == 0)
				continue;
			if (dev == NODEV || dev == bp->b_dev) {
				bp->b_flags |= B_ASYNC;
				notavail(bp);
				bwrite(bp);
				splx(s);
				goto loop;
			}
		}
	splx(s);
}

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0 set a generalized
 * code.  Actually the latter is always true because devices
 * don't yet return specific errors.
 */
geterror(bp)
	register struct buf *bp;
{
	int error = 0;

	if (bp->b_flags&B_ERROR)
		if ((error = bp->b_error) == 0)
			return (EIO);
	return (error);
}

/*
 * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness. ... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define dp ((struct buf *)hp)

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}
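/*
 * Illustrative sketch, not part of the original file: when a
 * filesystem goes away, delayed writes should be pushed out with
 * bflush before the remaining cached blocks are stamped B_INVAL with
 * binval, so no dirty data is lost when the blocks are marked stale.
 * The wrapper name is hypothetical.
 */
#ifdef notdef
exampleunmount(dev)
	dev_t dev;
{

	bflush(dev);		/* push out write-behind blocks first */
	binval(dev);		/* then mark leftover blocks invalid */
}
#endif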