/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vfs_cluster.c	6.9 (Berkeley) 02/20/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "vm.h"
#include "trace.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	bp = getblk(dev, blkno, size);
	if (bp->b_flags&B_DONE) {
		trace(TR_BREADHIT, pack(dev, size), blkno);
		return (bp);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	(*bdevsw[major(dev)].d_strategy)(bp);
	trace(TR_BREADMISS, pack(dev, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	biowait(bp);
	return (bp);
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, size, rablkno, rabsize)
	dev_t dev;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(dev, blkno)) {
		bp = getblk(dev, blkno, size);
		if ((bp->b_flags&B_DONE) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			(*bdevsw[major(dev)].d_strategy)(bp);
			trace(TR_BREADMISS, pack(dev, size), blkno);
			u.u_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(dev, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (rablkno && !incore(dev, rablkno)) {
		rabp = getblk(dev, rablkno, rabsize);
		if (rabp->b_flags & B_DONE) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(dev, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			(*bdevsw[major(dev)].d_strategy)(rabp);
			trace(TR_BREADMISSRA, pack(dev, rabsize), rablkno);
			u.u_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(dev, blkno, size));
	biowait(bp);
	return (bp);
}
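/*
 * Illustrative sketch only (not part of the original file): a typical
 * file system read path fetches a block with bread(), or with breada()
 * when the next block is known, and must eventually release the buffer.
 * The names example_read, bn, rabn, and bsize are hypothetical.
 */
#ifdef notdef
struct buf *
example_read(dev, bn, rabn, bsize)
	dev_t dev;
	daddr_t bn, rabn;
	int bsize;
{
	register struct buf *bp;

	if (rabn)
		bp = breada(dev, bn, bsize, rabn, bsize);
	else
		bp = bread(dev, bn, bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);		/* give the buffer back on error */
		return ((struct buf *)0);
	}
	return (bp);			/* caller must brelse() when done */
}
#endif /* notdef */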
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	trace(TR_BWRITE, pack(bp->b_dev, bp->b_bcount), bp->b_blkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	register int flags;

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	flags = bdevsw[major(bp->b_dev)].d_flags;
	if (flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	/*
	 * If someone's waiting for the buffer, or
	 * is waiting for a buffer, wake 'em up.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */

	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}
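/*
 * Illustrative sketch only (not part of the original file): a writer
 * that has filled "bp" chooses among the three flavors above.  The
 * flags "synchronous" and "wholeblock" are hypothetical.
 */
#ifdef notdef
	if (synchronous)
		bwrite(bp);	/* start i/o, wait, then release */
	else if (wholeblock)
		bawrite(bp);	/* start i/o, don't wait */
	else
		bdwrite(bp);	/* partial block: another write expected soon */
#endif /* notdef */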
/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(dev, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == blkno && bp->b_dev == dev &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return the buffer for the given block if it is already
 * in core (reading it in, if need be, to get a valid copy);
 * otherwise return 0 without initiating any I/O.
 */
struct buf *
baddr(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{

	if (incore(dev, blkno))
		return (bread(dev, blkno, size));
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow.  This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do, is open to
	 * dispute.  mkm 9/85
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(dev, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(bp);
		if (bp->b_bcount != size && brealloc(bp, size) == 0)
			goto loop;
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	binshash(bp, dp);
	bp->b_dev = dev;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}
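/*
 * Illustrative sketch only (not part of the original file): when a
 * block is about to be completely overwritten there is no need to read
 * it first; getblk() yields the (possibly cached) buffer directly and
 * the caller fills it in.  The names bn and bsize are hypothetical.
 */
#ifdef notdef
	bp = getblk(dev, bn, bsize);
	bzero(bp->b_un.b_addr, (unsigned)bsize);	/* new contents go here */
	bdwrite(bp);
#endif /* notdef */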
/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
loop:
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	binshash(bp, flist);
	bp->b_dev = (dev_t)NODEV;
	bp->b_error = 0;
	if (brealloc(bp, size) == 0)
		goto loop;
	return (bp);
}

/*
 * Allocate space associated with a buffer.
 * If space cannot be obtained, the buffer is released.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	/*
	 * First need to make sure that all overlapping previous I/O
	 * is disposed of.
	 */
	if (size == bp->b_bcount)
		return (1);
	if (size < bp->b_bcount) {
		if (bp->b_flags & B_DELWRI) {
			bwrite(bp);
			return (0);
		}
		if (bp->b_flags & B_LOCKED)
			panic("brealloc");
		return (allocbuf(bp, size));
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_dev == NODEV)
		return (allocbuf(bp, size));

	trace(TR_BREALLOC, pack(bp->b_dev, size), bp->b_blkno);
	/*
	 * Search cache for any buffers that overlap the one that we
	 * are trying to allocate.  Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI).  A disk block must be mapped by at most one buffer
	 * at any point in time.  Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
	last = start + btodb(size) - 1;
	dp = BUFHASH(bp->b_dev, bp->b_blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		splx(s);
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			bwrite(ep);
			goto loop;
		}
		ep->b_flags |= B_INVAL;
		brelse(ep);
	}
	return (allocbuf(bp, size));
}
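/*
 * Illustrative sketch only (not part of the original file): brealloc()
 * returns 0 when the buffer had to be written out or released, so its
 * callers retry their search from the top, exactly as getblk() and
 * geteblk() do above.
 */
#ifdef notdef
	if (bp->b_bcount != size && brealloc(bp, size) == 0)
		goto loop;	/* buffer was released; look it up again */
#endif /* notdef */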
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	splx(s);
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_dev, bp->b_bufsize), bp->b_blkno);
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags&B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if (u.u_error == 0)			/* XXX */
		u.u_error = geterror(bp);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
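/*
 * Illustrative sketch only (not part of the original file): an
 * asynchronous caller that wants a callback rather than a wakeup can
 * set B_CALL, which biodone() honors above.  The completion routine
 * "mydone" is hypothetical; it is entered at interrupt level.
 */
#ifdef notdef
	extern int mydone();

	bp->b_flags |= B_READ|B_ASYNC|B_CALL;
	bp->b_iodone = mydone;
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
#endif /* notdef */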
/*
 * Ensure that no part of a specified block is in an incore buffer.
 */
blkflush(dev, blkno, size)
	dev_t dev;
	daddr_t blkno;
	long size;
{
	register struct buf *ep;
	struct buf *dp;
	daddr_t start, last;
	int s;

	start = blkno;
	last = start + btodb(size) - 1;
	dp = BUFHASH(dev, blkno);
loop:
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if (ep->b_dev != dev || (ep->b_flags&B_INVAL))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		s = splbio();
		if (ep->b_flags&B_BUSY) {
			ep->b_flags |= B_WANTED;
			sleep((caddr_t)ep, PRIBIO+1);
			splx(s);
			goto loop;
		}
		if (ep->b_flags & B_DELWRI) {
			splx(s);
			notavail(ep);
			bwrite(ep);
			goto loop;
		}
		splx(s);
	}
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;
	int s;

loop:
	s = splbio();
	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
	for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
		if ((bp->b_flags & B_DELWRI) == 0)
			continue;
		if (dev == NODEV || dev == bp->b_dev) {
			bp->b_flags |= B_ASYNC;
			notavail(bp);
			bwrite(bp);
			splx(s);
			goto loop;
		}
	}
	splx(s);
}

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0, return a generalized
 * code (EIO).
 */
geterror(bp)
	register struct buf *bp;
{
	int error = 0;

	if (bp->b_flags&B_ERROR)
		if ((error = bp->b_error) == 0)
			return (EIO);
	return (error);
}

/*
 * Invalidate in-core blocks belonging to closed or unmounted filesystems.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty,
 * then properly flush the queues.  Until that happy day, this suffices
 * for correctness.					... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define	dp ((struct buf *)hp)

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}
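/*
 * Illustrative sketch only (not part of the original file): an unmount
 * path uses the two routines above to push delayed writes for a device
 * and then discard whatever remains cached for it.
 */
#ifdef notdef
	bflush(dev);		/* write out any delayed-write blocks */
	binval(dev);		/* invalidate remaining cached blocks */
#endif /* notdef */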