/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	7.30 (Berkeley) 06/28/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller)
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error = 0;	/* plain async writes report no error */

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada)
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is in memory.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	brealloc(bp, size);
	return (bp);
}

/*
 * get an empty block,
 * not assigned to any particular device
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	brealloc(bp, size);
	return (bp);
}

/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{
	daddr_t start, last;
	register struct buf *ep;
	struct buf *dp;
	int s;

	if (size == bp->b_bcount)
		return;
	allocbuf(bp, size);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0 set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntflushbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Invalidate in core blocks belonging to closed or umounted filesystem
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds.  Normally
 * this routine is preceded by a flush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done.
 * Mntinvalbuf returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	int dirty = 0;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntinvalbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
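
/*
 * Illustrative sketch only: a typical file-system use of the routines
 * above (bread to fault in a block, bdwrite to mark it dirty and release
 * it without waiting, brelse to drop it on error).  The routine name
 * example_update_block and its arguments are hypothetical and are not
 * part of this interface; the fragment is guarded by "notdef" and is
 * never compiled.
 */
#ifdef notdef
example_update_block(vp, lbn, size, cred)
	struct vnode *vp;
	daddr_t lbn;
	int size;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	/*
	 * bread returns the block in a busy buffer; on error the buffer
	 * must still be released before returning.
	 */
	if (error = bread(vp, lbn, size, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	/*
	 * The caller would modify the buffer contents here, then use
	 * bwrite (synchronous), bawrite (asynchronous), or bdwrite
	 * (delayed write) to write the buffer; all three release it.
	 */
	bdwrite(bp);
	return (0);
}
#endif /* notdef */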