/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	7.40 (Berkeley) 05/08/91
 */

#include "param.h"
#include "proc.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "resourcevar.h"

/*
 * Initialize buffers and hash links for buffers.
 *
 * Called once at boot.  Empties every hash chain, sets up the four
 * free-list queue heads (BQ_LOCKED/BQ_LRU/BQ_AGE/BQ_EMPTY), and then
 * distributes the "bufpages" pages of buffer memory evenly over the
 * "nbuf" buffer headers before releasing each header onto the AGE
 * free list via brelse().
 */
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	/* Each hash bucket starts as an empty, self-linked chain. */
	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	/* Free-list heads are self-linked on both hash and free links. */
	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	/*
	 * Split bufpages across nbuf headers: the first "residual"
	 * headers get one extra page so nothing is left over.
	 */
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		/* Virtual space is reserved at MAXBSIZE per header. */
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		/* Mark busy+invalid so brelse() files it as reusable. */
		bp->b_flags = B_BUSY|B_INVAL;
		brelse(bp);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 *
 * vp/blkno identify the block; size is the expected buffer size.
 * cred (if not NOCRED) is cached on the buffer for future reads.
 * The locked buffer is returned through *bpp; the return value is
 * 0 on success or the error from biowait().
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	/* Cache hit: valid (B_DONE) or dirty-pending (B_DELWRI) data. */
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	/* Miss: set up and issue the read, then wait for completion. */
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block (rablkno/rabsize).  The read-ahead I/O is
 * asynchronous and its buffer is released on completion; only
 * the primary block is returned through *bpp.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there is a read-ahead block, start I/O on it too.
	 * B_ASYNC makes biodone() release the buffer when the
	 * read completes; no one waits on it here.
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			/* Already resident and valid; just let it go. */
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}

/*
 * Synchronous write.
 * Release buffer on completion.
 *
 * If B_ASYNC is set on entry the routine returns after starting
 * the I/O and biodone() releases the buffer; otherwise it waits
 * for completion, releases the buffer itself, and returns the
 * I/O status from biowait().
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error;

	/* Snapshot the flags before clearing the transient ones. */
	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			/* Delayed write now going to disk: move the
			 * buffer off the vnode's dirty list. */
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 *
	 * NOTE(review): on the B_ASYNC path with B_DELWRI clear,
	 * "error" is returned uninitialized; callers (e.g. bawrite)
	 * discard the result, but this looks unintended -- confirm.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag&B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		/* Age delayed-write buffers for earlier reuse. */
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	/* First delay of this buffer: account and move it onto the
	 * vnode's dirty-buffer list. */
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 * NOTE(review): a zero return from this B_TAPE ioctl probe
	 * is taken to mean "device is a tape" -- verify against the
	 * driver ioctl convention.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		/* Mark valid and dirty, then hang it on a free list
		 * until someone writes or reclaims it. */
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 * (bfreelist[0] doubles as the "any free buffer" wait channel.)
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		/* Valid data: queue by retention class, at the tail so
		 * it is reclaimed last. */
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 * Returns 1 if a valid buffer for (vp, blkno) is on its hash
 * chain, 0 otherwise.  The buffer is not locked or returned.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 * The buffer is returned locked (B_BUSY).
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, we must wait for it to
	 * become available, then rescan from the top since the
	 * buffer may have been reassigned while we slept.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		/* Claim the buffer: off the free list, marked busy. */
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			/* Size mismatch: push the stale buffer out and
			 * retry with a fresh allocation. */
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/* Not in the cache: take a free buffer and bind it here. */
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	/*
	 * The buffer is marked invalid (it caches no block) and is
	 * hashed onto the BQ_AGE head so it can still be found and
	 * reclaimed.  Returned locked (B_BUSY, from getnewbuf).
	 */
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 *
 * tp is the buffer being resized; size is the desired byte count
 * (rounded up internally to a multiple of CLBYTES for allocation).
 * Pages are shuffled between buffers with pagemove().  Always
 * returns 1 in this version.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		/* Hand the tail pages of tp to the empty header ep. */
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		/* Steal "take" bytes from the end of bp onto tp. */
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			/* Fully drained: retire the header to BQ_EMPTY. */
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 * Returns the buffer locked (B_BUSY), stripped of its vnode and
 * cached credentials; may sleep waiting for a buffer to appear.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	/* Scan AGE first, then LRU (dp walks downward to bfreelist). */
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		/* Dirty: push it to disk (released by biodone) and
		 * go find another candidate. */
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	/* Drop any cached read/write credentials. */
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	/* Block interrupts so the B_DONE test and sleep are atomic
	 * with respect to biodone(). */
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 * Called from interrupt level by device drivers.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	/* Completed write: credit the vnode's output count. */
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		/* No one is waiting; put the buffer back ourselves. */
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}