/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.46 (Berkeley) 02/05/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		bp->b_validoff = 0;
		bp->b_validend = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bfreelist[BQ_AGE] : &bfreelist[BQ_EMPTY];
		binsheadfree(bp, dp);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there are read-ahead block(s), start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i]);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
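
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * source): how a hypothetical caller might read a logical block through
 * bread()/breadn() above.  The vnode "vp", block numbers "lbn" and
 * "ralbn", block size "bsize", and credential "cred" are assumed to be
 * supplied by the caller; "example_read" itself is hypothetical.
 */
#ifdef notdef
int
example_read(vp, lbn, ralbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn, ralbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int rasize = bsize, error;

	/* read lbn; start read-ahead on ralbn if it is not already cached */
	error = breadn(vp, lbn, bsize, &ralbn, &rasize, 1, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... examine the data at bp->b_un.b_addr ... */
	brelse(bp);
	return (0);
}
#endif /* notdef */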

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
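
/*
 * A minimal sketch (illustrative only, not part of the original source)
 * of the delayed-write pattern described above: read the block, modify
 * part of it in place, then bdwrite() it so that further small writes
 * can be absorbed before the block is pushed to disk.  All names here
 * ("example_partial_write", "off", "len", "data") are hypothetical.
 */
#ifdef notdef
int
example_partial_write(vp, lbn, bsize, off, len, data, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize, off, len;
	caddr_t data;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, bsize, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	bcopy(data, bp->b_un.b_addr + off, (unsigned)len);
	bdwrite(bp);			/* mark dirty; do not start I/O yet */
	return (0);
}
#endif /* notdef */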

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
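
/*
 * A minimal sketch (illustrative only, not part of the original source):
 * when a caller is about to overwrite an entire block it need not read
 * the old contents first; it can take the buffer with getblk() above,
 * fill it, and write it.  "example_full_write" and its arguments are
 * hypothetical.
 */
#ifdef notdef
int
example_full_write(vp, lbn, bsize, data)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	caddr_t data;
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize);
	bcopy(data, bp->b_un.b_addr, (unsigned)bsize);
	return (bwrite(bp));		/* synchronous; releases the buffer */
}
#endif /* notdef */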

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
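
/*
 * Worked example of the sizing arithmetic in allocbuf() above
 * (illustrative only; CLBYTES is assumed here to be 1024, as on the
 * VAX): allocbuf(bp, 3000) on a buffer whose b_bufsize is 8192 computes
 * sizealloc = roundup(3000, 1024) = 3072, moves the surplus
 * 8192 - 3072 = 5120 bytes into a header taken from the BQ_EMPTY list
 * (which is then released to the "most free" list), and finally sets
 * b_bcount to 3000.
 */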

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

#ifdef LFS
	lfs_flush();
#endif
loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
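
/*
 * A minimal sketch (illustrative only, not part of the original source)
 * of the B_CALL convention handled by biodone() above: an asynchronous
 * consumer stores a completion routine in b_iodone and sets B_CALL, and
 * biodone() calls that routine instead of awakening a sleeper.  The
 * names "example_start_io" and "example_iodone" are hypothetical, the
 * buffer is assumed to have b_vp, b_blkno, and the transfer fields
 * already set up, and the exact declaration of b_iodone follows
 * <sys/buf.h>.
 */
#ifdef notdef
void
example_iodone(bp)
	register struct buf *bp;
{

	/* called from biodone(), typically at interrupt level */
	brelse(bp);
}

void
example_start_io(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC | B_CALL;
	bp->b_iodone = example_iodone;
	VOP_STRATEGY(bp);
}
#endif /* notdef */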