/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.42 (Berkeley) 05/10/91
 */

#include "param.h"
#include "proc.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "resourcevar.h"

/*
 * Initialize buffers and hash links for buffers.
 */
bufinit()
{
	register int i;
	register struct buf *bp, *dp;
	register struct bufhd *hp;
	int base, residual;

	for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
		hp->b_forw = hp->b_back = (struct buf *)hp;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_BUSY|B_INVAL;
		brelse(bp);
	}
}
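/*
 * Note on the sizing arithmetic above (illustrative figures only, not
 * defaults): with bufpages = 205 and nbuf = 100, base = 2 and
 * residual = 5, so the first 5 buffer headers start with 3 CLBYTES-sized
 * clusters of memory each and the remaining 95 start with 2, which
 * accounts for all 205 clusters.
 */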
/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there is a read-ahead block, start I/O on it too.
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
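/*
 * Illustrative use of bread()/brelse() (added comment, not part of the
 * original source; the vnode, logical block number, and block size
 * below are hypothetical):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine or copy data at bp->b_un.b_addr ...
 *	brelse(bp);
 *
 * breada() is called the same way, with an additional block number and
 * size naming the block to be read ahead asynchronously.
 */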
/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
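/*
 * Summary of the three write interfaces above (descriptive comment
 * added for clarity; it introduces no new behavior):
 *
 *	bwrite()  - start the write and wait for it; the buffer is
 *		    released before returning.
 *	bdwrite() - mark the buffer dirty and release it without
 *		    starting I/O; the data reaches disk when the
 *		    buffer is later reclaimed or explicitly written.
 *	bawrite() - start the write but do not wait for it; biodone()
 *		    releases the buffer when the I/O completes.
 */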
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
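/*
 * Descriptive note on the free lists used by brelse() above (added
 * commentary, no new behavior):
 *
 *	BQ_LOCKED - buffers that may not be reallocated
 *	BQ_LRU    - valid buffers, reclaimed least-recently-used first
 *	BQ_AGE    - buffers not expected to be needed again soon
 *		    (B_AGE, errors, invalid contents); getnewbuf()
 *		    prefers this list over BQ_LRU
 *	BQ_EMPTY  - buffer headers with no memory attached
 */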
/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}
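/*
 * Illustrative note on the getblk() contract (added comment, not part
 * of the original source): the returned buffer is BUSY and associated
 * with (vp, blkno), but its contents are only valid if B_DONE or
 * B_DELWRI is set. A caller that intends to read the block, rather
 * than overwrite it entirely, is expected to use bread() above, which
 * performs this check and starts the fill I/O itself.
 */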
/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}
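/*
 * Worked example for allocbuf() (hypothetical sizes, added for
 * clarity): growing a buffer from one cluster to two with
 * CLBYTES = 4096 rounds size up to sizealloc = 8192, then takes 4096
 * bytes of memory from a buffer obtained via getnewbuf(), moving the
 * pages with pagemove(); if that donor buffer is left with no memory,
 * its header is rehashed onto BQ_EMPTY. Shrinking works in reverse,
 * pushing the excess memory onto a header taken from BQ_EMPTY.
 */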
/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
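/*
 * Illustrative sketch of the asynchronous completion path (added
 * comment, not part of the original source): a caller that cannot
 * sleep may set B_CALL and b_iodone before queueing the buffer, in
 * which case biodone() above invokes the callback instead of waking a
 * sleeper or releasing the buffer; the callback is then responsible
 * for the buffer. The callback name below is hypothetical.
 *
 *	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
 *	bp->b_iodone = myiodone;
 *	VOP_STRATEGY(bp);
 *	... myiodone(bp) runs from biodone() when the I/O completes ...
 */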