/*-
 * Copyright (c) 1982, 1986, 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_cluster.c	7.44 (Berkeley) 12/31/91
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
        register int i;
        register struct buf *bp, *dp;
        register struct bufhd *hp;
        int base, residual;

        for (hp = bufhash, i = 0; i < BUFHSZ; i++, hp++)
                hp->b_forw = hp->b_back = (struct buf *)hp;

        for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
                dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
                dp->b_flags = B_HEAD;
        }
        base = bufpages / nbuf;
        residual = bufpages % nbuf;
        for (i = 0; i < nbuf; i++) {
                bp = &buf[i];
                bp->b_dev = NODEV;
                bp->b_bcount = 0;
                bp->b_rcred = NOCRED;
                bp->b_wcred = NOCRED;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = 0;
                bp->b_un.b_addr = buffers + i * MAXBSIZE;
                if (i < residual)
                        bp->b_bufsize = (base + 1) * CLBYTES;
                else
                        bp->b_bufsize = base * CLBYTES;
                binshash(bp, &bfreelist[BQ_AGE]);
                bp->b_flags = B_BUSY|B_INVAL;
                brelse(bp);
        }
}
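
/*
 * Worked example of the sizing above (illustrative numbers only, not
 * taken from any particular configuration): with bufpages = 100 and
 * nbuf = 32, base = 100 / 32 = 3 and residual = 100 % 32 = 4, so the
 * first 4 buffer headers start with 4 * CLBYTES of memory each and the
 * remaining 28 start with 3 * CLBYTES each.  Since 4*4 + 28*3 = 100,
 * every cluster counted by bufpages is handed out, and no two headers
 * differ in size by more than one cluster.
 */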

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
        struct vnode *vp;
        daddr_t blkno;
        int size;
        struct ucred *cred;
        struct buf **bpp;
{
        struct proc *p = curproc;               /* XXX */
        register struct buf *bp;

        if (size == 0)
                panic("bread: size 0");
        *bpp = bp = getblk(vp, blkno, size);
        if (bp->b_flags & (B_DONE | B_DELWRI)) {
                trace(TR_BREADHIT, pack(vp, size), blkno);
                return (0);
        }
        bp->b_flags |= B_READ;
        if (bp->b_bcount > bp->b_bufsize)
                panic("bread");
        if (bp->b_rcred == NOCRED && cred != NOCRED) {
                crhold(cred);
                bp->b_rcred = cred;
        }
        VOP_STRATEGY(bp);
        trace(TR_BREADMISS, pack(vp, size), blkno);
        p->p_stats->p_ru.ru_inblock++;          /* pay for read */
        return (biowait(bp));
}

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
        struct vnode *vp;
        daddr_t blkno; int size;
        daddr_t rablkno; int rabsize;
        struct ucred *cred;
        struct buf **bpp;
{
        struct proc *p = curproc;               /* XXX */
        register struct buf *bp, *rabp;

        bp = NULL;
        /*
         * If the block is not memory resident,
         * allocate a buffer and start I/O.
         */
        if (!incore(vp, blkno)) {
                *bpp = bp = getblk(vp, blkno, size);
                if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
                        bp->b_flags |= B_READ;
                        if (bp->b_bcount > bp->b_bufsize)
                                panic("breada");
                        if (bp->b_rcred == NOCRED && cred != NOCRED) {
                                crhold(cred);
                                bp->b_rcred = cred;
                        }
                        VOP_STRATEGY(bp);
                        trace(TR_BREADMISS, pack(vp, size), blkno);
                        p->p_stats->p_ru.ru_inblock++;  /* pay for read */
                } else
                        trace(TR_BREADHIT, pack(vp, size), blkno);
        }

        /*
         * If there is a read-ahead block, start I/O on it too.
         */
        if (!incore(vp, rablkno)) {
                rabp = getblk(vp, rablkno, rabsize);
                if (rabp->b_flags & (B_DONE | B_DELWRI)) {
                        brelse(rabp);
                        trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
                } else {
                        rabp->b_flags |= B_ASYNC | B_READ;
                        if (rabp->b_bcount > rabp->b_bufsize)
                                panic("breadrabp");
                        if (rabp->b_rcred == NOCRED && cred != NOCRED) {
                                crhold(cred);
                                rabp->b_rcred = cred;
                        }
                        VOP_STRATEGY(rabp);
                        trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
                        p->p_stats->p_ru.ru_inblock++;  /* pay in advance */
                }
        }

        /*
         * If block was memory resident, let bread get it.
         * If block was not memory resident, the read was
         * started above, so just wait for the read to complete.
         */
        if (bp == NULL)
                return (bread(vp, blkno, size, cred, bpp));
        return (biowait(bp));
}
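
/*
 * Illustrative usage sketch, not part of the original file and not
 * compiled (hence "#ifdef notdef"): one way a filesystem read path
 * might use breada/bread.  The routine name example_read and its
 * arguments are hypothetical.
 */
#ifdef notdef
example_read(vp, lbn, bsize, cred)
        struct vnode *vp;
        daddr_t lbn;
        int bsize;
        struct ucred *cred;
{
        struct buf *bp;
        int error;

        /* read block lbn and start read-ahead on lbn + 1 */
        if (error = breada(vp, lbn, bsize, lbn + 1, bsize, cred, &bp)) {
                brelse(bp);
                return (error);
        }
        /* ... copy data out of bp->b_un.b_addr ... */
        brelse(bp);                     /* hand the buffer back to the cache */
        return (0);
}
#endif /* notdef */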

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
        register struct buf *bp;
{
        struct proc *p = curproc;               /* XXX */
        register int flag;
        int s, error;

        flag = bp->b_flags;
        bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
        if (flag & B_ASYNC) {
                if ((flag & B_DELWRI) == 0)
                        p->p_stats->p_ru.ru_oublock++;  /* no one paid yet */
                else
                        reassignbuf(bp, bp->b_vp);
        }
        trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
        if (bp->b_bcount > bp->b_bufsize)
                panic("bwrite");
        s = splbio();
        bp->b_vp->v_numoutput++;
        splx(s);
        VOP_STRATEGY(bp);

        /*
         * If the write was synchronous, then await I/O completion.
         * If the write was "delayed", then we put the buffer on
         * the queue of blocks awaiting I/O completion status.
         */
        if ((flag & B_ASYNC) == 0) {
                error = biowait(bp);
                if ((flag & B_DELWRI) == 0)
                        p->p_stats->p_ru.ru_oublock++;  /* no one paid yet */
                else
                        reassignbuf(bp, bp->b_vp);
                brelse(bp);
        } else if (flag & B_DELWRI) {
                bp->b_flags |= B_AGE;
                error = 0;
        } else
                error = 0;      /* asynchronous; no completion status yet */
        return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
        register struct buf *bp;
{
        struct proc *p = curproc;               /* XXX */

        if ((bp->b_flags & B_DELWRI) == 0) {
                bp->b_flags |= B_DELWRI;
                reassignbuf(bp, bp->b_vp);
                p->p_stats->p_ru.ru_oublock++;  /* no one paid yet */
        }
        /*
         * If this is a tape drive, the write must be initiated.
         */
        if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
                bawrite(bp);
        } else {
                bp->b_flags |= (B_DONE | B_DELWRI);
                brelse(bp);
        }
}
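
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * how a caller chooses among the three write interfaces for a buffer
 * it has just modified.  The flags "sync" and "lastpiece" are
 * hypothetical.
 */
#ifdef notdef
example_write(bp, sync, lastpiece)
        register struct buf *bp;
        int sync, lastpiece;
{

        if (sync)
                return (bwrite(bp));    /* start I/O and wait for it */
        if (lastpiece) {
                bawrite(bp);            /* start I/O, do not wait */
                return (0);
        }
        bdwrite(bp);                    /* just mark dirty; write later */
        return (0);
}
#endif /* notdef */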

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
        register struct buf *bp;
{

        /*
         * Setting the ASYNC flag causes bwrite to return
         * after starting the I/O.
         */
        bp->b_flags |= B_ASYNC;
        (void) bwrite(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
        register struct buf *bp;
{
        register struct buf *flist;
        int s;

        trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
        /*
         * If a process is waiting for the buffer, or
         * is waiting for a free buffer, awaken it.
         */
        if (bp->b_flags & B_WANTED)
                wakeup((caddr_t)bp);
        if (bfreelist[0].b_flags & B_WANTED) {
                bfreelist[0].b_flags &= ~B_WANTED;
                wakeup((caddr_t)bfreelist);
        }
        /*
         * Retry I/O for locked buffers rather than invalidating them.
         */
        if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
                bp->b_flags &= ~B_ERROR;
        /*
         * Disassociate buffers that are no longer valid.
         */
        if (bp->b_flags & (B_NOCACHE | B_ERROR))
                bp->b_flags |= B_INVAL;
        if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
                if (bp->b_vp)
                        brelvp(bp);
                bp->b_flags &= ~B_DELWRI;
        }
        /*
         * Stick the buffer back on a free list.
         */
        s = splbio();
        if (bp->b_bufsize <= 0) {
                /* block has no buffer ... put at front of unused buffer list */
                flist = &bfreelist[BQ_EMPTY];
                binsheadfree(bp, flist);
        } else if (bp->b_flags & (B_ERROR | B_INVAL)) {
                /* block has no info ... put at front of most free list */
                flist = &bfreelist[BQ_AGE];
                binsheadfree(bp, flist);
        } else {
                if (bp->b_flags & B_LOCKED)
                        flist = &bfreelist[BQ_LOCKED];
                else if (bp->b_flags & B_AGE)
                        flist = &bfreelist[BQ_AGE];
                else
                        flist = &bfreelist[BQ_LRU];
                binstailfree(bp, flist);
        }
        bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
        splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
        struct vnode *vp;
        daddr_t blkno;
{
        register struct buf *bp;
        register struct buf *dp;

        dp = BUFHASH(vp, blkno);
        for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
                if (bp->b_lblkno == blkno && bp->b_vp == vp &&
                    (bp->b_flags & B_INVAL) == 0)
                        return (1);
        return (0);
}
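
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * incore() lets a caller test for residency without allocating a
 * buffer, for instance to skip a read for a block that is already
 * cached.  All names below are hypothetical.
 */
#ifdef notdef
example_prefetch(vp, lbn, bsize, cred)
        struct vnode *vp;
        daddr_t lbn;
        int bsize;
        struct ucred *cred;
{
        struct buf *bp;
        int error;

        if (incore(vp, lbn))
                return (0);             /* already resident; nothing to do */
        error = bread(vp, lbn, bsize, cred, &bp);
        brelse(bp);                     /* leave it cached for later use */
        return (error);
}
#endif /* notdef */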

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
        register struct vnode *vp;
        daddr_t blkno;
        int size;
{
        register struct buf *bp, *dp;
        int s;

        if (size > MAXBSIZE)
                panic("getblk: size too big");
        /*
         * Search the cache for the block. If the buffer is found,
         * but it is currently locked, then we must wait for it to
         * become available.
         */
        dp = BUFHASH(vp, blkno);
loop:
        for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
                if (bp->b_lblkno != blkno || bp->b_vp != vp ||
                    (bp->b_flags & B_INVAL))
                        continue;
                s = splbio();
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        sleep((caddr_t)bp, PRIBIO + 1);
                        splx(s);
                        goto loop;
                }
                bremfree(bp);
                bp->b_flags |= B_BUSY;
                splx(s);
                if (bp->b_bcount != size) {
                        printf("getblk: stray size");
                        bp->b_flags |= B_INVAL;
                        bwrite(bp);
                        goto loop;
                }
                bp->b_flags |= B_CACHE;
                return (bp);
        }
        bp = getnewbuf();
        bremhash(bp);
        bgetvp(vp, bp);
        bp->b_bcount = 0;
        bp->b_lblkno = blkno;
        bp->b_blkno = blkno;
        bp->b_error = 0;
        bp->b_resid = 0;
        binshash(bp, dp);
        allocbuf(bp, size);
        return (bp);
}

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
        int size;
{
        register struct buf *bp, *flist;

        if (size > MAXBSIZE)
                panic("geteblk: size too big");
        bp = getnewbuf();
        bp->b_flags |= B_INVAL;
        bremhash(bp);
        flist = &bfreelist[BQ_AGE];
        bp->b_bcount = 0;
        bp->b_error = 0;
        bp->b_resid = 0;
        binshash(bp, flist);
        allocbuf(bp, size);
        return (bp);
}
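
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a block that will be completely overwritten need not be read first;
 * getblk() alone binds a busy buffer to (vp, lbn).  Names are
 * hypothetical.
 */
#ifdef notdef
example_overwrite(vp, lbn, bsize)
        struct vnode *vp;
        daddr_t lbn;
        int bsize;
{
        register struct buf *bp;

        bp = getblk(vp, lbn, bsize);
        bzero(bp->b_un.b_addr, (unsigned)bsize);  /* build new contents */
        return (bwrite(bp));            /* write synchronously and release */
}
#endif /* notdef */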

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
        register struct buf *tp;
        int size;
{
        register struct buf *bp, *ep;
        int sizealloc, take, s;

        sizealloc = roundup(size, CLBYTES);
        /*
         * Buffer size does not change
         */
        if (sizealloc == tp->b_bufsize)
                goto out;
        /*
         * Buffer size is shrinking.
         * Place excess space in a buffer header taken from the
         * BQ_EMPTY buffer list and placed on the "most free" list.
         * If no extra buffer headers are available, leave the
         * extra space in the present buffer.
         */
        if (sizealloc < tp->b_bufsize) {
                ep = bfreelist[BQ_EMPTY].av_forw;
                if (ep == &bfreelist[BQ_EMPTY])
                        goto out;
                s = splbio();
                bremfree(ep);
                ep->b_flags |= B_BUSY;
                splx(s);
                pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
                    (int)tp->b_bufsize - sizealloc);
                ep->b_bufsize = tp->b_bufsize - sizealloc;
                tp->b_bufsize = sizealloc;
                ep->b_flags |= B_INVAL;
                ep->b_bcount = 0;
                brelse(ep);
                goto out;
        }
        /*
         * More buffer space is needed. Get it out of buffers on
         * the "most free" list, placing the empty headers on the
         * BQ_EMPTY buffer header list.
         */
        while (tp->b_bufsize < sizealloc) {
                take = sizealloc - tp->b_bufsize;
                bp = getnewbuf();
                if (take >= bp->b_bufsize)
                        take = bp->b_bufsize;
                pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
                    &tp->b_un.b_addr[tp->b_bufsize], take);
                tp->b_bufsize += take;
                bp->b_bufsize = bp->b_bufsize - take;
                if (bp->b_bcount > bp->b_bufsize)
                        bp->b_bcount = bp->b_bufsize;
                if (bp->b_bufsize <= 0) {
                        bremhash(bp);
                        binshash(bp, &bfreelist[BQ_EMPTY]);
                        bp->b_dev = NODEV;
                        bp->b_error = 0;
                        bp->b_flags |= B_INVAL;
                }
                brelse(bp);
        }
out:
        tp->b_bcount = size;
        return (1);
}
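
/*
 * Worked example for allocbuf (illustrative only; CLBYTES is machine
 * dependent, 1024 is assumed here): a request for size = 3000 rounds
 * up to sizealloc = 3072.  If the buffer currently holds 4096 bytes,
 * the excess 1024 bytes are shifted with pagemove() onto a header
 * taken from BQ_EMPTY and released; if it holds only 2048 bytes, the
 * missing 1024 are taken from buffers found on the free lists.  In
 * either case b_bufsize ends up at 3072 and b_bcount at the requested
 * 3000.
 */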

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
        register struct buf *bp, *dp;
        register struct ucred *cred;
        int s;

#ifdef LFS
        lfs_flush();
#endif
loop:
        s = splbio();
        for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
                if (dp->av_forw != dp)
                        break;
        if (dp == bfreelist) {          /* no free blocks */
                dp->b_flags |= B_WANTED;
                sleep((caddr_t)dp, PRIBIO + 1);
                splx(s);
                goto loop;
        }
        bp = dp->av_forw;
        bremfree(bp);
        bp->b_flags |= B_BUSY;
        splx(s);
        if (bp->b_flags & B_DELWRI) {
                (void) bawrite(bp);
                goto loop;
        }
        trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
        if (bp->b_vp)
                brelvp(bp);
        if (bp->b_rcred != NOCRED) {
                cred = bp->b_rcred;
                bp->b_rcred = NOCRED;
                crfree(cred);
        }
        if (bp->b_wcred != NOCRED) {
                cred = bp->b_wcred;
                bp->b_wcred = NOCRED;
                crfree(cred);
        }
        bp->b_flags = B_BUSY;
        bp->b_dirtyoff = bp->b_dirtyend = 0;
        return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
        register struct buf *bp;
{
        int s;

        s = splbio();
        while ((bp->b_flags & B_DONE) == 0)
                sleep((caddr_t)bp, PRIBIO);
        splx(s);
        if ((bp->b_flags & B_ERROR) == 0)
                return (0);
        if (bp->b_error)
                return (bp->b_error);
        return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
        register struct buf *bp;
{

        if (bp->b_flags & B_DONE)
                panic("dup biodone");
        bp->b_flags |= B_DONE;
        if ((bp->b_flags & B_READ) == 0)
                vwakeup(bp);
        if (bp->b_flags & B_CALL) {
                bp->b_flags &= ~B_CALL;
                (*bp->b_iodone)(bp);
                return;
        }
        if (bp->b_flags & B_ASYNC)
                brelse(bp);
        else {
                bp->b_flags &= ~B_WANTED;
                wakeup((caddr_t)bp);
        }
}
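
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a caller that cannot sleep (the pageout daemon, for instance) can
 * have biodone() invoke a callback instead of waking a waiter.  The
 * names example_iodone and example_startio are hypothetical, and the
 * buffer is assumed to be already set up for the transfer.
 */
#ifdef notdef
example_iodone(bp)
        register struct buf *bp;
{

        /* called from biodone() when the transfer completes */
        brelse(bp);
}

example_startio(bp)
        register struct buf *bp;
{

        bp->b_flags |= B_READ | B_CALL;
        bp->b_iodone = example_iodone;
        VOP_STRATEGY(bp);
        /* no biowait(); example_iodone runs at completion time instead */
}
#endif /* notdef */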