/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	7.31 (Berkeley) 08/24/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
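/*
 * Illustrative sketch (not part of the original source): a filesystem
 * read path typically calls bread()/breada() and releases the buffer
 * with brelse() when it is done with the data.  The names "lbn",
 * "rablbn" and "fs_bsize" below are hypothetical placeholders for the
 * caller's logical block numbers and block size.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (rablbn != 0)
 *		error = breada(vp, lbn, fs_bsize, rablbn, fs_bsize,
 *		    cred, &bp);
 *	else
 *		error = bread(vp, lbn, fs_bsize, cred, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...copy data out of bp->b_un.b_addr...
 *	brelse(bp);
 */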
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	} else
		error = 0;		/* asynchronous write; no status to return yet */
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
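/*
 * Illustrative sketch (not part of the original source): how a write
 * path might choose among bwrite(), bdwrite() and bawrite().  The
 * flags "io_sync" and "writing_full_block" are hypothetical stand-ins
 * for the caller's own tests.
 *
 *	if (io_sync)
 *		error = bwrite(bp);	(start i/o and wait for completion)
 *	else if (!writing_full_block)
 *		bdwrite(bp);		(delay; another write is expected soon)
 *	else
 *		bawrite(bp);		(start i/o, don't wait)
 */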
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is in memory.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}
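/*
 * Illustrative sketch (not part of the original source): a caller that
 * intends to overwrite an entire block need not read it first; it can
 * take the buffer straight from getblk(), fill it, and write it back.
 * "lbn" and "fs_bsize" are hypothetical placeholders for the caller's
 * logical block number and block size.
 *
 *	bp = getblk(vp, lbn, fs_bsize);
 *	...fill bp->b_un.b_addr with fs_bsize bytes...
 *	bdwrite(bp);		(or bwrite(bp) for a synchronous write)
 */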
/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change.
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed.  Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = (dev_t)NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0 set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}
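/*
 * Illustrative sketch (not part of the original source): the other half
 * of biowait() is a device driver calling biodone() (below) from its
 * interrupt routine once the transfer finishes.  "xxintr" and "xx_actf"
 * are hypothetical names for a driver's interrupt handler and its
 * active-request queue.
 *
 *	xxintr(...)
 *	{
 *		register struct buf *bp = xx_actf;
 *
 *		...check device status; on failure set B_ERROR and
 *		   bp->b_error...
 *		biodone(bp);	(wakes a sleeping biowait, or releases
 *				 the buffer itself for B_ASYNC i/o)
 *	}
 */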
/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntflushbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Invalidate in core blocks belonging to closed or unmounted filesystem.
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds.  Normally
 * this routine is preceded by a flush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done.  This
 * routine returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	int dirty = 0;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntinvalbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
	return (dirty);
}
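/*
 * Illustrative sketch (not part of the original source): an unmount or
 * sync path would be expected to flush write-behind blocks first and
 * then invalidate what remains, roughly:
 *
 *	mntflushbuf(mountp, 0);
 *	if (mntinvalbuf(mountp))
 *		return (EBUSY);		(dirty buffers remained)
 *
 * The EBUSY return is an assumption about how a caller might treat a
 * non-zero dirty count, not something this file dictates.
 */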
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
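/*
 * Illustrative note (not part of the original source): reassignbuf()
 * only rethreads the buffer onto the clean or dirty list of "newvp";
 * it does not change bp->b_vp.  That is why vinvalbuf() above can send
 * an indirect-block buffer (one with bp->b_vp != vp) back to the lists
 * of its own vnode with:
 *
 *	reassignbuf(bp, bp->b_vp);
 *
 * before releasing it, while bwrite() and bdwrite() use the same call
 * to keep a buffer on the proper list of its own vnode as its dirty
 * state changes.
 */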