1 /* $NetBSD: vfs_bio.c,v 1.65 2000/02/14 20:12:03 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1994 Christopher G. Demetriou 5 * Copyright (c) 1982, 1986, 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94 42 */ 43 44 /* 45 * Some references: 46 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986) 47 * Leffler, et al.: The Design and Implementation of the 4.3BSD 48 * UNIX Operating System (Addison Welley, 1989) 49 */ 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/proc.h> 54 #include <sys/buf.h> 55 #include <sys/vnode.h> 56 #include <sys/mount.h> 57 #include <sys/trace.h> 58 #include <sys/malloc.h> 59 #include <sys/resourcevar.h> 60 #include <sys/conf.h> 61 62 #include <vm/vm.h> 63 64 #include <miscfs/specfs/specdev.h> 65 66 /* Macros to clear/set/test flags. */ 67 #define SET(t, f) (t) |= (f) 68 #define CLR(t, f) (t) &= ~(f) 69 #define ISSET(t, f) ((t) & (f)) 70 71 /* 72 * Definitions for the buffer hash lists. 73 */ 74 #define BUFHASH(dvp, lbn) \ 75 (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash]) 76 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; 77 u_long bufhash; 78 struct bio_ops bioops; /* I/O operation notification */ 79 80 /* 81 * Insq/Remq for the buffer hash lists. 82 */ 83 #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash) 84 #define bremhash(bp) LIST_REMOVE(bp, b_hash) 85 86 /* 87 * Definitions for the buffer free lists. 88 */ 89 #define BQUEUES 4 /* number of free buffer queues */ 90 91 #define BQ_LOCKED 0 /* super-blocks &c */ 92 #define BQ_LRU 1 /* lru, useful buffers */ 93 #define BQ_AGE 2 /* rubbish */ 94 #define BQ_EMPTY 3 /* buffer headers with no memory */ 95 96 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; 97 int needbuffer; 98 99 /* 100 * Buffer pool for I/O buffers. 101 */ 102 struct pool bufpool; 103 104 /* 105 * Insq/Remq for the buffer free lists. 106 */ 107 #define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist) 108 #define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist) 109 110 static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int, 111 struct ucred *, int)); 112 int count_lock_queue __P((void)); 113 114 void 115 bremfree(bp) 116 struct buf *bp; 117 { 118 int s = splbio(); 119 120 struct bqueues *dp = NULL; 121 122 /* 123 * We only calculate the head of the freelist when removing 124 * the last element of the list as that is the only time that 125 * it is needed (e.g. to reset the tail pointer). 126 * 127 * NB: This makes an assumption about how tailq's are implemented. 128 */ 129 if (bp->b_freelist.tqe_next == NULL) { 130 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) 131 if (dp->tqh_last == &bp->b_freelist.tqe_next) 132 break; 133 if (dp == &bufqueues[BQUEUES]) 134 panic("bremfree: lost tail"); 135 } 136 TAILQ_REMOVE(dp, bp, b_freelist); 137 138 splx(s); 139 } 140 141 /* 142 * Initialize buffers and hash links for buffers. 143 */ 144 void 145 bufinit() 146 { 147 register struct buf *bp; 148 struct bqueues *dp; 149 register int i; 150 int base, residual; 151 152 /* 153 * Initialize the buffer pool. This pool is used for buffers 154 * which are strictly I/O control blocks, not buffer cache 155 * buffers. 156 */ 157 pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0, 158 NULL, NULL, M_DEVBUF); 159 160 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) 161 TAILQ_INIT(dp); 162 bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash); 163 base = bufpages / nbuf; 164 residual = bufpages % nbuf; 165 for (i = 0; i < nbuf; i++) { 166 bp = &buf[i]; 167 memset((char *)bp, 0, sizeof(*bp)); 168 bp->b_dev = NODEV; 169 bp->b_rcred = NOCRED; 170 bp->b_wcred = NOCRED; 171 bp->b_vnbufs.le_next = NOLIST; 172 LIST_INIT(&bp->b_dep); 173 bp->b_data = buffers + i * MAXBSIZE; 174 if (i < residual) 175 bp->b_bufsize = (base + 1) * NBPG; 176 else 177 bp->b_bufsize = base * NBPG; 178 bp->b_flags = B_INVAL; 179 dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY]; 180 binsheadfree(bp, dp); 181 binshash(bp, &invalhash); 182 } 183 } 184 185 static __inline struct buf * 186 bio_doread(vp, blkno, size, cred, async) 187 struct vnode *vp; 188 daddr_t blkno; 189 int size; 190 struct ucred *cred; 191 int async; 192 { 193 register struct buf *bp; 194 struct proc *p = (curproc != NULL ? curproc : &proc0); /* XXX */ 195 196 bp = getblk(vp, blkno, size, 0, 0); 197 198 /* 199 * If buffer does not have data valid, start a read. 200 * Note that if buffer is B_INVAL, getblk() won't return it. 201 * Therefore, it's valid if it's I/O has completed or been delayed. 202 */ 203 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) { 204 /* Start I/O for the buffer (keeping credentials). */ 205 SET(bp->b_flags, B_READ | async); 206 if (cred != NOCRED && bp->b_rcred == NOCRED) { 207 crhold(cred); 208 bp->b_rcred = cred; 209 } 210 VOP_STRATEGY(bp); 211 212 /* Pay for the read. */ 213 p->p_stats->p_ru.ru_inblock++; 214 } else if (async) { 215 brelse(bp); 216 } 217 218 return (bp); 219 } 220 221 /* 222 * Read a disk block. 223 * This algorithm described in Bach (p.54). 224 */ 225 int 226 bread(vp, blkno, size, cred, bpp) 227 struct vnode *vp; 228 daddr_t blkno; 229 int size; 230 struct ucred *cred; 231 struct buf **bpp; 232 { 233 register struct buf *bp; 234 235 /* Get buffer for block. */ 236 bp = *bpp = bio_doread(vp, blkno, size, cred, 0); 237 238 /* 239 * Delayed write buffers are found in the cache and have 240 * valid contents. Also, B_ERROR is not set, otherwise 241 * getblk() would not have returned them. 242 */ 243 if (ISSET(bp->b_flags, B_DONE|B_DELWRI)) 244 return (0); 245 246 /* 247 * Otherwise, we had to start a read for it; wait until 248 * it's valid and return the result. 249 */ 250 return (biowait(bp)); 251 } 252 253 /* 254 * Read-ahead multiple disk blocks. The first is sync, the rest async. 255 * Trivial modification to the breada algorithm presented in Bach (p.55). 256 */ 257 int 258 breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) 259 struct vnode *vp; 260 daddr_t blkno; int size; 261 daddr_t rablks[]; int rasizes[]; 262 int nrablks; 263 struct ucred *cred; 264 struct buf **bpp; 265 { 266 register struct buf *bp; 267 int i; 268 269 bp = *bpp = bio_doread(vp, blkno, size, cred, 0); 270 271 /* 272 * For each of the read-ahead blocks, start a read, if necessary. 273 */ 274 for (i = 0; i < nrablks; i++) { 275 /* If it's in the cache, just go on to next one. */ 276 if (incore(vp, rablks[i])) 277 continue; 278 279 /* Get a buffer for the read-ahead block */ 280 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC); 281 } 282 283 /* 284 * Delayed write buffers are found in the cache and have 285 * valid contents. Also, B_ERROR is not set, otherwise 286 * getblk() would not have returned them. 287 */ 288 if (ISSET(bp->b_flags, B_DONE|B_DELWRI)) 289 return (0); 290 291 /* 292 * Otherwise, we had to start a read for it; wait until 293 * it's valid and return the result. 294 */ 295 return (biowait(bp)); 296 } 297 298 /* 299 * Read with single-block read-ahead. Defined in Bach (p.55), but 300 * implemented as a call to breadn(). 301 * XXX for compatibility with old file systems. 302 */ 303 int 304 breada(vp, blkno, size, rablkno, rabsize, cred, bpp) 305 struct vnode *vp; 306 daddr_t blkno; int size; 307 daddr_t rablkno; int rabsize; 308 struct ucred *cred; 309 struct buf **bpp; 310 { 311 312 return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp)); 313 } 314 315 /* 316 * Block write. Described in Bach (p.56) 317 */ 318 int 319 bwrite(bp) 320 struct buf *bp; 321 { 322 int rv, sync, wasdelayed, s; 323 struct proc *p = (curproc != NULL ? curproc : &proc0); /* XXX */ 324 struct vnode *vp; 325 struct mount *mp; 326 327 /* 328 * Remember buffer type, to switch on it later. If the write was 329 * synchronous, but the file system was mounted with MNT_ASYNC, 330 * convert it to a delayed write. 331 * XXX note that this relies on delayed tape writes being converted 332 * to async, not sync writes (which is safe, but ugly). 333 */ 334 sync = !ISSET(bp->b_flags, B_ASYNC); 335 if (sync && bp->b_vp && bp->b_vp->v_mount && 336 ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) { 337 bdwrite(bp); 338 return (0); 339 } 340 341 /* 342 * Collect statistics on synchronous and asynchronous writes. 343 * Writes to block devices are charged to their associated 344 * filesystem (if any). 345 */ 346 if ((vp = bp->b_vp) != NULL) { 347 if (vp->v_type == VBLK) 348 mp = vp->v_specmountpoint; 349 else 350 mp = vp->v_mount; 351 if (mp != NULL) { 352 if (sync) 353 mp->mnt_stat.f_syncwrites++; 354 else 355 mp->mnt_stat.f_asyncwrites++; 356 } 357 } 358 359 wasdelayed = ISSET(bp->b_flags, B_DELWRI); 360 361 s = splbio(); 362 363 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); 364 365 /* 366 * Pay for the I/O operation and make sure the buf is on the correct 367 * vnode queue. 368 */ 369 if (wasdelayed) 370 reassignbuf(bp, bp->b_vp); 371 else 372 p->p_stats->p_ru.ru_oublock++; 373 374 /* Initiate disk write. Make sure the appropriate party is charged. */ 375 bp->b_vp->v_numoutput++; 376 splx(s); 377 378 SET(bp->b_flags, B_WRITEINPROG); 379 VOP_STRATEGY(bp); 380 381 if (sync) { 382 /* If I/O was synchronous, wait for it to complete. */ 383 rv = biowait(bp); 384 385 /* Release the buffer. */ 386 brelse(bp); 387 388 return (rv); 389 } else { 390 return (0); 391 } 392 } 393 394 int 395 vn_bwrite(v) 396 void *v; 397 { 398 struct vop_bwrite_args *ap = v; 399 400 return (bwrite(ap->a_bp)); 401 } 402 403 /* 404 * Delayed write. 405 * 406 * The buffer is marked dirty, but is not queued for I/O. 407 * This routine should be used when the buffer is expected 408 * to be modified again soon, typically a small write that 409 * partially fills a buffer. 410 * 411 * NB: magnetic tapes cannot be delayed; they must be 412 * written in the order that the writes are requested. 413 * 414 * Described in Leffler, et al. (pp. 208-213). 415 */ 416 void 417 bdwrite(bp) 418 struct buf *bp; 419 { 420 struct proc *p = (curproc != NULL ? curproc : &proc0); /* XXX */ 421 int s; 422 423 /* If this is a tape block, write the block now. */ 424 /* XXX NOTE: the memory filesystem usurpes major device */ 425 /* XXX number 255, which is a bad idea. */ 426 if (bp->b_dev != NODEV && 427 major(bp->b_dev) != 255 && /* XXX - MFS buffers! */ 428 bdevsw[major(bp->b_dev)].d_type == D_TAPE) { 429 bawrite(bp); 430 return; 431 } 432 433 /* 434 * If the block hasn't been seen before: 435 * (1) Mark it as having been seen, 436 * (2) Charge for the write, 437 * (3) Make sure it's on its vnode's correct block list. 438 */ 439 s = splbio(); 440 441 if (!ISSET(bp->b_flags, B_DELWRI)) { 442 SET(bp->b_flags, B_DELWRI); 443 p->p_stats->p_ru.ru_oublock++; 444 reassignbuf(bp, bp->b_vp); 445 } 446 447 /* Otherwise, the "write" is done, so mark and release the buffer. */ 448 CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE); 449 splx(s); 450 451 brelse(bp); 452 } 453 454 /* 455 * Asynchronous block write; just an asynchronous bwrite(). 456 */ 457 void 458 bawrite(bp) 459 struct buf *bp; 460 { 461 462 SET(bp->b_flags, B_ASYNC); 463 VOP_BWRITE(bp); 464 } 465 466 /* 467 * Ordered block write; asynchronous, but I/O will occur in order queued. 468 */ 469 void 470 bowrite(bp) 471 struct buf *bp; 472 { 473 474 SET(bp->b_flags, B_ASYNC | B_ORDERED); 475 VOP_BWRITE(bp); 476 } 477 478 /* 479 * Same as first half of bdwrite, mark buffer dirty, but do not release it. 480 */ 481 void 482 bdirty(bp) 483 struct buf *bp; 484 { 485 struct proc *p = (curproc != NULL ? curproc : &proc0); /* XXX */ 486 int s; 487 488 s = splbio(); 489 490 CLR(bp->b_flags, B_AGE); 491 492 if (!ISSET(bp->b_flags, B_DELWRI)) { 493 SET(bp->b_flags, B_DELWRI); 494 p->p_stats->p_ru.ru_oublock++; 495 reassignbuf(bp, bp->b_vp); 496 } 497 498 splx(s); 499 } 500 501 /* 502 * Release a buffer on to the free lists. 503 * Described in Bach (p. 46). 504 */ 505 void 506 brelse(bp) 507 struct buf *bp; 508 { 509 struct bqueues *bufq; 510 int s; 511 512 /* Wake up any processes waiting for any buffer to become free. */ 513 if (needbuffer) { 514 needbuffer = 0; 515 wakeup(&needbuffer); 516 } 517 518 /* Block disk interrupts. */ 519 s = splbio(); 520 521 /* Wake up any proceeses waiting for _this_ buffer to become free. */ 522 if (ISSET(bp->b_flags, B_WANTED)) { 523 CLR(bp->b_flags, B_WANTED|B_AGE); 524 wakeup(bp); 525 } 526 527 /* 528 * Determine which queue the buffer should be on, then put it there. 529 */ 530 531 /* If it's locked, don't report an error; try again later. */ 532 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR)) 533 CLR(bp->b_flags, B_ERROR); 534 535 /* If it's not cacheable, or an error, mark it invalid. */ 536 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) 537 SET(bp->b_flags, B_INVAL); 538 539 if (ISSET(bp->b_flags, B_VFLUSH)) { 540 /* 541 * This is a delayed write buffer that was just flushed to 542 * disk. It is still on the LRU queue. If it's become 543 * invalid, then we need to move it to a different queue; 544 * otherwise leave it in its current position. 545 */ 546 CLR(bp->b_flags, B_VFLUSH); 547 if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) 548 goto already_queued; 549 else 550 bremfree(bp); 551 } 552 553 if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) { 554 /* 555 * If it's invalid or empty, dissociate it from its vnode 556 * and put on the head of the appropriate queue. 557 */ 558 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 559 (*bioops.io_deallocate)(bp); 560 CLR(bp->b_flags, B_DONE|B_DELWRI); 561 if (bp->b_vp) { 562 reassignbuf(bp, bp->b_vp); 563 brelvp(bp); 564 } 565 if (bp->b_bufsize <= 0) 566 /* no data */ 567 bufq = &bufqueues[BQ_EMPTY]; 568 else 569 /* invalid data */ 570 bufq = &bufqueues[BQ_AGE]; 571 binsheadfree(bp, bufq); 572 } else { 573 /* 574 * It has valid data. Put it on the end of the appropriate 575 * queue, so that it'll stick around for as long as possible. 576 */ 577 if (ISSET(bp->b_flags, B_LOCKED)) 578 /* locked in core */ 579 bufq = &bufqueues[BQ_LOCKED]; 580 else if (ISSET(bp->b_flags, B_AGE)) 581 /* stale but valid data */ 582 bufq = &bufqueues[BQ_AGE]; 583 else 584 /* valid data */ 585 bufq = &bufqueues[BQ_LRU]; 586 binstailfree(bp, bufq); 587 } 588 589 already_queued: 590 /* Unlock the buffer. */ 591 CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED); 592 593 /* Allow disk interrupts. */ 594 splx(s); 595 } 596 597 /* 598 * Determine if a block is in the cache. 599 * Just look on what would be its hash chain. If it's there, return 600 * a pointer to it, unless it's marked invalid. If it's marked invalid, 601 * we normally don't return the buffer, unless the caller explicitly 602 * wants us to. 603 */ 604 struct buf * 605 incore(vp, blkno) 606 struct vnode *vp; 607 daddr_t blkno; 608 { 609 struct buf *bp; 610 611 bp = BUFHASH(vp, blkno)->lh_first; 612 613 /* Search hash chain */ 614 for (; bp != NULL; bp = bp->b_hash.le_next) { 615 if (bp->b_lblkno == blkno && bp->b_vp == vp && 616 !ISSET(bp->b_flags, B_INVAL)) 617 return (bp); 618 } 619 620 return (0); 621 } 622 623 /* 624 * Get a block of requested size that is associated with 625 * a given vnode and block offset. If it is found in the 626 * block cache, mark it as having been found, make it busy 627 * and return it. Otherwise, return an empty block of the 628 * correct size. It is up to the caller to insure that the 629 * cached blocks be of the correct size. 630 */ 631 struct buf * 632 getblk(vp, blkno, size, slpflag, slptimeo) 633 register struct vnode *vp; 634 daddr_t blkno; 635 int size, slpflag, slptimeo; 636 { 637 struct bufhashhdr *bh; 638 struct buf *bp; 639 int s, err; 640 641 /* 642 * XXX 643 * The following is an inlined version of 'incore()', but with 644 * the 'invalid' test moved to after the 'busy' test. It's 645 * necessary because there are some cases in which the NFS 646 * code sets B_INVAL prior to writing data to the server, but 647 * in which the buffers actually contain valid data. In this 648 * case, we can't allow the system to allocate a new buffer for 649 * the block until the write is finished. 650 */ 651 bh = BUFHASH(vp, blkno); 652 start: 653 bp = bh->lh_first; 654 for (; bp != NULL; bp = bp->b_hash.le_next) { 655 if (bp->b_lblkno != blkno || bp->b_vp != vp) 656 continue; 657 658 s = splbio(); 659 if (ISSET(bp->b_flags, B_BUSY)) { 660 SET(bp->b_flags, B_WANTED); 661 err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk", 662 slptimeo); 663 splx(s); 664 if (err) 665 return (NULL); 666 goto start; 667 } 668 669 if (!ISSET(bp->b_flags, B_INVAL)) { 670 #ifdef DIAGNOSTIC 671 if (ISSET(bp->b_flags, B_DONE|B_DELWRI) && 672 bp->b_bcount < size) 673 panic("getblk: block size invariant failed"); 674 #endif 675 SET(bp->b_flags, B_BUSY); 676 bremfree(bp); 677 splx(s); 678 break; 679 } 680 splx(s); 681 } 682 683 if (bp == NULL) { 684 if ((bp = getnewbuf(slpflag, slptimeo)) == NULL) 685 goto start; 686 binshash(bp, bh); 687 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno; 688 s = splbio(); 689 bgetvp(vp, bp); 690 splx(s); 691 } 692 allocbuf(bp, size); 693 return (bp); 694 } 695 696 /* 697 * Get an empty, disassociated buffer of given size. 698 */ 699 struct buf * 700 geteblk(size) 701 int size; 702 { 703 struct buf *bp; 704 705 while ((bp = getnewbuf(0, 0)) == 0) 706 ; 707 SET(bp->b_flags, B_INVAL); 708 binshash(bp, &invalhash); 709 allocbuf(bp, size); 710 711 return (bp); 712 } 713 714 /* 715 * Expand or contract the actual memory allocated to a buffer. 716 * 717 * If the buffer shrinks, data is lost, so it's up to the 718 * caller to have written it out *first*; this routine will not 719 * start a write. If the buffer grows, it's the callers 720 * responsibility to fill out the buffer's additional contents. 721 */ 722 void 723 allocbuf(bp, size) 724 struct buf *bp; 725 int size; 726 { 727 struct buf *nbp; 728 vsize_t desired_size; 729 int s; 730 731 desired_size = roundup(size, NBPG); 732 if (desired_size > MAXBSIZE) 733 panic("allocbuf: buffer larger than MAXBSIZE requested"); 734 735 if (bp->b_bufsize == desired_size) 736 goto out; 737 738 /* 739 * If the buffer is smaller than the desired size, we need to snarf 740 * it from other buffers. Get buffers (via getnewbuf()), and 741 * steal their pages. 742 */ 743 while (bp->b_bufsize < desired_size) { 744 int amt; 745 746 /* find a buffer */ 747 while ((nbp = getnewbuf(0, 0)) == NULL) 748 ; 749 SET(nbp->b_flags, B_INVAL); 750 binshash(nbp, &invalhash); 751 752 /* and steal its pages, up to the amount we need */ 753 amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize)); 754 pagemove((nbp->b_data + nbp->b_bufsize - amt), 755 bp->b_data + bp->b_bufsize, amt); 756 bp->b_bufsize += amt; 757 nbp->b_bufsize -= amt; 758 759 /* reduce transfer count if we stole some data */ 760 if (nbp->b_bcount > nbp->b_bufsize) 761 nbp->b_bcount = nbp->b_bufsize; 762 763 #ifdef DIAGNOSTIC 764 if (nbp->b_bufsize < 0) 765 panic("allocbuf: negative bufsize"); 766 #endif 767 768 brelse(nbp); 769 } 770 771 /* 772 * If we want a buffer smaller than the current size, 773 * shrink this buffer. Grab a buf head from the EMPTY queue, 774 * move a page onto it, and put it on front of the AGE queue. 775 * If there are no free buffer headers, leave the buffer alone. 776 */ 777 if (bp->b_bufsize > desired_size) { 778 s = splbio(); 779 if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) { 780 /* No free buffer head */ 781 splx(s); 782 goto out; 783 } 784 bremfree(nbp); 785 SET(nbp->b_flags, B_BUSY); 786 splx(s); 787 788 /* move the page to it and note this change */ 789 pagemove(bp->b_data + desired_size, 790 nbp->b_data, bp->b_bufsize - desired_size); 791 nbp->b_bufsize = bp->b_bufsize - desired_size; 792 bp->b_bufsize = desired_size; 793 nbp->b_bcount = 0; 794 SET(nbp->b_flags, B_INVAL); 795 796 /* release the newly-filled buffer and leave */ 797 brelse(nbp); 798 } 799 800 out: 801 bp->b_bcount = size; 802 } 803 804 /* 805 * Find a buffer which is available for use. 806 * Select something from a free list. 807 * Preference is to AGE list, then LRU list. 808 */ 809 struct buf * 810 getnewbuf(slpflag, slptimeo) 811 int slpflag, slptimeo; 812 { 813 register struct buf *bp; 814 int s; 815 816 start: 817 s = splbio(); 818 if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL || 819 (bp = bufqueues[BQ_LRU].tqh_first) != NULL) { 820 bremfree(bp); 821 } else { 822 /* wait for a free buffer of any kind */ 823 needbuffer = 1; 824 tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo); 825 splx(s); 826 return (0); 827 } 828 829 if (ISSET(bp->b_flags, B_VFLUSH)) { 830 /* 831 * This is a delayed write buffer being flushed to disk. Make 832 * sure it gets aged out of the queue when it's finished, and 833 * leave it off the LRU queue. 834 */ 835 CLR(bp->b_flags, B_VFLUSH); 836 SET(bp->b_flags, B_AGE); 837 splx(s); 838 goto start; 839 } 840 841 /* Buffer is no longer on free lists. */ 842 SET(bp->b_flags, B_BUSY); 843 844 /* If buffer was a delayed write, start it, and go back to the top. */ 845 if (ISSET(bp->b_flags, B_DELWRI)) { 846 splx(s); 847 /* 848 * This buffer has gone through the LRU, so make sure it gets 849 * reused ASAP. 850 */ 851 SET(bp->b_flags, B_AGE); 852 bawrite(bp); 853 goto start; 854 } 855 856 /* disassociate us from our vnode, if we had one... */ 857 if (bp->b_vp) 858 brelvp(bp); 859 splx(s); 860 861 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 862 (*bioops.io_deallocate)(bp); 863 864 /* clear out various other fields */ 865 bp->b_flags = B_BUSY; 866 bp->b_dev = NODEV; 867 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0; 868 bp->b_iodone = 0; 869 bp->b_error = 0; 870 bp->b_resid = 0; 871 bp->b_bcount = 0; 872 bp->b_dirtyoff = bp->b_dirtyend = 0; 873 bp->b_validoff = bp->b_validend = 0; 874 875 /* nuke any credentials we were holding */ 876 if (bp->b_rcred != NOCRED) { 877 crfree(bp->b_rcred); 878 bp->b_rcred = NOCRED; 879 } 880 if (bp->b_wcred != NOCRED) { 881 crfree(bp->b_wcred); 882 bp->b_wcred = NOCRED; 883 } 884 885 bremhash(bp); 886 return (bp); 887 } 888 889 /* 890 * Wait for operations on the buffer to complete. 891 * When they do, extract and return the I/O's error value. 892 */ 893 int 894 biowait(bp) 895 struct buf *bp; 896 { 897 int s; 898 899 s = splbio(); 900 while (!ISSET(bp->b_flags, B_DONE)) 901 tsleep(bp, PRIBIO + 1, "biowait", 0); 902 splx(s); 903 904 /* check for interruption of I/O (e.g. via NFS), then errors. */ 905 if (ISSET(bp->b_flags, B_EINTR)) { 906 CLR(bp->b_flags, B_EINTR); 907 return (EINTR); 908 } else if (ISSET(bp->b_flags, B_ERROR)) 909 return (bp->b_error ? bp->b_error : EIO); 910 else 911 return (0); 912 } 913 914 /* 915 * Mark I/O complete on a buffer. 916 * 917 * If a callback has been requested, e.g. the pageout 918 * daemon, do so. Otherwise, awaken waiting processes. 919 * 920 * [ Leffler, et al., says on p.247: 921 * "This routine wakes up the blocked process, frees the buffer 922 * for an asynchronous write, or, for a request by the pagedaemon 923 * process, invokes a procedure specified in the buffer structure" ] 924 * 925 * In real life, the pagedaemon (or other system processes) wants 926 * to do async stuff to, and doesn't want the buffer brelse()'d. 927 * (for swap pager, that puts swap buffers on the free lists (!!!), 928 * for the vn device, that puts malloc'd buffers on the free lists!) 929 */ 930 void 931 biodone(bp) 932 struct buf *bp; 933 { 934 int s = splbio(); 935 936 if (ISSET(bp->b_flags, B_DONE)) 937 panic("biodone already"); 938 SET(bp->b_flags, B_DONE); /* note that it's done */ 939 940 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 941 (*bioops.io_complete)(bp); 942 943 if (!ISSET(bp->b_flags, B_READ)) /* wake up reader */ 944 vwakeup(bp); 945 946 if (ISSET(bp->b_flags, B_CALL)) { /* if necessary, call out */ 947 CLR(bp->b_flags, B_CALL); /* but note callout done */ 948 (*bp->b_iodone)(bp); 949 } else { 950 if (ISSET(bp->b_flags, B_ASYNC)) /* if async, release */ 951 brelse(bp); 952 else { /* or just wakeup the buffer */ 953 CLR(bp->b_flags, B_WANTED); 954 wakeup(bp); 955 } 956 } 957 958 splx(s); 959 } 960 961 /* 962 * Return a count of buffers on the "locked" queue. 963 */ 964 int 965 count_lock_queue() 966 { 967 register struct buf *bp; 968 register int n = 0; 969 970 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; 971 bp = bp->b_freelist.tqe_next) 972 n++; 973 return (n); 974 } 975 976 #ifdef DEBUG 977 /* 978 * Print out statistics on the current allocation of the buffer pool. 979 * Can be enabled to print out on every ``sync'' by setting "syncprt" 980 * in vfs_syscalls.c using sysctl. 981 */ 982 void 983 vfs_bufstats() 984 { 985 int s, i, j, count; 986 register struct buf *bp; 987 register struct bqueues *dp; 988 int counts[MAXBSIZE/NBPG+1]; 989 static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" }; 990 991 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) { 992 count = 0; 993 for (j = 0; j <= MAXBSIZE/NBPG; j++) 994 counts[j] = 0; 995 s = splbio(); 996 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) { 997 counts[bp->b_bufsize/NBPG]++; 998 count++; 999 } 1000 splx(s); 1001 printf("%s: total-%d", bname[i], count); 1002 for (j = 0; j <= MAXBSIZE/NBPG; j++) 1003 if (counts[j] != 0) 1004 printf(", %d-%d", j * NBPG, counts[j]); 1005 printf("\n"); 1006 } 1007 } 1008 #endif /* DEBUG */ 1009