/*	$NetBSD: vfs_bio.c,v 1.74 2000/12/13 17:48:46 jdolecek Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
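/*
 * Illustrative sketch (not compiled): how the macros above cooperate.
 * A buffer is found by walking the chain that BUFHASH() selects for its
 * (vnode, logical block) pair -- see incore() below -- and is rehashed
 * when its identity changes:
 *
 *	bremhash(bp);				take bp off its old chain
 *	binshash(bp, BUFHASH(vp, blkno));	file it under (vp, blkno)
 */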
/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
	struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();

	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
	splx(s);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
	    NULL, NULL, M_DEVBUF);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}
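/*
 * Illustrative only, never compiled: how a file system typically drives
 * the synchronous read path below.  The function and variable names here
 * are invented for this sketch.
 */
#ifdef notdef
static int
example_read(vp, lbn, size, cred)
	struct vnode *vp;
	daddr_t lbn;
	int size;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	/* bread() always returns a busy buffer, even on error. */
	error = bread(vp, lbn, size, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... examine bp->b_data ... */
	brelse(bp);		/* done with it; back to the free lists */
	return (0);
}
#endif /* notdef */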
/*
 * Read a disk block.
 * This algorithm is described in Bach (p. 54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p. 55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to the next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p. 55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}
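/*
 * Illustrative only, never compiled: sequential access with one block of
 * read-ahead, roughly as a file system read routine might use breadn()
 * above.  All names are invented for this sketch.
 */
#ifdef notdef
static int
example_readahead(vp, lbn, size, cred, bpp)
	struct vnode *vp;
	daddr_t lbn;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	daddr_t ralbn = lbn + 1;	/* next logical block */
	int rasize = size;

	/* Read lbn synchronously; start lbn + 1 in the background. */
	return (breadn(vp, lbn, size, &ralbn, &rasize, 1, cred, bpp));
}
#endif /* notdef */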
/*
 * Block write.  Described in Bach (p. 56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (sync)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX number 255, which is a bad idea. */
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in the order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do not
 * release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}
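/*
 * Illustrative only, never compiled: choosing among the write routines
 * above for a busy buffer the caller has just modified.  The function
 * and parameter names are invented for this sketch.
 */
#ifdef notdef
static int
example_write(bp, sync, more_coming)
	struct buf *bp;
	int sync, more_coming;
{

	if (sync)
		return (bwrite(bp));	/* start the write and wait for it */
	if (more_coming) {
		bdwrite(bp);		/* just mark it dirty; write later */
		return (0);
	}
	bawrite(bp);			/* start the write, don't wait */
	return (0);
}
#endif /* notdef */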
/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put it on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buf is AGE, but has dependencies, we must put it on
		 * the last bufqueue to be scanned, i.e. LRU.  This protects
		 * against the livelock where BQ_AGE only has buffers with
		 * dependencies, and we thus never get to the dependent
		 * buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	splx(s);
}
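/*
 * Illustrative only, never compiled: the usual life cycle of a cache
 * buffer around brelse() when a caller overwrites an entire block and
 * so need not read it first.  getblk() is defined below; the names
 * here are invented for this sketch.
 */
#ifdef notdef
static void
example_overwrite(vp, lbn, size)
	struct vnode *vp;
	daddr_t lbn;
	int size;
{
	struct buf *bp;

	bp = getblk(vp, lbn, size, 0, 0);	/* returned busy */
	/* ... fill in all of bp->b_data ... */
	bdwrite(bp);		/* marks it B_DELWRI and brelse()s it */
}
#endif /* notdef */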
/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (curproc == uvm.pagedaemon_proc) {
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) && bp->b_bcount < size)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		splx(s);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);
	return (bp);
}
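/*
 * Illustrative only, never compiled: geteblk() is how kernel code obtains
 * an anonymous scratch buffer with no vnode behind it.  The function name
 * is invented for this sketch.
 */
#ifdef notdef
static void
example_scratch()
{
	struct buf *bp;

	bp = geteblk(MAXBSIZE);	/* returned busy and B_INVAL */
	/* ... use bp->b_data as scratch space ... */
	brelse(bp);		/* B_INVAL sends it to the head of BQ_AGE */
}
#endif /* notdef */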
/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on the front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}
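/*
 * Illustrative only, never compiled: the producer/consumer pattern that
 * biowait() and biodone() below coordinate.  The driver's strategy
 * routine eventually calls biodone(bp), which sets B_DONE and wakes the
 * sleeper.  The function name is invented for this sketch.
 */
#ifdef notdef
static int
example_io(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_READ);
	VOP_STRATEGY(bp);	/* queue the I/O; completion calls biodone() */
	return (biowait(bp));	/* sleep until B_DONE; return error, if any */
}
#endif /* notdef */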
/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. by the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p. 247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for the swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */