/*	$NetBSD: vfs_bio.c,v 1.152 2006/01/11 00:44:41 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.152 2006/01/11 00:44:41 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int   nbuf;                   /* XXX - for softdep_lockedbufs */
u_int   bufpages = BUFPAGES;    /* optional hardwired count */
u_int   bufcache = BUFCACHE;    /* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static void buf_setwm(void);
static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static inline u_long buf_mempoolidx(u_long);
static inline u_long buf_roundsize(u_long);
static inline caddr_t buf_malloc(size_t);
static void buf_mrelease(caddr_t, size_t);
static inline void binsheadfree(struct buf *, struct bqueue *);
static inline void binstailfree(struct buf *, struct bqueue *);
int count_lock_queue(void); /* XXX */
#ifdef DEBUG
static int checkfreelist(struct buf *, struct bqueue *);
#endif

/* Macros to clear/set/test flags. */
#define SET(t, f)       (t) |= (f)
#define CLR(t, f)       (t) &= ~(f)
#define ISSET(t, f)     ((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define BUFHASH(dvp, lbn)       \
        (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long  bufhash;
#if !defined(SOFTDEP) || !defined(FFS)
struct bio_ops bioops;  /* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define binshash(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_hash)
#define bremhash(bp)            LIST_REMOVE(bp, b_hash)

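/*
 * Illustrative note on BUFHASH: it folds the vnode pointer and the
 * logical block number into one chain index.  Assuming a 64-chain table
 * (bufhash == 0x3f), a vnode at 0xc1234500 with lbn 10 hashes to
 *
 *	((0xc1234500 >> 8) + 10) & 0x3f == (0xc12345 + 0xa) & 0x3f == 0xf
 *
 * The right shift discards the pointer's low bits, which vary little
 * because vnode allocations are aligned.
 */
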
/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES         3               /* number of free buffer queues */

#define BQ_LOCKED       0               /* super-blocks &c */
#define BQ_LRU          1               /* lru, useful buffers */
#define BQ_AGE          2               /* rubbish */

struct bqueue {
        TAILQ_HEAD(, buf) bq_queue;
        uint64_t bq_bytes;
} bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 * Access to this pool must be protected with splbio().
 */
static struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10         /* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

        return (void *)uvm_km_alloc(buf_map,
            MAXBSIZE, MAXBSIZE,
            ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
            | UVM_KMF_WIRED);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{

        uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}

static struct pool_allocator bufmempool_allocator = {
        bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

        /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
        if (sz < NMEMPOOLS * MAXBSIZE)
                return EINVAL;

        bufmem_valimit = sz;
        return 0;
}

static void
buf_setwm(void)
{

        bufmem_hiwater = buf_memcalc();
        /* lowater is approx. 2% of memory (with bufcache = 15) */
#define BUFMEM_WMSHIFT  3
#define BUFMEM_HIWMMIN  (64 * 1024 << BUFMEM_WMSHIFT)
        if (bufmem_hiwater < BUFMEM_HIWMMIN)
                /* Ensure a reasonable minimum value */
                bufmem_hiwater = BUFMEM_HIWMMIN;
        bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueue *dp)
{
        struct buf *b;

        TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
                if (b == bp)
                        return 1;
        }
        return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static inline void
binsheadfree(struct buf *bp, struct bqueue *dp)
{

        KASSERT(bp->b_freelistindex == -1);
        TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
        dp->bq_bytes += bp->b_bufsize;
        bp->b_freelistindex = dp - bufqueues;
}

static inline void
binstailfree(struct buf *bp, struct bqueue *dp)
{

        KASSERT(bp->b_freelistindex == -1);
        TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
        dp->bq_bytes += bp->b_bufsize;
        bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(struct buf *bp)
{
        struct bqueue *dp;
        int bqidx = bp->b_freelistindex;

        LOCK_ASSERT(simple_lock_held(&bqueue_slock));

        KASSERT(bqidx != -1);
        dp = &bufqueues[bqidx];
        KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
        KASSERT(dp->bq_bytes >= bp->b_bufsize);
        TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
        dp->bq_bytes -= bp->b_bufsize;
#if defined(DIAGNOSTIC)
        bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

u_long
buf_memcalc(void)
{
        u_long n;

        /*
         * Determine the upper bound of memory to use for buffers.
         *
         * - If bufpages is specified, use that as the number
         *   of pages.
         *
         * - Otherwise, use bufcache as the percentage of
         *   physical memory.
         */
        if (bufpages != 0) {
                n = bufpages;
        } else {
                if (bufcache < 5) {
                        printf("forcing bufcache %d -> 5", bufcache);
                        bufcache = 5;
                }
                if (bufcache > 95) {
                        printf("forcing bufcache %d -> 95", bufcache);
                        bufcache = 95;
                }
                n = physmem / 100 * bufcache;
        }

        n <<= PAGE_SHIFT;
        if (bufmem_valimit != 0 && n > bufmem_valimit)
                n = bufmem_valimit;

        return (n);
}

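/*
 * Worked example: on a machine with physmem == 32768 pages (128 MB with
 * 4 KB pages) and the default bufcache of 15, buf_memcalc() gives
 *
 *	n = 32768 / 100 * 15 = 4905 pages  ->  4905 << 12, about 19 MB,
 *
 * which buf_setwm() installs as bufmem_hiwater; bufmem_lowater is then
 * hiwater >> BUFMEM_WMSHIFT, i.e. roughly 2% of physical memory.
 */
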
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
        struct bqueue *dp;
        int use_std;
        u_int i;

        /*
         * Initialize buffer cache memory parameters.
         */
        bufmem = 0;
        buf_setwm();

        if (bufmem_valimit != 0) {
                vaddr_t minaddr = 0, maxaddr;
                buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
                    bufmem_valimit, VM_MAP_PAGEABLE, FALSE, 0);
                if (buf_map == NULL)
                        panic("bufinit: cannot allocate submap");
        } else
                buf_map = kernel_map;

        /*
         * Initialize the buffer pools.
         */
        pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

        /* On "small" machines use small pool page sizes where possible */
        use_std = (physmem < atop(16*1024*1024));

        /*
         * Also use them on systems that can map the pool pages using
         * a direct-mapped segment.
         */
#ifdef PMAP_MAP_POOLPAGE
        use_std = 1;
#endif

        for (i = 0; i < NMEMPOOLS; i++) {
                struct pool_allocator *pa;
                struct pool *pp = &bmempools[i];
                u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
                char *name = malloc(8, M_TEMP, M_WAITOK);
                snprintf(name, 8, "buf%dk", 1 << i);
                pa = (size <= PAGE_SIZE && use_std)
                    ? &pool_allocator_nointr
                    : &bufmempool_allocator;
                pool_init(pp, size, 0, 0, 0, name, pa);
                pool_setlowat(pp, 1);
                pool_sethiwat(pp, 1);
        }

        /* Initialize the buffer queues */
        for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
                TAILQ_INIT(&dp->bq_queue);
                dp->bq_bytes = 0;
        }

        /*
         * Estimate hash table size based on the amount of memory we
         * intend to use for the buffer cache. The average buffer
         * size is dependent on our clients (i.e. filesystems).
         *
         * For now, use an empirical 3K per buffer.
         */
        nbuf = (bufmem_hiwater / 1024) / 3;
        bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
        int try, thresh;
        struct lwp *l = curlwp;

        /* Always allocate if doing copy on write */
        if (l->l_flag & L_COWINPROGRESS)
                return 1;

        /* Always allocate if less than the low water mark. */
        if (bufmem < bufmem_lowater)
                return 1;

        /* Never allocate if greater than the high water mark. */
        if (bufmem > bufmem_hiwater)
                return 0;

        /* If there's anything on the AGE list, it should be eaten. */
        if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
                return 0;

        /*
         * The probability of getting a new allocation is inversely
         * proportional to the current size of the cache, using
         * a granularity of 16 steps.
         */
        try = random() & 0x0000000fL;

        /* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
        thresh = (bufmem - bufmem_lowater) /
            ((bufmem_hiwater - bufmem_lowater) / 16);

        if (try >= thresh)
                return 1;

        /* Otherwise don't allocate. */
        return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
        int pagedemand, ninvalid = 0;

        LOCK_ASSERT(simple_lock_held(&bqueue_slock));

        if (bufmem < bufmem_lowater)
                return 0;

        if (bufmem > bufmem_hiwater)
                return bufmem - bufmem_hiwater;

        ninvalid += bufqueues[BQ_AGE].bq_bytes;

        pagedemand = uvmexp.freetarg - uvmexp.free;
        if (pagedemand < 0)
                return ninvalid;
        return MAX(ninvalid, MIN(2 * MAXBSIZE,
            MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static inline u_long
buf_mempoolidx(u_long size)
{
        u_int n = 0;

        size -= 1;
        size >>= MEMPOOL_INDEX_OFFSET;
        while (size) {
                size >>= 1;
                n += 1;
        }
        if (n >= NMEMPOOLS)
                panic("buf mem pool index %d", n);
        return n;
}

static inline u_long
buf_roundsize(u_long size)
{
        /* Round up to nearest power of 2 */
        return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}

static inline caddr_t
buf_malloc(size_t size)
{
        u_int n = buf_mempoolidx(size);
        caddr_t addr;
        int s;

        while (1) {
                addr = pool_get(&bmempools[n], PR_NOWAIT);
                if (addr != NULL)
                        break;

                /* No memory, see if we can free some. If so, try again */
                if (buf_drain(1) > 0)
                        continue;

                /* Wait for buffers to arrive on the LRU queue */
                s = splbio();
                simple_lock(&bqueue_slock);
                needbuffer = 1;
                ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
                    "buf_malloc", 0, &bqueue_slock);
                splx(s);
        }

        return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

        pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

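/*
 * Worked example for the pool helpers above: with MEMPOOL_INDEX_OFFSET
 * of 10 the smallest pool serves 1 KB chunks, and buf_mempoolidx() picks
 * the smallest power-of-2 pool that fits the request:
 *
 *	size 3000:  (3000 - 1) >> 10 == 2, which shifts to zero in two
 *	steps, so n == 2 and buf_roundsize(3000) == 1 << 12 == 4096,
 *	i.e. the "buf4k" pool.
 *
 * An exact power of 2 wastes nothing: size 1024 gives (1023 >> 10) == 0,
 * so n == 0 and the 1 KB pool is used.
 */
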
/*
 * bread()/breadn() helper.
 */
static inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
        struct buf *bp;
        struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);      /* XXX */
        struct proc *p = l->l_proc;
        struct mount *mp;

        bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
        if (bp == NULL) {
                panic("bio_doread: no such buf");
        }
#endif

        /*
         * If buffer does not have data valid, start a read.
         * Note that if buffer is B_INVAL, getblk() won't return it.
         * Therefore, it's valid if its I/O has completed or been delayed.
         */
        if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
                /* Start I/O for the buffer. */
                SET(bp->b_flags, B_READ | async);
                if (async)
                        BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
                else
                        BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
                VOP_STRATEGY(vp, bp);

                /* Pay for the read. */
                p->p_stats->p_ru.ru_inblock++;
        } else if (async) {
                brelse(bp);
        }

        if (vp->v_type == VBLK)
                mp = vp->v_specmountpoint;
        else
                mp = vp->v_mount;

        /*
         * Collect statistics on synchronous and asynchronous reads.
         * Reads from block devices are charged to their associated
         * filesystem (if any).
         */
        if (mp != NULL) {
                if (async == 0)
                        mp->mnt_stat.f_syncreads++;
                else
                        mp->mnt_stat.f_asyncreads++;
        }

        return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
        struct buf *bp;

        /* Get buffer for block. */
        bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

        /* Wait for the read to complete, and return result. */
        return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
        struct buf *bp;
        int i;

        bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

        /*
         * For each of the read-ahead blocks, start a read, if necessary.
         */
        for (i = 0; i < nrablks; i++) {
                /* If it's in the cache, just go on to next one. */
                if (incore(vp, rablks[i]))
                        continue;

                /* Get a buffer for the read-ahead block */
                (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
        }

        /* Otherwise, we had to start a read for it; wait until it's valid. */
        return (biowait(bp));
}

/*
 * Read with single-block read-ahead. Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

        return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

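/*
 * Typical usage of bread() (an illustrative sketch; the NOCRED and the
 * error-path brelse() follow the common idiom of callers of this era):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine or copy bp->b_data...
 *	brelse(bp);
 *
 * bread() always hands a buffer back through *bpp, so the caller must
 * release it on both the success and the error path.
 */
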
/*
 * Block write. Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
        int rv, sync, wasdelayed, s;
        struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);      /* XXX */
        struct proc *p = l->l_proc;
        struct vnode *vp;
        struct mount *mp;

        KASSERT(ISSET(bp->b_flags, B_BUSY));

        vp = bp->b_vp;
        if (vp != NULL) {
                if (vp->v_type == VBLK)
                        mp = vp->v_specmountpoint;
                else
                        mp = vp->v_mount;
        } else {
                mp = NULL;
        }

        /*
         * Remember buffer type, to switch on it later.  If the write was
         * synchronous, but the file system was mounted with MNT_ASYNC,
         * convert it to a delayed write.
         * XXX note that this relies on delayed tape writes being converted
         * to async, not sync writes (which is safe, but ugly).
         */
        sync = !ISSET(bp->b_flags, B_ASYNC);
        if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
                bdwrite(bp);
                return (0);
        }

        /*
         * Collect statistics on synchronous and asynchronous writes.
         * Writes to block devices are charged to their associated
         * filesystem (if any).
         */
        if (mp != NULL) {
                if (sync)
                        mp->mnt_stat.f_syncwrites++;
                else
                        mp->mnt_stat.f_asyncwrites++;
        }

        s = splbio();
        simple_lock(&bp->b_interlock);

        wasdelayed = ISSET(bp->b_flags, B_DELWRI);

        CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

        /*
         * Pay for the I/O operation and make sure the buf is on the correct
         * vnode queue.
         */
        if (wasdelayed)
                reassignbuf(bp, bp->b_vp);
        else
                p->p_stats->p_ru.ru_oublock++;

        /* Initiate disk write.  Make sure the appropriate party is charged. */
        V_INCR_NUMOUTPUT(bp->b_vp);
        simple_unlock(&bp->b_interlock);
        splx(s);

        if (sync)
                BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
        else
                BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

        VOP_STRATEGY(vp, bp);

        if (sync) {
                /* If I/O was synchronous, wait for it to complete. */
                rv = biowait(bp);

                /* Release the buffer. */
                brelse(bp);

                return (rv);
        } else {
                return (0);
        }
}

int
vn_bwrite(void *v)
{
        struct vop_bwrite_args *ap = v;

        return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
        struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);      /* XXX */
        struct proc *p = l->l_proc;
        const struct bdevsw *bdev;
        int s;

        /* If this is a tape block, write the block now. */
        bdev = bdevsw_lookup(bp->b_dev);
        if (bdev != NULL && bdev->d_type == D_TAPE) {
                bawrite(bp);
                return;
        }

        /*
         * If the block hasn't been seen before:
         *      (1) Mark it as having been seen,
         *      (2) Charge for the write,
         *      (3) Make sure it's on its vnode's correct block list.
         */
        s = splbio();
        simple_lock(&bp->b_interlock);

        KASSERT(ISSET(bp->b_flags, B_BUSY));

        if (!ISSET(bp->b_flags, B_DELWRI)) {
                SET(bp->b_flags, B_DELWRI);
                p->p_stats->p_ru.ru_oublock++;
                reassignbuf(bp, bp->b_vp);
        }

        /* Otherwise, the "write" is done, so mark and release the buffer. */
        CLR(bp->b_flags, B_DONE);
        simple_unlock(&bp->b_interlock);
        splx(s);

        brelse(bp);
}

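/*
 * The three write entry points in this file, in brief:
 *
 *	bwrite()  - synchronous unless B_ASYNC is set; on the sync path
 *	            it waits via biowait() and releases the buffer itself.
 *	bdwrite() - marks the buffer B_DELWRI and releases it without
 *	            starting I/O; tapes are forced out immediately via
 *	            bawrite() since their writes cannot be reordered.
 *	bawrite() - sets B_ASYNC and calls VOP_BWRITE(), i.e. an
 *	            asynchronous bwrite(); the buffer is released from
 *	            biodone() when the I/O completes.
 */
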
/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
        int s;

        s = splbio();
        simple_lock(&bp->b_interlock);

        KASSERT(ISSET(bp->b_flags, B_BUSY));

        SET(bp->b_flags, B_ASYNC);
        simple_unlock(&bp->b_interlock);
        splx(s);
        VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
        struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);      /* XXX */
        struct proc *p = l->l_proc;

        LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
        KASSERT(ISSET(bp->b_flags, B_BUSY));

        CLR(bp->b_flags, B_AGE);

        if (!ISSET(bp->b_flags, B_DELWRI)) {
                SET(bp->b_flags, B_DELWRI);
                p->p_stats->p_ru.ru_oublock++;
                reassignbuf(bp, bp->b_vp);
        }
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
        struct bqueue *bufq;
        int s;

        /* Block disk interrupts. */
        s = splbio();
        simple_lock(&bqueue_slock);
        simple_lock(&bp->b_interlock);

        KASSERT(ISSET(bp->b_flags, B_BUSY));
        KASSERT(!ISSET(bp->b_flags, B_CALL));

        /* Wake up any processes waiting for any buffer to become free. */
        if (needbuffer) {
                needbuffer = 0;
                wakeup(&needbuffer);
        }

        /* Wake up any processes waiting for _this_ buffer to become free. */
        if (ISSET(bp->b_flags, B_WANTED)) {
                CLR(bp->b_flags, B_WANTED|B_AGE);
                wakeup(bp);
        }

        /*
         * Determine which queue the buffer should be on, then put it there.
         */

        /* If it's locked, don't report an error; try again later. */
        if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
                CLR(bp->b_flags, B_ERROR);

        /* If it's not cacheable, or an error, mark it invalid. */
        if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
                SET(bp->b_flags, B_INVAL);

        if (ISSET(bp->b_flags, B_VFLUSH)) {
                /*
                 * This is a delayed write buffer that was just flushed to
                 * disk.  It is still on the LRU queue.  If it's become
                 * invalid, then we need to move it to a different queue;
                 * otherwise leave it in its current position.
                 */
                CLR(bp->b_flags, B_VFLUSH);
                if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
                        KDASSERT(!debug_verify_freelist ||
                            checkfreelist(bp, &bufqueues[BQ_LRU]));
                        goto already_queued;
                } else {
                        bremfree(bp);
                }
        }

        KDASSERT(!debug_verify_freelist ||
            !checkfreelist(bp, &bufqueues[BQ_AGE]));
        KDASSERT(!debug_verify_freelist ||
            !checkfreelist(bp, &bufqueues[BQ_LRU]));
        KDASSERT(!debug_verify_freelist ||
            !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

        if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
                /*
                 * If it's invalid or empty, dissociate it from its vnode
                 * and put on the head of the appropriate queue.
                 */
                if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
                        (*bioops.io_deallocate)(bp);
                CLR(bp->b_flags, B_DONE|B_DELWRI);
                if (bp->b_vp) {
                        reassignbuf(bp, bp->b_vp);
                        brelvp(bp);
                }
                if (bp->b_bufsize <= 0)
                        /* no data */
                        goto already_queued;
                else
                        /* invalid data */
                        bufq = &bufqueues[BQ_AGE];
                binsheadfree(bp, bufq);
        } else {
                /*
                 * It has valid data.  Put it on the end of the appropriate
                 * queue, so that it'll stick around for as long as possible.
                 * If buf is AGE, but has dependencies, must put it on last
                 * bufqueue to be scanned, ie LRU. This protects against the
                 * livelock where BQ_AGE only has buffers with dependencies,
                 * and we thus never get to the dependent buffers in BQ_LRU.
                 */
                if (ISSET(bp->b_flags, B_LOCKED))
                        /* locked in core */
                        bufq = &bufqueues[BQ_LOCKED];
                else if (!ISSET(bp->b_flags, B_AGE))
                        /* valid data */
                        bufq = &bufqueues[BQ_LRU];
                else {
                        /* stale but valid data */
                        int has_deps;

                        if (LIST_FIRST(&bp->b_dep) != NULL &&
                            bioops.io_countdeps)
                                has_deps = (*bioops.io_countdeps)(bp, 0);
                        else
                                has_deps = 0;
                        bufq = has_deps ? &bufqueues[BQ_LRU] :
                            &bufqueues[BQ_AGE];
                }
                binstailfree(bp, bufq);
        }

already_queued:
        /* Unlock the buffer. */
        CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
        SET(bp->b_flags, B_CACHE);

        /* Allow disk interrupts. */
        simple_unlock(&bp->b_interlock);
        simple_unlock(&bqueue_slock);
        if (bp->b_bufsize <= 0) {
#ifdef DEBUG
                memset((char *)bp, 0, sizeof(*bp));
#endif
                pool_put(&bufpool, bp);
        }
        splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
        struct buf *bp;

        /* Search hash chain */
        LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
                if (bp->b_lblkno == blkno && bp->b_vp == vp &&
                    !ISSET(bp->b_flags, B_INVAL))
                        return (bp);
        }

        return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
        struct buf *bp;
        int s, err;
        int preserve;

start:
        s = splbio();
        simple_lock(&bqueue_slock);
        bp = incore(vp, blkno);
        if (bp != NULL) {
                simple_lock(&bp->b_interlock);
                if (ISSET(bp->b_flags, B_BUSY)) {
                        simple_unlock(&bqueue_slock);
                        if (curproc == uvm.pagedaemon_proc) {
                                simple_unlock(&bp->b_interlock);
                                splx(s);
                                return NULL;
                        }
                        SET(bp->b_flags, B_WANTED);
                        err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
                            "getblk", slptimeo, &bp->b_interlock);
                        splx(s);
                        if (err)
                                return (NULL);
                        goto start;
                }
#ifdef DIAGNOSTIC
                if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
                    bp->b_bcount < size && vp->v_type != VBLK)
                        panic("getblk: block size invariant failed");
#endif
                SET(bp->b_flags, B_BUSY);
                bremfree(bp);
                preserve = 1;
        } else {
                if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
                        simple_unlock(&bqueue_slock);
                        splx(s);
                        goto start;
                }

                binshash(bp, BUFHASH(vp, blkno));
                bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
                bgetvp(vp, bp);
                preserve = 0;
        }
        simple_unlock(&bp->b_interlock);
        simple_unlock(&bqueue_slock);
        splx(s);
        /*
         * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
         * if we re-size buffers here.
         */
        if (ISSET(bp->b_flags, B_LOCKED)) {
                KASSERT(bp->b_bufsize >= size);
        } else {
                allocbuf(bp, size, preserve);
        }
        BIO_SETPRIO(bp, BPRIO_DEFAULT);
        return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
        struct buf *bp;
        int s;

        s = splbio();
        simple_lock(&bqueue_slock);
        while ((bp = getnewbuf(0, 0, 0)) == 0)
                ;

        SET(bp->b_flags, B_INVAL);
        binshash(bp, &invalhash);
        simple_unlock(&bqueue_slock);
        simple_unlock(&bp->b_interlock);
        splx(s);
        BIO_SETPRIO(bp, BPRIO_DEFAULT);
        allocbuf(bp, size, 0);
        return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write. If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
        vsize_t oldsize, desired_size;
        caddr_t addr;
        int s, delta;

        desired_size = buf_roundsize(size);
        if (desired_size > MAXBSIZE)
                printf("allocbuf: buffer larger than MAXBSIZE requested\n");

        bp->b_bcount = size;

        oldsize = bp->b_bufsize;
        if (oldsize == desired_size)
                return;

        /*
         * If we want a buffer of a different size, re-allocate the
         * buffer's memory; copy old content only if needed.
         */
        addr = buf_malloc(desired_size);
        if (preserve)
                memcpy(addr, bp->b_data, MIN(oldsize, desired_size));
        if (bp->b_data != NULL)
                buf_mrelease(bp->b_data, oldsize);
        bp->b_data = addr;
        bp->b_bufsize = desired_size;

        /*
         * Update overall buffer memory counter (protected by bqueue_slock)
         */
        delta = (long)desired_size - (long)oldsize;

        s = splbio();
        simple_lock(&bqueue_slock);
        if ((bufmem += delta) > bufmem_hiwater) {
                /*
                 * Need to trim overall memory usage.
                 */
                while (buf_canrelease()) {
                        if (buf_trim() == 0)
                                break;
                }
        }

        simple_unlock(&bqueue_slock);
        splx(s);
}

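/*
 * Worked example for allocbuf(): growing a 2 KB buffer to hold 6 KB
 * with preserve set walks the pools above as
 *
 *	desired_size = buf_roundsize(6144) == 8192  (the "buf8k" pool),
 *	memcpy() carries over MIN(2048, 8192) == 2048 old bytes,
 *	the old 2 KB chunk returns to its pool, and
 *	delta = 8192 - 2048 = 6144 is added to bufmem, which may push it
 *	over bufmem_hiwater and trigger the buf_trim() loop above.
 */
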
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
        struct buf *bp;

start:
        LOCK_ASSERT(simple_lock_held(&bqueue_slock));

        /*
         * Get a new buffer from the pool; but use NOWAIT because
         * we have the buffer queues locked.
         */
        if (!from_bufq && buf_lotsfree() &&
            (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
                memset((char *)bp, 0, sizeof(*bp));
                BUF_INIT(bp);
                bp->b_dev = NODEV;
                bp->b_vnbufs.le_next = NOLIST;
                bp->b_flags = B_BUSY;
                simple_lock(&bp->b_interlock);
#if defined(DIAGNOSTIC)
                bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
                return (bp);
        }

        if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
            (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
                simple_lock(&bp->b_interlock);
                bremfree(bp);
        } else {
                /*
                 * XXX: !from_bufq should be removed.
                 */
                if (!from_bufq || curproc != uvm.pagedaemon_proc) {
                        /* wait for a free buffer of any kind */
                        needbuffer = 1;
                        ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
                            "getnewbuf", slptimeo, &bqueue_slock);
                }
                return (NULL);
        }

#ifdef DIAGNOSTIC
        if (bp->b_bufsize <= 0)
                panic("buffer %p: on queue but empty", bp);
#endif

        if (ISSET(bp->b_flags, B_VFLUSH)) {
                /*
                 * This is a delayed write buffer being flushed to disk.  Make
                 * sure it gets aged out of the queue when it's finished, and
                 * leave it off the LRU queue.
                 */
                CLR(bp->b_flags, B_VFLUSH);
                SET(bp->b_flags, B_AGE);
                simple_unlock(&bp->b_interlock);
                goto start;
        }

        /* Buffer is no longer on free lists. */
        SET(bp->b_flags, B_BUSY);

        /*
         * If buffer was a delayed write, start it and return NULL
         * (since we might sleep while starting the write).
         */
        if (ISSET(bp->b_flags, B_DELWRI)) {
                /*
                 * This buffer has gone through the LRU, so make sure it gets
                 * reused ASAP.
                 */
                SET(bp->b_flags, B_AGE);
                simple_unlock(&bp->b_interlock);
                simple_unlock(&bqueue_slock);
                bawrite(bp);
                simple_lock(&bqueue_slock);
                return (NULL);
        }

        /* disassociate us from our vnode, if we had one... */
        if (bp->b_vp)
                brelvp(bp);

        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
                (*bioops.io_deallocate)(bp);

        /* clear out various other fields */
        bp->b_flags = B_BUSY;
        bp->b_dev = NODEV;
        bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
        bp->b_iodone = 0;
        bp->b_error = 0;
        bp->b_resid = 0;
        bp->b_bcount = 0;

        bremhash(bp);
        return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
        struct buf *bp;
        long size = 0;

        /* Instruct getnewbuf() to get buffers off the queues */
        if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
                return 0;

        KASSERT(!ISSET(bp->b_flags, B_WANTED));
        simple_unlock(&bp->b_interlock);
        size = bp->b_bufsize;
        bufmem -= size;
        simple_unlock(&bqueue_slock);
        if (size > 0) {
                buf_mrelease(bp->b_data, size);
                bp->b_bcount = bp->b_bufsize = 0;
        }
        /* brelse() will return the buffer to the global buffer pool */
        brelse(bp);
        simple_lock(&bqueue_slock);
        return size;
}

int
buf_drain(int n)
{
        int s, size = 0, sz;

        s = splbio();
        simple_lock(&bqueue_slock);

        while (size < n && bufmem > bufmem_lowater) {
                sz = buf_trim();
                if (sz <= 0)
                        break;
                size += sz;
        }

        simple_unlock(&bqueue_slock);
        splx(s);
        return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
        int s, error;

        s = splbio();
        simple_lock(&bp->b_interlock);
        while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
                ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

        /* check errors. */
        if (ISSET(bp->b_flags, B_ERROR))
                error = bp->b_error ? bp->b_error : EIO;
        else
                error = 0;

        simple_unlock(&bp->b_interlock);
        splx(s);
        return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *      "This routine wakes up the blocked process, frees the buffer
 *      for an asynchronous write, or, for a request by the pagedaemon
 *      process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
        int s = splbio();

        simple_lock(&bp->b_interlock);
        if (ISSET(bp->b_flags, B_DONE))
                panic("biodone already");
        SET(bp->b_flags, B_DONE);       /* note that it's done */
        BIO_SETPRIO(bp, BPRIO_DEFAULT);

        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
                (*bioops.io_complete)(bp);

        if (!ISSET(bp->b_flags, B_READ))
                vwakeup(bp);    /* on a write, note completion on the vnode */

        /*
         * If necessary, call out.  Unlock the buffer before calling
         * iodone() as the buffer isn't valid any more when it returns.
         */
        if (ISSET(bp->b_flags, B_CALL)) {
                CLR(bp->b_flags, B_CALL);       /* but note callout done */
                simple_unlock(&bp->b_interlock);
                (*bp->b_iodone)(bp);
        } else {
                if (ISSET(bp->b_flags, B_ASYNC)) {      /* if async, release */
                        simple_unlock(&bp->b_interlock);
                        brelse(bp);
                } else {                /* or just wakeup the buffer */
                        CLR(bp->b_flags, B_WANTED);
                        wakeup(bp);
                        simple_unlock(&bp->b_interlock);
                }
        }

        splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
        struct buf *bp;
        int n = 0;

        simple_lock(&bqueue_slock);
        TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
                n++;
        simple_unlock(&bqueue_slock);
        return (n);
}

/*
 * Wait for all buffers to complete I/O.
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
        struct buf *bp;
        int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

        dcount = 10000;
        for (iter = 0; iter < 20;) {
                s = splbio();
                simple_lock(&bqueue_slock);
                nbusy = 0;
                for (ihash = 0; ihash < bufhash+1; ihash++) {
                        LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
                                if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
                                    == B_BUSY)
                                        nbusy++;
                                /*
                                 * With soft updates, some buffers that are
                                 * written will be remarked as dirty until
                                 * other buffers are written.
                                 */
                                if (bp->b_vp && bp->b_vp->v_mount
                                    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
                                    && (bp->b_flags & B_DELWRI)) {
                                        simple_lock(&bp->b_interlock);
                                        bremfree(bp);
                                        bp->b_flags |= B_BUSY;
                                        nbusy++;
                                        simple_unlock(&bp->b_interlock);
                                        simple_unlock(&bqueue_slock);
                                        bawrite(bp);
                                        if (dcount-- <= 0) {
                                                printf("softdep ");
                                                splx(s);
                                                goto fail;
                                        }
                                        simple_lock(&bqueue_slock);
                                }
                        }
                }

                simple_unlock(&bqueue_slock);
                splx(s);

                if (nbusy == 0)
                        break;
                if (nbusy_prev == 0)
                        nbusy_prev = nbusy;
                printf("%d ", nbusy);
                tsleep(&nbusy, PRIBIO, "bflush",
                    (iter == 0) ? 1 : hz / 25 * iter);
                if (nbusy >= nbusy_prev) /* we didn't flush anything */
                        iter++;
                else
                        nbusy_prev = nbusy;
        }

        if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
                printf("giving up\nPrinting vnodes for busy buffers\n");
                s = splbio();
                for (ihash = 0; ihash < bufhash+1; ihash++) {
                        LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
                                if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
                                    == B_BUSY)
                                        vprint(NULL, bp->b_vp);
                        }
                }
                splx(s);
#endif
        }

        return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

        o->b_flags = i->b_flags;
        o->b_error = i->b_error;
        o->b_prio = i->b_prio;
        o->b_dev = i->b_dev;
        o->b_bufsize = i->b_bufsize;
        o->b_bcount = i->b_bcount;
        o->b_resid = i->b_resid;
        o->b_addr = PTRTOUINT64(i->b_un.b_addr);
        o->b_blkno = i->b_blkno;
        o->b_rawblkno = i->b_rawblkno;
        o->b_iodone = PTRTOUINT64(i->b_iodone);
        o->b_proc = PTRTOUINT64(i->b_proc);
        o->b_vp = PTRTOUINT64(i->b_vp);
        o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
        o->b_lblkno = i->b_lblkno;
}

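/*
 * kern.buf takes a 4-component name, {op, arg, elem_size, elem_count},
 * handled by sysctl_dobuf() below; both op and arg are currently fixed
 * at KERN_BUF_ALL.  An illustrative userland sketch (assuming the usual
 * sysctl(3) calling convention; not code from this file):
 *
 *	int mib[6] = { CTL_KERN, KERN_BUF, KERN_BUF_ALL, KERN_BUF_ALL,
 *	    sizeof(struct buf_sysctl), INT_MAX };
 *	size_t len;
 *
 *	if (sysctl(mib, 6, NULL, &len, NULL, 0) == 0) {
 *		void *v = malloc(len);
 *		if (v != NULL && sysctl(mib, 6, v, &len, NULL, 0) == 0)
 *			...walk len / sizeof(struct buf_sysctl) entries...
 *	}
 *
 * The handler itself sees only the last four components (namelen == 4).
 */
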
#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
        struct buf *bp;
        struct buf_sysctl bs;
        char *dp;
        u_int i, op, arg;
        size_t len, needed, elem_size, out_size;
        int error, s, elem_count;

        if (namelen == 1 && name[0] == CTL_QUERY)
                return (sysctl_query(SYSCTLFN_CALL(rnode)));

        if (namelen != 4)
                return (EINVAL);

        dp = oldp;
        len = (oldp != NULL) ? *oldlenp : 0;
        op = name[0];
        arg = name[1];
        elem_size = name[2];
        elem_count = name[3];
        out_size = MIN(sizeof(bs), elem_size);

        /*
         * at the moment, these are just "placeholders" to make the
         * API for retrieving kern.buf data more extensible in the
         * future.
         *
         * XXX kern.buf currently has "netbsd32" issues.  hopefully
         * these will be resolved at a later point.
         */
        if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
            elem_size < 1 || elem_count < 0)
                return (EINVAL);

        error = 0;
        needed = 0;
        s = splbio();
        simple_lock(&bqueue_slock);
        for (i = 0; i < BQUEUES; i++) {
                TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
                        if (len >= elem_size && elem_count > 0) {
                                sysctl_fillbuf(bp, &bs);
                                error = copyout(&bs, dp, out_size);
                                if (error)
                                        goto cleanup;
                                dp += elem_size;
                                len -= elem_size;
                        }
                        if (elem_count > 0) {
                                needed += elem_size;
                                if (elem_count != INT_MAX)
                                        elem_count--;
                        }
                }
        }
cleanup:
        simple_unlock(&bqueue_slock);
        splx(s);

        *oldlenp = needed;
        if (oldp == NULL)
                *oldlenp += KERN_BUFSLOP * sizeof(struct buf);

        return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
        int t, error;
        struct sysctlnode node;

        node = *rnode;
        node.sysctl_data = &t;
        t = *(int *)rnode->sysctl_data;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        if (t < 0)
                return EINVAL;
        if (rnode->sysctl_data == &bufcache) {
                if (t > 100)
                        return (EINVAL);
                bufcache = t;
                buf_setwm();
        } else if (rnode->sysctl_data == &bufmem_lowater) {
                if (bufmem_hiwater - t < 16)
                        return (EINVAL);
                bufmem_lowater = t;
        } else if (rnode->sysctl_data == &bufmem_hiwater) {
                if (t - bufmem_lowater < 16)
                        return (EINVAL);
                bufmem_hiwater = t;
        } else
                return (EINVAL);

        /* Drain until below new high water mark */
        while ((t = bufmem - bufmem_hiwater) >= 0) {
                if (buf_drain(t / (2 * 1024)) <= 0)
                        break;
        }

        return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "kern", NULL,
            NULL, 0, NULL, 0,
            CTL_KERN, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "buf",
            SYSCTL_DESCR("Kernel buffer cache information"),
            sysctl_dobuf, 0, NULL, 0,
            CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "vm", NULL,
            NULL, 0, NULL, 0,
            CTL_VM, CTL_EOL);

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "bufcache",
            SYSCTL_DESCR("Percentage of physical memory to use for "
                "buffer cache"),
            sysctl_bufvm_update, 0, &bufcache, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READONLY,
            CTLTYPE_INT, "bufmem",
            SYSCTL_DESCR("Amount of kernel memory used by buffer "
                "cache"),
            NULL, 0, &bufmem, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "bufmem_lowater",
            SYSCTL_DESCR("Minimum amount of kernel memory to "
                "reserve for buffer cache"),
            sysctl_bufvm_update, 0, &bufmem_lowater, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "bufmem_hiwater",
            SYSCTL_DESCR("Maximum amount of kernel memory to use "
                "for buffer cache"),
            sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
        int s, i, j, count;
        struct buf *bp;
        struct bqueue *dp;
        int counts[(MAXBSIZE / PAGE_SIZE) + 1];
        static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

        for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
                count = 0;
                for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
                        counts[j] = 0;
                s = splbio();
                TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
                        counts[bp->b_bufsize/PAGE_SIZE]++;
                        count++;
                }
                splx(s);
                printf("%s: total-%d", bname[i], count);
                for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
                        if (counts[j] != 0)
                                printf(", %d-%d", j * PAGE_SIZE, counts[j]);
                printf("\n");
        }
}
#endif /* DEBUG */

/* ------------------------------ */

POOL_INIT(bufiopool, sizeof(struct buf), 0, 0, 0, "biopl", NULL);

static struct buf *
getiobuf1(int prflags)
{
        struct buf *bp;
        int s;

        s = splbio();
        bp = pool_get(&bufiopool, prflags);
        splx(s);
        if (bp != NULL) {
                BUF_INIT(bp);
        }
        return bp;
}

struct buf *
getiobuf(void)
{

        return getiobuf1(PR_WAITOK);
}

struct buf *
getiobuf_nowait(void)
{

        return getiobuf1(PR_NOWAIT);
}

void
putiobuf(struct buf *bp)
{
        int s;

        s = splbio();
        pool_put(&bufiopool, bp);
        splx(s);
}

/*
 * nestiobuf_iodone: b_iodone callback for nested buffers.
 */

static void
nestiobuf_iodone(struct buf *bp)
{
        struct buf *mbp = bp->b_private;
        int error;
        int donebytes = bp->b_bcount; /* XXX ignore b_resid */

        KASSERT(bp->b_bufsize == bp->b_bcount);
        KASSERT(mbp != bp);
        if ((bp->b_flags & B_ERROR) != 0) {
                error = bp->b_error;
        } else {
                KASSERT(bp->b_resid == 0);
                error = 0;
        }
        putiobuf(bp);
        nestiobuf_done(mbp, donebytes, error);
}

/*
 * nestiobuf_setup: setup a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
 */

void
nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
{
        const int b_read = mbp->b_flags & B_READ;
        struct vnode *vp = mbp->b_vp;

        KASSERT(mbp->b_bcount >= offset + size);
        bp->b_vp = vp;
        bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
        bp->b_iodone = nestiobuf_iodone;
        bp->b_data = mbp->b_data + offset;
        bp->b_resid = bp->b_bcount = size;
#if defined(DIAGNOSTIC)
        bp->b_bufsize = bp->b_bcount;
#endif /* defined(DIAGNOSTIC) */
        bp->b_private = mbp;
        BIO_COPYPRIO(bp, mbp);
        if (!b_read && vp != NULL) {
                int s;

                s = splbio();
                V_INCR_NUMOUTPUT(vp);
                splx(s);
        }
}

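/*
 * Usage sketch for the nested-buffer API (illustrative; the master
 * buffer setup shown here is an assumption about the caller, e.g. a
 * layered disk driver, and is not code from this file):
 *
 *	struct buf *bp1, *bp2;
 *	int half = mbp->b_bcount / 2;
 *
 *	mbp->b_resid = mbp->b_bcount;	(master counts unfinished bytes)
 *	bp1 = getiobuf();
 *	nestiobuf_setup(mbp, bp1, 0, half);
 *	bp2 = getiobuf();
 *	nestiobuf_setup(mbp, bp2, half, mbp->b_bcount - half);
 *	...set each sub-buffer's b_blkno and submit both to the
 *	underlying device's strategy routine...
 *
 * Each sub-buffer completes through nestiobuf_iodone(), which frees it
 * and credits its bytes to the master via nestiobuf_done(); once the
 * master's b_resid reaches zero, biodone(mbp) runs (see below).
 */
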
/*
 * nestiobuf_done: propagate completion to the master buffer.
 *
 * => 'donebytes' specifies how many bytes in the 'mbp' have completed.
 * => 'error' is an errno(2) with which those bytes completed.
 */

void
nestiobuf_done(struct buf *mbp, int donebytes, int error)
{
        int s;

        if (donebytes == 0) {
                return;
        }
        s = splbio();
        KASSERT(mbp->b_resid >= donebytes);
        if (error) {
                mbp->b_flags |= B_ERROR;
                mbp->b_error = error;
        }
        mbp->b_resid -= donebytes;
        if (mbp->b_resid == 0) {
                if ((mbp->b_flags & B_ERROR) != 0) {
                        mbp->b_resid = mbp->b_bcount; /* be conservative */
                }
                biodone(mbp);
        }
        splx(s);
}