/*	$NetBSD: vfs_bio.c,v 1.161 2006/05/25 14:27:28 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "fs_ffs.h"
#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.161 2006/05/25 14:27:28 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static void buf_setwm(void);
static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    kauth_cred_t, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static inline u_long buf_mempoolidx(u_long);
static inline u_long buf_roundsize(u_long);
static inline caddr_t buf_malloc(size_t);
static void buf_mrelease(caddr_t, size_t);
static inline void binsheadfree(struct buf *, struct bqueue *);
static inline void binstailfree(struct buf *, struct bqueue *);
int count_lock_queue(void);	/* XXX */
#ifdef DEBUG
static int checkfreelist(struct buf *, struct bqueue *);
#endif

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#if !defined(SOFTDEP) || !defined(FFS)
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

struct bqueue {
	TAILQ_HEAD(, buf) bq_queue;
	uint64_t bq_bytes;
} bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
static POOL_INIT(bufpool, sizeof(struct buf), 0, 0, 0, "bufpl",
    &pool_allocator_nointr);


/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 6
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 7
#else
#define NMEMPOOLS 8
#endif

#define MEMPOOL_INDEX_OFFSET 9	/* smallest pool is 512 bytes */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(buf_map,
	    MAXBSIZE, MAXBSIZE,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{

	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static void
buf_setwm(void)
{

	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
#define	BUFMEM_WMSHIFT	3
#define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
	if (bufmem_hiwater < BUFMEM_HIWMMIN)
		/* Ensure a reasonable minimum value */
		bufmem_hiwater = BUFMEM_HIWMMIN;
	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueue *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static inline void
binsheadfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static inline void
binstailfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(struct buf *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;
#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	buf_setwm();

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, 0, FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	bufmempool_allocator.pa_backingmap = buf_map;
	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && use_std)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
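	/*
	 * Worked example (added for illustration, not from the original
	 * source): with bufmem_hiwater at 6 MB this computes
	 * 6144 / 3 = 2048 buffers, which hashinit() then uses as a
	 * sizing hint for the hash table.
	 */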
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	int try, thresh;
	struct lwp *l = curlwp;

	/* Always allocate if doing copy on write */
	if (l->l_flag & L_COWINPROGRESS)
		return 1;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache, using
	 * a granularity of 16 steps.
	 */
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = (bufmem - bufmem_lowater) /
	    ((bufmem_hiwater - bufmem_lowater) / 16);

	if (try >= thresh)
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if (bufmem < bufmem_lowater)
		return 0;

	if (bufmem > bufmem_hiwater)
		return bufmem - bufmem_hiwater;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}

static inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
			"buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    int async)
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, kauth_cred_t cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, kauth_cred_t cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
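/*
 * Typical delayed-write sequence (a sketch added for illustration, not
 * part of the original source): a filesystem that modifies only part of
 * a block reads it in, updates the in-core copy, and calls bdwrite() so
 * the disk write happens later:
 *
 *	if ((error = bread(vp, lbn, size, cred, &bp)) != 0)
 *		return error;
 *	memcpy((char *)bp->b_data + off, src, len);
 *	bdwrite(bp);
 */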
/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueue *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

 start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
					"getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}
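/*
 * Note (added for illustration, not from the original source): buffer
 * memory comes from the power-of-two pools set up in bufinit(), so a
 * request is rounded up by buf_roundsize(); e.g. a 3000 byte buffer
 * occupies a 4096 byte pool item, and anything below 512 bytes still
 * consumes the smallest (512 byte) pool.
 */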
/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (curcpu()->ci_schedstate.spc_flags &
			    SPCF_SHOULDYIELD) {
				simple_unlock(&bqueue_slock);
				splx(s);
				preempt(1);
				s = splbio();
				simple_lock(&bqueue_slock);
			}

			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

 start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (!from_bufq && buf_lotsfree() &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
#if defined(DIAGNOSTIC)
		bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curproc != uvm.pagedaemon_proc) {
			/* wait for a free buffer of any kind */
			needbuffer = 1;
			ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
			    "getnewbuf", slptimeo, &bqueue_slock);
		}
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT(!ISSET(bp->b_flags, B_WANTED));
	simple_unlock(&bp->b_interlock);
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0, sz;

	s = splbio();
	simple_lock(&bqueue_slock);

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check errors. */
	if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until other
				 * buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						splx(s);
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		s = splbio();
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
		splx(s);
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	struct buf_sysctl bs;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, s, elem_count;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				error = copyout(&bs, dp, out_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int *)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (t < 0)
		return EINVAL;
	if (rnode->sysctl_data == &bufcache) {
		if (t > 100)
			return (EINVAL);
		bufcache = t;
		buf_setwm();
	} else if (rnode->sysctl_data == &bufmem_lowater) {
		if (bufmem_hiwater - t < 16)
			return (EINVAL);
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		if (t - bufmem_lowater < 16)
			return (EINVAL);
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2 * 1024)) <= 0)
			break;
	}

	return 0;
}
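/*
 * For illustration (not part of the original source): the nodes created
 * below can be inspected and tuned from userland with sysctl(8), e.g.
 *
 *	$ sysctl vm.bufmem vm.bufmem_lowater vm.bufmem_hiwater
 *	# sysctl -w vm.bufcache=20
 *
 * Writing vm.bufcache recomputes the watermarks via buf_setwm() in
 * sysctl_bufvm_update() above.
 */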
SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "buf",
		SYSCTL_DESCR("Kernel buffer cache information"),
		sysctl_dobuf, 0, NULL, 0,
		CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "vm", NULL,
		NULL, 0, NULL, 0,
		CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufcache",
		SYSCTL_DESCR("Percentage of physical memory to use for "
			     "buffer cache"),
		sysctl_bufvm_update, 0, &bufcache, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "bufmem",
		SYSCTL_DESCR("Amount of kernel memory used by buffer "
			     "cache"),
		NULL, 0, &bufmem, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufmem_lowater",
		SYSCTL_DESCR("Minimum amount of kernel memory to "
			     "reserve for buffer cache"),
		sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufmem_hiwater",
		SYSCTL_DESCR("Maximum amount of kernel memory to use "
			     "for buffer cache"),
		sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueue *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */

/* ------------------------------ */

static POOL_INIT(bufiopool, sizeof(struct buf), 0, 0, 0, "biopl", NULL);

static struct buf *
getiobuf1(int prflags)
{
	struct buf *bp;
	int s;

	s = splbio();
	bp = pool_get(&bufiopool, prflags);
	splx(s);
	if (bp != NULL) {
		BUF_INIT(bp);
	}
	return bp;
}

struct buf *
getiobuf(void)
{

	return getiobuf1(PR_WAITOK);
}

struct buf *
getiobuf_nowait(void)
{

	return getiobuf1(PR_NOWAIT);
}

void
putiobuf(struct buf *bp)
{
	int s;

	s = splbio();
	pool_put(&bufiopool, bp);
	splx(s);
}

/*
 * nestiobuf_iodone: b_iodone callback for nested buffers.
 */

static void
nestiobuf_iodone(struct buf *bp)
{
	struct buf *mbp = bp->b_private;
	int error;
	int donebytes;

	KASSERT(bp->b_bcount <= bp->b_bufsize);
	KASSERT(mbp != bp);

	error = 0;
	if ((bp->b_flags & B_ERROR) != 0) {
		error = EIO;
		/* check if an error code was returned */
		if (bp->b_error)
			error = bp->b_error;
	} else if ((bp->b_bcount < bp->b_bufsize) || (bp->b_resid > 0)) {
		/*
		 * Not all got transferred, raise an error. We have no way to
		 * propagate these conditions to mbp.
		 */
		error = EIO;
	}

	donebytes = bp->b_bufsize;

	putiobuf(bp);
	nestiobuf_done(mbp, donebytes, error);
}

/*
 * nestiobuf_setup: setup a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
 */

void
nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
{
	const int b_read = mbp->b_flags & B_READ;
	struct vnode *vp = mbp->b_vp;

	KASSERT(mbp->b_bcount >= offset + size);
	bp->b_vp = vp;
	bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
	bp->b_iodone = nestiobuf_iodone;
	bp->b_data = mbp->b_data + offset;
	bp->b_resid = bp->b_bcount = size;
	bp->b_bufsize = bp->b_bcount;
	bp->b_private = mbp;
	BIO_COPYPRIO(bp, mbp);
	if (!b_read && vp != NULL) {
		int s;

		s = splbio();
		V_INCR_NUMOUTPUT(vp);
		splx(s);
	}
}

/*
 * nestiobuf_done: propagate completion to the master buffer.
 *
 * => 'donebytes' specifies how many bytes of 'mbp' have been completed.
 * => 'error' is an errno(2) that 'donebytes' has been completed with.
 */

void
nestiobuf_done(struct buf *mbp, int donebytes, int error)
{
	int s;

	if (donebytes == 0) {
		return;
	}
	s = splbio();
	KASSERT(mbp->b_resid >= donebytes);
	if (error) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = error;
	}
	mbp->b_resid -= donebytes;
	if (mbp->b_resid == 0) {
		if ((mbp->b_flags & B_ERROR) != 0) {
			mbp->b_resid = mbp->b_bcount;	/* be conservative */
		}
		biodone(mbp);
	}
	splx(s);
}
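/*
 * Usage sketch (illustrative only, not from the original source): a
 * driver that splits one master buffer into two sub-buffers, e.g. across
 * two components of a striped device:
 *
 *	struct buf *b1 = getiobuf();
 *	struct buf *b2 = getiobuf();
 *	int half = mbp->b_bcount / 2;
 *
 *	nestiobuf_setup(mbp, b1, 0, half);
 *	nestiobuf_setup(mbp, b2, half, mbp->b_bcount - half);
 *	... issue b1 and b2 to the underlying devices ...
 *
 * Each sub-buffer's completion runs nestiobuf_iodone(), which frees it
 * and calls nestiobuf_done(); the master's biodone() fires once its
 * b_resid reaches zero.
 */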