/*	$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * The buffer cache subsystem.
 *
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 *
 * Locking
 *
 * There are three locks:
 * - bufcache_lock: protects global buffer cache state.
 * - BC_BUSY: a long term per-buffer lock.
 * - buf_t::b_objlock: lock on completion (biowait vs biodone).
 *
 * For buffers associated with vnodes (the most common case) b_objlock points
 * to the vnode_t::v_interlock.  Otherwise, it points to generic buffer_lock.
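 *
 * A minimal sketch of the resulting acquire order (this is the pattern
 * bwrite() and brelsel() below follow, not a separate routine):
 *
 *	mutex_enter(&bufcache_lock);	global queues, hash chains, flags
 *	mutex_enter(bp->b_objlock);	per-buffer completion state
 *	...
 *	mutex_exit(&bufcache_lock);	may be dropped first
 *	mutex_exit(bp->b_objlock);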
 *
 * Lock order:
 *	bufcache_lock ->
 *		buf_t::b_objlock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_bufcache.h"
#include "opt_dtrace.h"
#include "opt_biohist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/wapbl.h>
#include <sys/bitops.h>
#include <sys/cprng.h>
#include <sys/sdt.h>

#include <uvm/uvm.h>	/* extern struct uvm uvm */

#include <miscfs/specfs/specdev.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE4(io, kernel, , bbusy__start,
    "struct buf *"/*bp*/,
    "bool"/*intr*/, "int"/*timo*/, "kmutex_t *"/*interlock*/);
SDT_PROBE_DEFINE5(io, kernel, , bbusy__done,
    "struct buf *"/*bp*/,
    "bool"/*intr*/,
    "int"/*timo*/,
    "kmutex_t *"/*interlock*/,
    "int"/*error*/);
SDT_PROBE_DEFINE0(io, kernel, , getnewbuf__start);
SDT_PROBE_DEFINE1(io, kernel, , getnewbuf__done, "struct buf *"/*bp*/);
SDT_PROBE_DEFINE3(io, kernel, , getblk__start,
    "struct vnode *"/*vp*/, "daddr_t"/*blkno*/, "int"/*size*/);
SDT_PROBE_DEFINE4(io, kernel, , getblk__done,
    "struct vnode *"/*vp*/, "daddr_t"/*blkno*/, "int"/*size*/,
    "struct buf *"/*bp*/);
SDT_PROBE_DEFINE2(io, kernel, , brelse, "struct buf *"/*bp*/, "int"/*set*/);
SDT_PROBE_DEFINE1(io, kernel, , wait__start, "struct buf *"/*bp*/);
SDT_PROBE_DEFINE1(io, kernel, , wait__done, "struct buf *"/*bp*/);

#ifndef BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* desired number of buffer headers */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/*
 * Definitions for the buffer free lists.
197 */ 198 #define BQUEUES 3 /* number of free buffer queues */ 199 200 #define BQ_LOCKED 0 /* super-blocks &c */ 201 #define BQ_LRU 1 /* lru, useful buffers */ 202 #define BQ_AGE 2 /* rubbish */ 203 204 struct bqueue { 205 TAILQ_HEAD(, buf) bq_queue; 206 uint64_t bq_bytes; 207 buf_t *bq_marker; 208 }; 209 static struct bqueue bufqueues[BQUEUES] __cacheline_aligned; 210 211 /* Function prototypes */ 212 static void buf_setwm(void); 213 static int buf_trim(void); 214 static void *bufpool_page_alloc(struct pool *, int); 215 static void bufpool_page_free(struct pool *, void *); 216 static buf_t *bio_doread(struct vnode *, daddr_t, int, int); 217 static buf_t *getnewbuf(int, int, int); 218 static int buf_lotsfree(void); 219 static int buf_canrelease(void); 220 static u_long buf_mempoolidx(u_long); 221 static u_long buf_roundsize(u_long); 222 static void *buf_alloc(size_t); 223 static void buf_mrelease(void *, size_t); 224 static void binsheadfree(buf_t *, struct bqueue *); 225 static void binstailfree(buf_t *, struct bqueue *); 226 #ifdef DEBUG 227 static int checkfreelist(buf_t *, struct bqueue *, int); 228 #endif 229 static void biointr(void *); 230 static void biodone2(buf_t *); 231 static void bref(buf_t *); 232 static void brele(buf_t *); 233 static void sysctl_kern_buf_setup(void); 234 static void sysctl_vm_buf_setup(void); 235 236 /* Initialization for biohist */ 237 238 #include <sys/biohist.h> 239 240 BIOHIST_DEFINE(biohist); 241 242 void 243 biohist_init(void) 244 { 245 246 BIOHIST_INIT(biohist, BIOHIST_SIZE); 247 } 248 249 /* 250 * Definitions for the buffer hash lists. 251 */ 252 #define BUFHASH(dvp, lbn) \ 253 (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash]) 254 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; 255 u_long bufhash; 256 257 static kcondvar_t needbuffer_cv; 258 259 /* 260 * Buffer queue lock. 261 */ 262 kmutex_t bufcache_lock __cacheline_aligned; 263 kmutex_t buffer_lock __cacheline_aligned; 264 265 /* Software ISR for completed transfers. */ 266 static void *biodone_sih; 267 268 /* Buffer pool for I/O buffers. */ 269 static pool_cache_t buf_cache; 270 static pool_cache_t bufio_cache; 271 272 #define MEMPOOL_INDEX_OFFSET (ilog2(DEV_BSIZE)) /* smallest pool is 512 bytes */ 273 #define NMEMPOOLS (ilog2(MAXBSIZE) - MEMPOOL_INDEX_OFFSET + 1) 274 __CTASSERT((1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) == MAXBSIZE); 275 276 /* Buffer memory pools */ 277 static struct pool bmempools[NMEMPOOLS]; 278 279 static struct vm_map *buf_map; 280 281 /* 282 * Buffer memory pool allocator. 283 */ 284 static void * 285 bufpool_page_alloc(struct pool *pp, int flags) 286 { 287 288 return (void *)uvm_km_alloc(buf_map, 289 MAXBSIZE, MAXBSIZE, 290 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK) 291 | UVM_KMF_WIRED); 292 } 293 294 static void 295 bufpool_page_free(struct pool *pp, void *v) 296 { 297 298 uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED); 299 } 300 301 static struct pool_allocator bufmempool_allocator = { 302 .pa_alloc = bufpool_page_alloc, 303 .pa_free = bufpool_page_free, 304 .pa_pagesz = MAXBSIZE, 305 }; 306 307 /* Buffer memory management variables */ 308 u_long bufmem_valimit; 309 u_long bufmem_hiwater; 310 u_long bufmem_lowater; 311 u_long bufmem; 312 313 /* 314 * MD code can call this to set a hard limit on the amount 315 * of virtual memory used by the buffer cache. 
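 *
 * A minimal sketch of how machine-dependent bootstrap code might use it
 * (the 128 MB figure is purely illustrative, not taken from any port):
 *
 *	if (buf_setvalimit(128 * 1024 * 1024) != 0)
 *		panic("buffer cache VA limit too small");
 *
 * The call must happen before bufinit(), since bufinit() sizes buf_map
 * from bufmem_valimit.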
316 */ 317 int 318 buf_setvalimit(vsize_t sz) 319 { 320 321 /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */ 322 if (sz < NMEMPOOLS * MAXBSIZE) 323 return EINVAL; 324 325 bufmem_valimit = sz; 326 return 0; 327 } 328 329 static void 330 buf_setwm(void) 331 { 332 333 bufmem_hiwater = buf_memcalc(); 334 /* lowater is approx. 2% of memory (with bufcache = 15) */ 335 #define BUFMEM_WMSHIFT 3 336 #define BUFMEM_HIWMMIN (64 * 1024 << BUFMEM_WMSHIFT) 337 if (bufmem_hiwater < BUFMEM_HIWMMIN) 338 /* Ensure a reasonable minimum value */ 339 bufmem_hiwater = BUFMEM_HIWMMIN; 340 bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT; 341 } 342 343 #ifdef DEBUG 344 int debug_verify_freelist = 0; 345 static int 346 checkfreelist(buf_t *bp, struct bqueue *dp, int ison) 347 { 348 buf_t *b; 349 350 if (!debug_verify_freelist) 351 return 1; 352 353 TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) { 354 if (b == bp) 355 return ison ? 1 : 0; 356 } 357 358 return ison ? 0 : 1; 359 } 360 #endif 361 362 /* 363 * Insq/Remq for the buffer hash lists. 364 * Call with buffer queue locked. 365 */ 366 static void 367 binsheadfree(buf_t *bp, struct bqueue *dp) 368 { 369 370 KASSERT(mutex_owned(&bufcache_lock)); 371 KASSERT(bp->b_freelistindex == -1); 372 TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist); 373 dp->bq_bytes += bp->b_bufsize; 374 bp->b_freelistindex = dp - bufqueues; 375 } 376 377 static void 378 binstailfree(buf_t *bp, struct bqueue *dp) 379 { 380 381 KASSERT(mutex_owned(&bufcache_lock)); 382 KASSERTMSG(bp->b_freelistindex == -1, "double free of buffer? " 383 "bp=%p, b_freelistindex=%d\n", bp, bp->b_freelistindex); 384 TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist); 385 dp->bq_bytes += bp->b_bufsize; 386 bp->b_freelistindex = dp - bufqueues; 387 } 388 389 void 390 bremfree(buf_t *bp) 391 { 392 struct bqueue *dp; 393 int bqidx = bp->b_freelistindex; 394 395 KASSERT(mutex_owned(&bufcache_lock)); 396 397 KASSERT(bqidx != -1); 398 dp = &bufqueues[bqidx]; 399 KDASSERT(checkfreelist(bp, dp, 1)); 400 KASSERT(dp->bq_bytes >= bp->b_bufsize); 401 TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist); 402 dp->bq_bytes -= bp->b_bufsize; 403 404 /* For the sysctl helper. */ 405 if (bp == dp->bq_marker) 406 dp->bq_marker = NULL; 407 408 #if defined(DIAGNOSTIC) 409 bp->b_freelistindex = -1; 410 #endif /* defined(DIAGNOSTIC) */ 411 } 412 413 /* 414 * Add a reference to an buffer structure that came from buf_cache. 415 */ 416 static inline void 417 bref(buf_t *bp) 418 { 419 420 KASSERT(mutex_owned(&bufcache_lock)); 421 KASSERT(bp->b_refcnt > 0); 422 423 bp->b_refcnt++; 424 } 425 426 /* 427 * Free an unused buffer structure that came from buf_cache. 428 */ 429 static inline void 430 brele(buf_t *bp) 431 { 432 433 KASSERT(mutex_owned(&bufcache_lock)); 434 KASSERT(bp->b_refcnt > 0); 435 436 if (bp->b_refcnt-- == 1) { 437 buf_destroy(bp); 438 #ifdef DEBUG 439 memset((char *)bp, 0, sizeof(*bp)); 440 #endif 441 pool_cache_put(buf_cache, bp); 442 } 443 } 444 445 /* 446 * note that for some ports this is used by pmap bootstrap code to 447 * determine kva size. 448 */ 449 u_long 450 buf_memcalc(void) 451 { 452 u_long n; 453 vsize_t mapsz = 0; 454 455 /* 456 * Determine the upper bound of memory to use for buffers. 457 * 458 * - If bufpages is specified, use that as the number 459 * pages. 460 * 461 * - Otherwise, use bufcache as the percentage of 462 * physical memory. 
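 *
 * Rough worked example (ignoring the optional VA limit): with the
 * default bufcache = 15 on a machine with 512 MB of physical memory,
 * calc_cache_size() yields on the order of 15% of 512 MB, i.e. roughly
 * 75-80 MB, which is then clamped by bufmem_valimit if one was set.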
463 */ 464 if (bufpages != 0) { 465 n = bufpages; 466 } else { 467 if (bufcache < 5) { 468 printf("forcing bufcache %d -> 5", bufcache); 469 bufcache = 5; 470 } 471 if (bufcache > 95) { 472 printf("forcing bufcache %d -> 95", bufcache); 473 bufcache = 95; 474 } 475 if (buf_map != NULL) 476 mapsz = vm_map_max(buf_map) - vm_map_min(buf_map); 477 n = calc_cache_size(mapsz, bufcache, 478 (buf_map != kernel_map) ? 100 : BUFCACHE_VA_MAXPCT) 479 / PAGE_SIZE; 480 } 481 482 n <<= PAGE_SHIFT; 483 if (bufmem_valimit != 0 && n > bufmem_valimit) 484 n = bufmem_valimit; 485 486 return (n); 487 } 488 489 /* 490 * Initialize buffers and hash links for buffers. 491 */ 492 void 493 bufinit(void) 494 { 495 struct bqueue *dp; 496 int use_std; 497 u_int i; 498 499 biodone_vfs = biodone; 500 501 mutex_init(&bufcache_lock, MUTEX_DEFAULT, IPL_NONE); 502 mutex_init(&buffer_lock, MUTEX_DEFAULT, IPL_NONE); 503 cv_init(&needbuffer_cv, "needbuf"); 504 505 if (bufmem_valimit != 0) { 506 vaddr_t minaddr = 0, maxaddr; 507 buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 508 bufmem_valimit, 0, false, 0); 509 if (buf_map == NULL) 510 panic("bufinit: cannot allocate submap"); 511 } else 512 buf_map = kernel_map; 513 514 /* 515 * Initialize buffer cache memory parameters. 516 */ 517 bufmem = 0; 518 buf_setwm(); 519 520 /* On "small" machines use small pool page sizes where possible */ 521 use_std = (physmem < atop(16*1024*1024)); 522 523 /* 524 * Also use them on systems that can map the pool pages using 525 * a direct-mapped segment. 526 */ 527 #ifdef PMAP_MAP_POOLPAGE 528 use_std = 1; 529 #endif 530 531 buf_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0, 532 "bufpl", NULL, IPL_SOFTBIO, NULL, NULL, NULL); 533 bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0, 534 "biopl", NULL, IPL_BIO, NULL, NULL, NULL); 535 536 for (i = 0; i < NMEMPOOLS; i++) { 537 struct pool_allocator *pa; 538 struct pool *pp = &bmempools[i]; 539 u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET); 540 char *name = kmem_alloc(8, KM_SLEEP); /* XXX: never freed */ 541 if (__predict_false(size >= 1048576)) 542 (void)snprintf(name, 8, "buf%um", size / 1048576); 543 else if (__predict_true(size >= 1024)) 544 (void)snprintf(name, 8, "buf%uk", size / 1024); 545 else 546 (void)snprintf(name, 8, "buf%ub", size); 547 pa = (size <= PAGE_SIZE && use_std) 548 ? &pool_allocator_nointr 549 : &bufmempool_allocator; 550 pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE); 551 pool_setlowat(pp, 1); 552 pool_sethiwat(pp, 1); 553 } 554 555 /* Initialize the buffer queues */ 556 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) { 557 TAILQ_INIT(&dp->bq_queue); 558 dp->bq_bytes = 0; 559 } 560 561 /* 562 * Estimate hash table size based on the amount of memory we 563 * intend to use for the buffer cache. The average buffer 564 * size is dependent on our clients (i.e. filesystems). 565 * 566 * For now, use an empirical 3K per buffer. 567 */ 568 nbuf = (bufmem_hiwater / 1024) / 3; 569 bufhashtbl = hashinit(nbuf, HASH_LIST, true, &bufhash); 570 571 sysctl_kern_buf_setup(); 572 sysctl_vm_buf_setup(); 573 } 574 575 void 576 bufinit2(void) 577 { 578 579 biodone_sih = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE, biointr, 580 NULL); 581 if (biodone_sih == NULL) 582 panic("bufinit2: can't establish soft interrupt"); 583 } 584 585 static int 586 buf_lotsfree(void) 587 { 588 u_long guess; 589 590 /* Always allocate if less than the low water mark. */ 591 if (bufmem < bufmem_lowater) 592 return 1; 593 594 /* Never allocate if greater than the high water mark. 
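 * (With the default buf_setwm() policy the spread between the marks is a
 * factor of eight; e.g. bufmem_hiwater = 64 MB implies bufmem_lowater =
 * 8 MB, and the probabilistic check below covers the remaining 56 MB.)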
*/ 595 if (bufmem > bufmem_hiwater) 596 return 0; 597 598 /* If there's anything on the AGE list, it should be eaten. */ 599 if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL) 600 return 0; 601 602 /* 603 * The probabily of getting a new allocation is inversely 604 * proportional to the current size of the cache above 605 * the low water mark. Divide the total first to avoid overflows 606 * in the product. 607 */ 608 guess = cprng_fast32() % 16; 609 610 if ((bufmem_hiwater - bufmem_lowater) / 16 * guess >= 611 (bufmem - bufmem_lowater)) 612 return 1; 613 614 /* Otherwise don't allocate. */ 615 return 0; 616 } 617 618 /* 619 * Return estimate of bytes we think need to be 620 * released to help resolve low memory conditions. 621 * 622 * => called with bufcache_lock held. 623 */ 624 static int 625 buf_canrelease(void) 626 { 627 int pagedemand, ninvalid = 0; 628 629 KASSERT(mutex_owned(&bufcache_lock)); 630 631 if (bufmem < bufmem_lowater) 632 return 0; 633 634 if (bufmem > bufmem_hiwater) 635 return bufmem - bufmem_hiwater; 636 637 ninvalid += bufqueues[BQ_AGE].bq_bytes; 638 639 pagedemand = uvmexp.freetarg - uvm_availmem(); 640 if (pagedemand < 0) 641 return ninvalid; 642 return MAX(ninvalid, MIN(2 * MAXBSIZE, 643 MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE))); 644 } 645 646 /* 647 * Buffer memory allocation helper functions 648 */ 649 static u_long 650 buf_mempoolidx(u_long size) 651 { 652 u_int n = 0; 653 654 size -= 1; 655 size >>= MEMPOOL_INDEX_OFFSET; 656 while (size) { 657 size >>= 1; 658 n += 1; 659 } 660 if (n >= NMEMPOOLS) 661 panic("buf mem pool index %d", n); 662 return n; 663 } 664 665 static u_long 666 buf_roundsize(u_long size) 667 { 668 /* Round up to nearest power of 2 */ 669 return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET)); 670 } 671 672 static void * 673 buf_alloc(size_t size) 674 { 675 u_int n = buf_mempoolidx(size); 676 void *addr; 677 678 while (1) { 679 addr = pool_get(&bmempools[n], PR_NOWAIT); 680 if (addr != NULL) 681 break; 682 683 /* No memory, see if we can free some. If so, try again */ 684 mutex_enter(&bufcache_lock); 685 if (buf_drain(1) > 0) { 686 mutex_exit(&bufcache_lock); 687 continue; 688 } 689 690 if (curlwp == uvm.pagedaemon_lwp) { 691 mutex_exit(&bufcache_lock); 692 return NULL; 693 } 694 695 /* Wait for buffers to arrive on the LRU queue */ 696 cv_timedwait(&needbuffer_cv, &bufcache_lock, hz / 4); 697 mutex_exit(&bufcache_lock); 698 } 699 700 return addr; 701 } 702 703 static void 704 buf_mrelease(void *addr, size_t size) 705 { 706 707 pool_put(&bmempools[buf_mempoolidx(size)], addr); 708 } 709 710 /* 711 * bread()/breadn() helper. 712 */ 713 static buf_t * 714 bio_doread(struct vnode *vp, daddr_t blkno, int size, int async) 715 { 716 buf_t *bp; 717 struct mount *mp; 718 719 bp = getblk(vp, blkno, size, 0, 0); 720 721 /* 722 * getblk() may return NULL if we are the pagedaemon. 723 */ 724 if (bp == NULL) { 725 KASSERT(curlwp == uvm.pagedaemon_lwp); 726 return NULL; 727 } 728 729 /* 730 * If buffer does not have data valid, start a read. 731 * Note that if buffer is BC_INVAL, getblk() won't return it. 732 * Therefore, it's valid if its I/O has completed or been delayed. 733 */ 734 if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) { 735 /* Start I/O for the buffer. */ 736 SET(bp->b_flags, B_READ | async); 737 if (async) 738 BIO_SETPRIO(bp, BPRIO_TIMELIMITED); 739 else 740 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); 741 VOP_STRATEGY(vp, bp); 742 743 /* Pay for the read. 
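 * (ru_inblock is the per-LWP "block input operations" counter reported
 * by getrusage(2).)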
*/ 744 curlwp->l_ru.ru_inblock++; 745 } else if (async) 746 brelse(bp, 0); 747 748 if (vp->v_type == VBLK) 749 mp = spec_node_getmountedfs(vp); 750 else 751 mp = vp->v_mount; 752 753 /* 754 * Collect statistics on synchronous and asynchronous reads. 755 * Reads from block devices are charged to their associated 756 * filesystem (if any). 757 */ 758 if (mp != NULL) { 759 if (async == 0) 760 mp->mnt_stat.f_syncreads++; 761 else 762 mp->mnt_stat.f_asyncreads++; 763 } 764 765 return (bp); 766 } 767 768 /* 769 * Read a disk block. 770 * This algorithm described in Bach (p.54). 771 */ 772 int 773 bread(struct vnode *vp, daddr_t blkno, int size, int flags, buf_t **bpp) 774 { 775 buf_t *bp; 776 int error; 777 778 BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist); 779 780 /* Get buffer for block. */ 781 bp = *bpp = bio_doread(vp, blkno, size, 0); 782 if (bp == NULL) 783 return ENOMEM; 784 785 /* Wait for the read to complete, and return result. */ 786 error = biowait(bp); 787 if (error == 0 && (flags & B_MODIFY) != 0) 788 error = fscow_run(bp, true); 789 if (error) { 790 brelse(bp, 0); 791 *bpp = NULL; 792 } 793 794 return error; 795 } 796 797 /* 798 * Read-ahead multiple disk blocks. The first is sync, the rest async. 799 * Trivial modification to the breada algorithm presented in Bach (p.55). 800 */ 801 int 802 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks, 803 int *rasizes, int nrablks, int flags, buf_t **bpp) 804 { 805 buf_t *bp; 806 int error, i; 807 808 BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist); 809 810 bp = *bpp = bio_doread(vp, blkno, size, 0); 811 if (bp == NULL) 812 return ENOMEM; 813 814 /* 815 * For each of the read-ahead blocks, start a read, if necessary. 816 */ 817 mutex_enter(&bufcache_lock); 818 for (i = 0; i < nrablks; i++) { 819 /* If it's in the cache, just go on to next one. */ 820 if (incore(vp, rablks[i])) 821 continue; 822 823 /* Get a buffer for the read-ahead block */ 824 mutex_exit(&bufcache_lock); 825 (void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC); 826 mutex_enter(&bufcache_lock); 827 } 828 mutex_exit(&bufcache_lock); 829 830 /* Otherwise, we had to start a read for it; wait until it's valid. */ 831 error = biowait(bp); 832 if (error == 0 && (flags & B_MODIFY) != 0) 833 error = fscow_run(bp, true); 834 if (error) { 835 brelse(bp, 0); 836 *bpp = NULL; 837 } 838 839 return error; 840 } 841 842 /* 843 * Block write. Described in Bach (p.56) 844 */ 845 int 846 bwrite(buf_t *bp) 847 { 848 int rv, sync, wasdelayed; 849 struct vnode *vp; 850 struct mount *mp; 851 852 BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx", 853 (uintptr_t)bp, 0, 0, 0); 854 855 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 856 KASSERT(!cv_has_waiters(&bp->b_done)); 857 858 vp = bp->b_vp; 859 860 /* 861 * dholland 20160728 AFAICT vp==NULL must be impossible as it 862 * will crash upon reaching VOP_STRATEGY below... see further 863 * analysis on tech-kern. 864 */ 865 KASSERTMSG(vp != NULL, "bwrite given buffer with null vnode"); 866 867 if (vp != NULL) { 868 KASSERT(bp->b_objlock == vp->v_interlock); 869 if (vp->v_type == VBLK) 870 mp = spec_node_getmountedfs(vp); 871 else 872 mp = vp->v_mount; 873 } else { 874 mp = NULL; 875 } 876 877 if (mp && mp->mnt_wapbl) { 878 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) { 879 bdwrite(bp); 880 return 0; 881 } 882 } 883 884 /* 885 * Remember buffer type, to switch on it later. If the write was 886 * synchronous, but the file system was mounted with MNT_ASYNC, 887 * convert it to a delayed write. 
888 * XXX note that this relies on delayed tape writes being converted 889 * to async, not sync writes (which is safe, but ugly). 890 */ 891 sync = !ISSET(bp->b_flags, B_ASYNC); 892 if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) { 893 bdwrite(bp); 894 return (0); 895 } 896 897 /* 898 * Collect statistics on synchronous and asynchronous writes. 899 * Writes to block devices are charged to their associated 900 * filesystem (if any). 901 */ 902 if (mp != NULL) { 903 if (sync) 904 mp->mnt_stat.f_syncwrites++; 905 else 906 mp->mnt_stat.f_asyncwrites++; 907 } 908 909 /* 910 * Pay for the I/O operation and make sure the buf is on the correct 911 * vnode queue. 912 */ 913 bp->b_error = 0; 914 wasdelayed = ISSET(bp->b_oflags, BO_DELWRI); 915 CLR(bp->b_flags, B_READ); 916 if (wasdelayed) { 917 mutex_enter(&bufcache_lock); 918 mutex_enter(bp->b_objlock); 919 CLR(bp->b_oflags, BO_DONE | BO_DELWRI); 920 reassignbuf(bp, bp->b_vp); 921 /* Wake anyone trying to busy the buffer via vnode's lists. */ 922 cv_broadcast(&bp->b_busy); 923 mutex_exit(&bufcache_lock); 924 } else { 925 curlwp->l_ru.ru_oublock++; 926 mutex_enter(bp->b_objlock); 927 CLR(bp->b_oflags, BO_DONE | BO_DELWRI); 928 } 929 if (vp != NULL) 930 vp->v_numoutput++; 931 mutex_exit(bp->b_objlock); 932 933 /* Initiate disk write. */ 934 if (sync) 935 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); 936 else 937 BIO_SETPRIO(bp, BPRIO_TIMELIMITED); 938 939 VOP_STRATEGY(vp, bp); 940 941 if (sync) { 942 /* If I/O was synchronous, wait for it to complete. */ 943 rv = biowait(bp); 944 945 /* Release the buffer. */ 946 brelse(bp, 0); 947 948 return (rv); 949 } else { 950 return (0); 951 } 952 } 953 954 int 955 vn_bwrite(void *v) 956 { 957 struct vop_bwrite_args *ap = v; 958 959 return (bwrite(ap->a_bp)); 960 } 961 962 /* 963 * Delayed write. 964 * 965 * The buffer is marked dirty, but is not queued for I/O. 966 * This routine should be used when the buffer is expected 967 * to be modified again soon, typically a small write that 968 * partially fills a buffer. 969 * 970 * NB: magnetic tapes cannot be delayed; they must be 971 * written in the order that the writes are requested. 972 * 973 * Described in Leffler, et al. (pp. 208-213). 974 */ 975 void 976 bdwrite(buf_t *bp) 977 { 978 979 BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx", 980 (uintptr_t)bp, 0, 0, 0); 981 982 KASSERT(bp->b_vp == NULL || bp->b_vp->v_tag != VT_UFS || 983 bp->b_vp->v_type == VBLK || ISSET(bp->b_flags, B_COWDONE)); 984 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 985 KASSERT(!cv_has_waiters(&bp->b_done)); 986 987 /* If this is a tape block, write the block now. */ 988 if (bdev_type(bp->b_dev) == D_TAPE) { 989 bawrite(bp); 990 return; 991 } 992 993 if (wapbl_vphaswapbl(bp->b_vp)) { 994 struct mount *mp = wapbl_vptomp(bp->b_vp); 995 996 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) { 997 WAPBL_ADD_BUF(mp, bp); 998 } 999 } 1000 1001 /* 1002 * If the block hasn't been seen before: 1003 * (1) Mark it as having been seen, 1004 * (2) Charge for the write, 1005 * (3) Make sure it's on its vnode's correct block list. 1006 */ 1007 KASSERT(bp->b_vp == NULL || bp->b_objlock == bp->b_vp->v_interlock); 1008 1009 if (!ISSET(bp->b_oflags, BO_DELWRI)) { 1010 mutex_enter(&bufcache_lock); 1011 mutex_enter(bp->b_objlock); 1012 SET(bp->b_oflags, BO_DELWRI); 1013 curlwp->l_ru.ru_oublock++; 1014 reassignbuf(bp, bp->b_vp); 1015 /* Wake anyone trying to busy the buffer via vnode's lists. 
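 * (That is, threads sleeping in bbusy() after finding this buffer on
 * vp->v_dirtyblkhd or v_cleanblkhd; reassignbuf() above may have just
 * moved it between those lists.)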
*/ 1016 cv_broadcast(&bp->b_busy); 1017 mutex_exit(&bufcache_lock); 1018 } else { 1019 mutex_enter(bp->b_objlock); 1020 } 1021 /* Otherwise, the "write" is done, so mark and release the buffer. */ 1022 CLR(bp->b_oflags, BO_DONE); 1023 mutex_exit(bp->b_objlock); 1024 1025 brelse(bp, 0); 1026 } 1027 1028 /* 1029 * Asynchronous block write; just an asynchronous bwrite(). 1030 */ 1031 void 1032 bawrite(buf_t *bp) 1033 { 1034 1035 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 1036 KASSERT(bp->b_vp != NULL); 1037 1038 SET(bp->b_flags, B_ASYNC); 1039 VOP_BWRITE(bp->b_vp, bp); 1040 } 1041 1042 /* 1043 * Release a buffer on to the free lists. 1044 * Described in Bach (p. 46). 1045 */ 1046 void 1047 brelsel(buf_t *bp, int set) 1048 { 1049 struct bqueue *bufq; 1050 struct vnode *vp; 1051 1052 SDT_PROBE2(io, kernel, , brelse, bp, set); 1053 1054 KASSERT(bp != NULL); 1055 KASSERT(mutex_owned(&bufcache_lock)); 1056 KASSERT(!cv_has_waiters(&bp->b_done)); 1057 KASSERT(bp->b_refcnt > 0); 1058 1059 SET(bp->b_cflags, set); 1060 1061 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 1062 KASSERT(bp->b_iodone == NULL); 1063 1064 /* Wake up any processes waiting for any buffer to become free. */ 1065 cv_signal(&needbuffer_cv); 1066 1067 /* Wake up any proceeses waiting for _this_ buffer to become free */ 1068 if (ISSET(bp->b_cflags, BC_WANTED)) 1069 CLR(bp->b_cflags, BC_WANTED|BC_AGE); 1070 1071 /* If it's clean clear the copy-on-write flag. */ 1072 if (ISSET(bp->b_flags, B_COWDONE)) { 1073 mutex_enter(bp->b_objlock); 1074 if (!ISSET(bp->b_oflags, BO_DELWRI)) 1075 CLR(bp->b_flags, B_COWDONE); 1076 mutex_exit(bp->b_objlock); 1077 } 1078 1079 /* 1080 * Determine which queue the buffer should be on, then put it there. 1081 */ 1082 1083 /* If it's locked, don't report an error; try again later. */ 1084 if (ISSET(bp->b_flags, B_LOCKED)) 1085 bp->b_error = 0; 1086 1087 /* If it's not cacheable, or an error, mark it invalid. */ 1088 if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0) 1089 SET(bp->b_cflags, BC_INVAL); 1090 1091 if (ISSET(bp->b_cflags, BC_VFLUSH)) { 1092 /* 1093 * This is a delayed write buffer that was just flushed to 1094 * disk. It is still on the LRU queue. If it's become 1095 * invalid, then we need to move it to a different queue; 1096 * otherwise leave it in its current position. 1097 */ 1098 CLR(bp->b_cflags, BC_VFLUSH); 1099 if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) && 1100 !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) { 1101 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 1)); 1102 goto already_queued; 1103 } else { 1104 bremfree(bp); 1105 } 1106 } 1107 1108 KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE], 0)); 1109 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 0)); 1110 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED], 0)); 1111 1112 if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) { 1113 /* 1114 * If it's invalid or empty, dissociate it from its vnode 1115 * and put on the head of the appropriate queue. 
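 * Head insertion matters here: getnewbuf() scans BQ_AGE from the front,
 * so an invalidated buffer becomes one of the first to be recycled.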
1116 */ 1117 if (ISSET(bp->b_flags, B_LOCKED)) { 1118 if (wapbl_vphaswapbl(vp = bp->b_vp)) { 1119 struct mount *mp = wapbl_vptomp(vp); 1120 1121 KASSERT(bp->b_iodone 1122 != mp->mnt_wapbl_op->wo_wapbl_biodone); 1123 WAPBL_REMOVE_BUF(mp, bp); 1124 } 1125 } 1126 1127 mutex_enter(bp->b_objlock); 1128 CLR(bp->b_oflags, BO_DONE|BO_DELWRI); 1129 if ((vp = bp->b_vp) != NULL) { 1130 KASSERT(bp->b_objlock == vp->v_interlock); 1131 reassignbuf(bp, bp->b_vp); 1132 brelvp(bp); 1133 mutex_exit(vp->v_interlock); 1134 } else { 1135 KASSERT(bp->b_objlock == &buffer_lock); 1136 mutex_exit(bp->b_objlock); 1137 } 1138 /* We want to dispose of the buffer, so wake everybody. */ 1139 cv_broadcast(&bp->b_busy); 1140 if (bp->b_bufsize <= 0) 1141 /* no data */ 1142 goto already_queued; 1143 else 1144 /* invalid data */ 1145 bufq = &bufqueues[BQ_AGE]; 1146 binsheadfree(bp, bufq); 1147 } else { 1148 /* 1149 * It has valid data. Put it on the end of the appropriate 1150 * queue, so that it'll stick around for as long as possible. 1151 * If buf is AGE, but has dependencies, must put it on last 1152 * bufqueue to be scanned, ie LRU. This protects against the 1153 * livelock where BQ_AGE only has buffers with dependencies, 1154 * and we thus never get to the dependent buffers in BQ_LRU. 1155 */ 1156 if (ISSET(bp->b_flags, B_LOCKED)) { 1157 /* locked in core */ 1158 bufq = &bufqueues[BQ_LOCKED]; 1159 } else if (!ISSET(bp->b_cflags, BC_AGE)) { 1160 /* valid data */ 1161 bufq = &bufqueues[BQ_LRU]; 1162 } else { 1163 /* stale but valid data */ 1164 bufq = &bufqueues[BQ_AGE]; 1165 } 1166 binstailfree(bp, bufq); 1167 } 1168 already_queued: 1169 /* Unlock the buffer. */ 1170 CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE); 1171 CLR(bp->b_flags, B_ASYNC); 1172 1173 /* 1174 * Wake only the highest priority waiter on the lock, in order to 1175 * prevent a thundering herd: many LWPs simultaneously awakening and 1176 * competing for the buffer's lock. Testing in 2019 revealed this 1177 * to reduce contention on bufcache_lock tenfold during a kernel 1178 * compile. Elsewhere, when the buffer is changing identity, being 1179 * disposed of, or moving from one list to another, we wake all lock 1180 * requestors. 1181 */ 1182 cv_signal(&bp->b_busy); 1183 1184 if (bp->b_bufsize <= 0) 1185 brele(bp); 1186 } 1187 1188 void 1189 brelse(buf_t *bp, int set) 1190 { 1191 1192 mutex_enter(&bufcache_lock); 1193 brelsel(bp, set); 1194 mutex_exit(&bufcache_lock); 1195 } 1196 1197 /* 1198 * Determine if a block is in the cache. 1199 * Just look on what would be its hash chain. If it's there, return 1200 * a pointer to it, unless it's marked invalid. If it's marked invalid, 1201 * we normally don't return the buffer, unless the caller explicitly 1202 * wants us to. 1203 */ 1204 buf_t * 1205 incore(struct vnode *vp, daddr_t blkno) 1206 { 1207 buf_t *bp; 1208 1209 KASSERT(mutex_owned(&bufcache_lock)); 1210 1211 /* Search hash chain */ 1212 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) { 1213 if (bp->b_lblkno == blkno && bp->b_vp == vp && 1214 !ISSET(bp->b_cflags, BC_INVAL)) { 1215 KASSERT(bp->b_objlock == vp->v_interlock); 1216 return (bp); 1217 } 1218 } 1219 1220 return (NULL); 1221 } 1222 1223 /* 1224 * Get a block of requested size that is associated with 1225 * a given vnode and block offset. If it is found in the 1226 * block cache, mark it as having been found, make it busy 1227 * and return it. Otherwise, return an empty block of the 1228 * correct size. It is up to the caller to insure that the 1229 * cached blocks be of the correct size. 
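 *
 * Illustrative sketch of the usual allocate-fill-write pattern built on
 * getblk(); error handling is trimmed and "lbn"/"data" are hypothetical:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if (bp == NULL)
 *		return ENOMEM;		(only the pagedaemon sees NULL)
 *	memcpy(bp->b_data, data, bsize);
 *	error = bwrite(bp);		(or bdwrite()/bawrite())
 *
 * For reads, bread() above wraps getblk() + VOP_STRATEGY() + biowait().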
1230 */ 1231 buf_t * 1232 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1233 { 1234 int err, preserve; 1235 buf_t *bp; 1236 1237 mutex_enter(&bufcache_lock); 1238 SDT_PROBE3(io, kernel, , getblk__start, vp, blkno, size); 1239 loop: 1240 bp = incore(vp, blkno); 1241 if (bp != NULL) { 1242 err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL); 1243 if (err != 0) { 1244 if (err == EPASSTHROUGH) 1245 goto loop; 1246 mutex_exit(&bufcache_lock); 1247 SDT_PROBE4(io, kernel, , getblk__done, 1248 vp, blkno, size, NULL); 1249 return (NULL); 1250 } 1251 KASSERT(!cv_has_waiters(&bp->b_done)); 1252 #ifdef DIAGNOSTIC 1253 if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) && 1254 bp->b_bcount < size && vp->v_type != VBLK) 1255 panic("getblk: block size invariant failed"); 1256 #endif 1257 bremfree(bp); 1258 preserve = 1; 1259 } else { 1260 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) 1261 goto loop; 1262 1263 if (incore(vp, blkno) != NULL) { 1264 /* The block has come into memory in the meantime. */ 1265 brelsel(bp, 0); 1266 goto loop; 1267 } 1268 1269 LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash); 1270 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno; 1271 mutex_enter(vp->v_interlock); 1272 bgetvp(vp, bp); 1273 mutex_exit(vp->v_interlock); 1274 preserve = 0; 1275 } 1276 mutex_exit(&bufcache_lock); 1277 1278 /* 1279 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes) 1280 * if we re-size buffers here. 1281 */ 1282 if (ISSET(bp->b_flags, B_LOCKED)) { 1283 KASSERT(bp->b_bufsize >= size); 1284 } else { 1285 if (allocbuf(bp, size, preserve)) { 1286 mutex_enter(&bufcache_lock); 1287 LIST_REMOVE(bp, b_hash); 1288 brelsel(bp, BC_INVAL); 1289 mutex_exit(&bufcache_lock); 1290 SDT_PROBE4(io, kernel, , getblk__done, 1291 vp, blkno, size, NULL); 1292 return NULL; 1293 } 1294 } 1295 BIO_SETPRIO(bp, BPRIO_DEFAULT); 1296 SDT_PROBE4(io, kernel, , getblk__done, vp, blkno, size, bp); 1297 return (bp); 1298 } 1299 1300 /* 1301 * Get an empty, disassociated buffer of given size. 1302 */ 1303 buf_t * 1304 geteblk(int size) 1305 { 1306 buf_t *bp; 1307 int error __diagused; 1308 1309 mutex_enter(&bufcache_lock); 1310 while ((bp = getnewbuf(0, 0, 0)) == NULL) 1311 ; 1312 1313 SET(bp->b_cflags, BC_INVAL); 1314 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1315 mutex_exit(&bufcache_lock); 1316 BIO_SETPRIO(bp, BPRIO_DEFAULT); 1317 error = allocbuf(bp, size, 0); 1318 KASSERT(error == 0); 1319 return (bp); 1320 } 1321 1322 /* 1323 * Expand or contract the actual memory allocated to a buffer. 1324 * 1325 * If the buffer shrinks, data is lost, so it's up to the 1326 * caller to have written it out *first*; this routine will not 1327 * start a write. If the buffer grows, it's the callers 1328 * responsibility to fill out the buffer's additional contents. 1329 */ 1330 int 1331 allocbuf(buf_t *bp, int size, int preserve) 1332 { 1333 void *addr; 1334 vsize_t oldsize, desired_size; 1335 int oldcount; 1336 int delta; 1337 1338 desired_size = buf_roundsize(size); 1339 if (desired_size > MAXBSIZE) 1340 printf("allocbuf: buffer larger than MAXBSIZE requested"); 1341 1342 oldcount = bp->b_bcount; 1343 1344 bp->b_bcount = size; 1345 1346 oldsize = bp->b_bufsize; 1347 if (oldsize == desired_size) { 1348 /* 1349 * Do not short cut the WAPBL resize, as the buffer length 1350 * could still have changed and this would corrupt the 1351 * tracking of the transaction length. 
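 * (The "out" label below still runs WAPBL_RESIZE_BUF(), which tells the
 * journal about the new b_bcount even though b_bufsize is unchanged.)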
1352 */ 1353 goto out; 1354 } 1355 1356 /* 1357 * If we want a buffer of a different size, re-allocate the 1358 * buffer's memory; copy old content only if needed. 1359 */ 1360 addr = buf_alloc(desired_size); 1361 if (addr == NULL) 1362 return ENOMEM; 1363 if (preserve) 1364 memcpy(addr, bp->b_data, MIN(oldsize,desired_size)); 1365 if (bp->b_data != NULL) 1366 buf_mrelease(bp->b_data, oldsize); 1367 bp->b_data = addr; 1368 bp->b_bufsize = desired_size; 1369 1370 /* 1371 * Update overall buffer memory counter (protected by bufcache_lock) 1372 */ 1373 delta = (long)desired_size - (long)oldsize; 1374 1375 mutex_enter(&bufcache_lock); 1376 if ((bufmem += delta) > bufmem_hiwater) { 1377 /* 1378 * Need to trim overall memory usage. 1379 */ 1380 while (buf_canrelease()) { 1381 if (preempt_needed()) { 1382 mutex_exit(&bufcache_lock); 1383 preempt(); 1384 mutex_enter(&bufcache_lock); 1385 } 1386 if (buf_trim() == 0) 1387 break; 1388 } 1389 } 1390 mutex_exit(&bufcache_lock); 1391 1392 out: 1393 if (wapbl_vphaswapbl(bp->b_vp)) 1394 WAPBL_RESIZE_BUF(wapbl_vptomp(bp->b_vp), bp, oldsize, oldcount); 1395 1396 return 0; 1397 } 1398 1399 /* 1400 * Find a buffer which is available for use. 1401 * Select something from a free list. 1402 * Preference is to AGE list, then LRU list. 1403 * 1404 * Called with the buffer queues locked. 1405 * Return buffer locked. 1406 */ 1407 static buf_t * 1408 getnewbuf(int slpflag, int slptimeo, int from_bufq) 1409 { 1410 buf_t *bp; 1411 struct vnode *vp; 1412 struct mount *transmp = NULL; 1413 1414 SDT_PROBE0(io, kernel, , getnewbuf__start); 1415 1416 start: 1417 KASSERT(mutex_owned(&bufcache_lock)); 1418 1419 /* 1420 * Get a new buffer from the pool. 1421 */ 1422 if (!from_bufq && buf_lotsfree()) { 1423 mutex_exit(&bufcache_lock); 1424 bp = pool_cache_get(buf_cache, PR_NOWAIT); 1425 if (bp != NULL) { 1426 memset((char *)bp, 0, sizeof(*bp)); 1427 buf_init(bp); 1428 SET(bp->b_cflags, BC_BUSY); /* mark buffer busy */ 1429 mutex_enter(&bufcache_lock); 1430 #if defined(DIAGNOSTIC) 1431 bp->b_freelistindex = -1; 1432 #endif /* defined(DIAGNOSTIC) */ 1433 SDT_PROBE1(io, kernel, , getnewbuf__done, bp); 1434 return (bp); 1435 } 1436 mutex_enter(&bufcache_lock); 1437 } 1438 1439 KASSERT(mutex_owned(&bufcache_lock)); 1440 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL) { 1441 KASSERT(!ISSET(bp->b_oflags, BO_DELWRI)); 1442 } else { 1443 TAILQ_FOREACH(bp, &bufqueues[BQ_LRU].bq_queue, b_freelist) { 1444 if (ISSET(bp->b_cflags, BC_VFLUSH) || 1445 !ISSET(bp->b_oflags, BO_DELWRI)) 1446 break; 1447 if (fstrans_start_nowait(bp->b_vp->v_mount) == 0) { 1448 KASSERT(transmp == NULL); 1449 transmp = bp->b_vp->v_mount; 1450 break; 1451 } 1452 } 1453 } 1454 if (bp != NULL) { 1455 KASSERT(!ISSET(bp->b_cflags, BC_BUSY) || ISSET(bp->b_cflags, BC_VFLUSH)); 1456 bremfree(bp); 1457 1458 /* Buffer is no longer on free lists. */ 1459 SET(bp->b_cflags, BC_BUSY); 1460 1461 /* Wake anyone trying to lock the old identity. */ 1462 cv_broadcast(&bp->b_busy); 1463 } else { 1464 /* 1465 * XXX: !from_bufq should be removed. 
1466 */ 1467 if (!from_bufq || curlwp != uvm.pagedaemon_lwp) { 1468 /* wait for a free buffer of any kind */ 1469 if ((slpflag & PCATCH) != 0) 1470 (void)cv_timedwait_sig(&needbuffer_cv, 1471 &bufcache_lock, slptimeo); 1472 else 1473 (void)cv_timedwait(&needbuffer_cv, 1474 &bufcache_lock, slptimeo); 1475 } 1476 SDT_PROBE1(io, kernel, , getnewbuf__done, NULL); 1477 return (NULL); 1478 } 1479 1480 #ifdef DIAGNOSTIC 1481 if (bp->b_bufsize <= 0) 1482 panic("buffer %p: on queue but empty", bp); 1483 #endif 1484 1485 if (ISSET(bp->b_cflags, BC_VFLUSH)) { 1486 /* 1487 * This is a delayed write buffer being flushed to disk. Make 1488 * sure it gets aged out of the queue when it's finished, and 1489 * leave it off the LRU queue. 1490 */ 1491 CLR(bp->b_cflags, BC_VFLUSH); 1492 SET(bp->b_cflags, BC_AGE); 1493 goto start; 1494 } 1495 1496 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 1497 KASSERT(bp->b_refcnt > 0); 1498 KASSERT(!cv_has_waiters(&bp->b_done)); 1499 1500 /* 1501 * If buffer was a delayed write, start it and return NULL 1502 * (since we might sleep while starting the write). 1503 */ 1504 if (ISSET(bp->b_oflags, BO_DELWRI)) { 1505 /* 1506 * This buffer has gone through the LRU, so make sure it gets 1507 * reused ASAP. 1508 */ 1509 SET(bp->b_cflags, BC_AGE); 1510 mutex_exit(&bufcache_lock); 1511 bawrite(bp); 1512 KASSERT(transmp != NULL); 1513 fstrans_done(transmp); 1514 mutex_enter(&bufcache_lock); 1515 SDT_PROBE1(io, kernel, , getnewbuf__done, NULL); 1516 return (NULL); 1517 } 1518 1519 KASSERT(transmp == NULL); 1520 1521 vp = bp->b_vp; 1522 1523 /* clear out various other fields */ 1524 bp->b_cflags = BC_BUSY; 1525 bp->b_oflags = 0; 1526 bp->b_flags = 0; 1527 bp->b_dev = NODEV; 1528 bp->b_blkno = 0; 1529 bp->b_lblkno = 0; 1530 bp->b_rawblkno = 0; 1531 bp->b_iodone = 0; 1532 bp->b_error = 0; 1533 bp->b_resid = 0; 1534 bp->b_bcount = 0; 1535 1536 LIST_REMOVE(bp, b_hash); 1537 1538 /* Disassociate us from our vnode, if we had one... */ 1539 if (vp != NULL) { 1540 mutex_enter(vp->v_interlock); 1541 brelvp(bp); 1542 mutex_exit(vp->v_interlock); 1543 } 1544 1545 SDT_PROBE1(io, kernel, , getnewbuf__done, bp); 1546 return (bp); 1547 } 1548 1549 /* 1550 * Attempt to free an aged buffer off the queues. 1551 * Called with queue lock held. 1552 * Returns the amount of buffer memory freed. 1553 */ 1554 static int 1555 buf_trim(void) 1556 { 1557 buf_t *bp; 1558 long size; 1559 1560 KASSERT(mutex_owned(&bufcache_lock)); 1561 1562 /* Instruct getnewbuf() to get buffers off the queues */ 1563 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL) 1564 return 0; 1565 1566 KASSERT((bp->b_cflags & BC_WANTED) == 0); 1567 size = bp->b_bufsize; 1568 bufmem -= size; 1569 if (size > 0) { 1570 buf_mrelease(bp->b_data, size); 1571 bp->b_bcount = bp->b_bufsize = 0; 1572 } 1573 /* brelse() will return the buffer to the global buffer pool */ 1574 brelsel(bp, 0); 1575 return size; 1576 } 1577 1578 int 1579 buf_drain(int n) 1580 { 1581 int size = 0, sz; 1582 1583 KASSERT(mutex_owned(&bufcache_lock)); 1584 1585 while (size < n && bufmem > bufmem_lowater) { 1586 sz = buf_trim(); 1587 if (sz <= 0) 1588 break; 1589 size += sz; 1590 } 1591 1592 return size; 1593 } 1594 1595 /* 1596 * Wait for operations on the buffer to complete. 1597 * When they do, extract and return the I/O's error value. 
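 *
 * A minimal sketch of the synchronous-I/O pattern this supports; it is
 * the same sequence bread() and bwrite() in this file use:
 *
 *	VOP_STRATEGY(vp, bp);		start the transfer
 *	error = biowait(bp);		sleep until biodone() fires
 *	brelse(bp, 0);			then release the buffer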
1598 */ 1599 int 1600 biowait(buf_t *bp) 1601 { 1602 1603 BIOHIST_FUNC(__func__); 1604 1605 KASSERT(ISSET(bp->b_cflags, BC_BUSY)); 1606 KASSERT(bp->b_refcnt > 0); 1607 1608 SDT_PROBE1(io, kernel, , wait__start, bp); 1609 1610 mutex_enter(bp->b_objlock); 1611 1612 BIOHIST_CALLARGS(biohist, "bp=%#jx, oflags=0x%jx, ret_addr=%#jx", 1613 (uintptr_t)bp, bp->b_oflags, 1614 (uintptr_t)__builtin_return_address(0), 0); 1615 1616 while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI)) { 1617 BIOHIST_LOG(biohist, "waiting bp=%#jx", (uintptr_t)bp, 0, 0, 0); 1618 cv_wait(&bp->b_done, bp->b_objlock); 1619 } 1620 mutex_exit(bp->b_objlock); 1621 1622 SDT_PROBE1(io, kernel, , wait__done, bp); 1623 1624 BIOHIST_LOG(biohist, "return %jd", bp->b_error, 0, 0, 0); 1625 1626 return bp->b_error; 1627 } 1628 1629 /* 1630 * Mark I/O complete on a buffer. 1631 * 1632 * If a callback has been requested, e.g. the pageout 1633 * daemon, do so. Otherwise, awaken waiting processes. 1634 * 1635 * [ Leffler, et al., says on p.247: 1636 * "This routine wakes up the blocked process, frees the buffer 1637 * for an asynchronous write, or, for a request by the pagedaemon 1638 * process, invokes a procedure specified in the buffer structure" ] 1639 * 1640 * In real life, the pagedaemon (or other system processes) wants 1641 * to do async stuff too, and doesn't want the buffer brelse()'d. 1642 * (for swap pager, that puts swap buffers on the free lists (!!!), 1643 * for the vn device, that puts allocated buffers on the free lists!) 1644 */ 1645 void 1646 biodone(buf_t *bp) 1647 { 1648 int s; 1649 1650 BIOHIST_FUNC(__func__); 1651 1652 KASSERT(!ISSET(bp->b_oflags, BO_DONE)); 1653 1654 if (cpu_intr_p()) { 1655 /* From interrupt mode: defer to a soft interrupt. */ 1656 s = splvm(); 1657 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq); 1658 1659 BIOHIST_CALLARGS(biohist, "bp=%#jx, softint scheduled", 1660 (uintptr_t)bp, 0, 0, 0); 1661 softint_schedule(biodone_sih); 1662 splx(s); 1663 } else { 1664 /* Process now - the buffer may be freed soon. */ 1665 biodone2(bp); 1666 } 1667 } 1668 1669 SDT_PROBE_DEFINE1(io, kernel, , done, "struct buf *"/*bp*/); 1670 1671 static void 1672 biodone2(buf_t *bp) 1673 { 1674 void (*callout)(buf_t *); 1675 1676 SDT_PROBE1(io, kernel, ,done, bp); 1677 1678 BIOHIST_FUNC(__func__); 1679 BIOHIST_CALLARGS(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0); 1680 1681 mutex_enter(bp->b_objlock); 1682 /* Note that the transfer is done. */ 1683 if (ISSET(bp->b_oflags, BO_DONE)) 1684 panic("biodone2 already"); 1685 CLR(bp->b_flags, B_COWDONE); 1686 SET(bp->b_oflags, BO_DONE); 1687 BIO_SETPRIO(bp, BPRIO_DEFAULT); 1688 1689 /* Wake up waiting writers. */ 1690 if (!ISSET(bp->b_flags, B_READ)) 1691 vwakeup(bp); 1692 1693 if ((callout = bp->b_iodone) != NULL) { 1694 BIOHIST_LOG(biohist, "callout %#jx", (uintptr_t)callout, 1695 0, 0, 0); 1696 1697 /* Note callout done, then call out. */ 1698 KASSERT(!cv_has_waiters(&bp->b_done)); 1699 bp->b_iodone = NULL; 1700 mutex_exit(bp->b_objlock); 1701 (*callout)(bp); 1702 } else if (ISSET(bp->b_flags, B_ASYNC)) { 1703 /* If async, release. */ 1704 BIOHIST_LOG(biohist, "async", 0, 0, 0, 0); 1705 KASSERT(!cv_has_waiters(&bp->b_done)); 1706 mutex_exit(bp->b_objlock); 1707 brelse(bp, 0); 1708 } else { 1709 /* Otherwise just wake up waiters in biowait(). 
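 * (They sleep on b_done in biowait() above, holding b_objlock, until
 * BO_DONE or BO_DELWRI is set.)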
*/ 1710 BIOHIST_LOG(biohist, "wake-up", 0, 0, 0, 0); 1711 cv_broadcast(&bp->b_done); 1712 mutex_exit(bp->b_objlock); 1713 } 1714 } 1715 1716 static void 1717 biointr(void *cookie) 1718 { 1719 struct cpu_info *ci; 1720 buf_t *bp; 1721 int s; 1722 1723 BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist); 1724 1725 ci = curcpu(); 1726 1727 s = splvm(); 1728 while (!TAILQ_EMPTY(&ci->ci_data.cpu_biodone)) { 1729 KASSERT(curcpu() == ci); 1730 1731 bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone); 1732 TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq); 1733 splx(s); 1734 1735 BIOHIST_LOG(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0); 1736 biodone2(bp); 1737 1738 s = splvm(); 1739 } 1740 splx(s); 1741 } 1742 1743 /* 1744 * Wait for all buffers to complete I/O 1745 * Return the number of "stuck" buffers. 1746 */ 1747 int 1748 buf_syncwait(void) 1749 { 1750 buf_t *bp; 1751 int iter, nbusy, nbusy_prev = 0, ihash; 1752 1753 BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist); 1754 1755 for (iter = 0; iter < 20;) { 1756 mutex_enter(&bufcache_lock); 1757 nbusy = 0; 1758 for (ihash = 0; ihash < bufhash+1; ihash++) { 1759 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) { 1760 if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY) 1761 nbusy += ((bp->b_flags & B_READ) == 0); 1762 } 1763 } 1764 mutex_exit(&bufcache_lock); 1765 1766 if (nbusy == 0) 1767 break; 1768 if (nbusy_prev == 0) 1769 nbusy_prev = nbusy; 1770 printf("%d ", nbusy); 1771 kpause("bflush", false, MAX(1, hz / 25 * iter), NULL); 1772 if (nbusy >= nbusy_prev) /* we didn't flush anything */ 1773 iter++; 1774 else 1775 nbusy_prev = nbusy; 1776 } 1777 1778 if (nbusy) { 1779 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY) 1780 printf("giving up\nPrinting vnodes for busy buffers\n"); 1781 for (ihash = 0; ihash < bufhash+1; ihash++) { 1782 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) { 1783 if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY && 1784 (bp->b_flags & B_READ) == 0) 1785 vprint(NULL, bp->b_vp); 1786 } 1787 } 1788 #endif 1789 } 1790 1791 return nbusy; 1792 } 1793 1794 static void 1795 sysctl_fillbuf(const buf_t *i, struct buf_sysctl *o) 1796 { 1797 const bool allowaddr = get_expose_address(curproc); 1798 1799 memset(o, 0, sizeof(*o)); 1800 1801 o->b_flags = i->b_flags | i->b_cflags | i->b_oflags; 1802 o->b_error = i->b_error; 1803 o->b_prio = i->b_prio; 1804 o->b_dev = i->b_dev; 1805 o->b_bufsize = i->b_bufsize; 1806 o->b_bcount = i->b_bcount; 1807 o->b_resid = i->b_resid; 1808 COND_SET_VALUE(o->b_addr, PTRTOUINT64(i->b_data), allowaddr); 1809 o->b_blkno = i->b_blkno; 1810 o->b_rawblkno = i->b_rawblkno; 1811 COND_SET_VALUE(o->b_iodone, PTRTOUINT64(i->b_iodone), allowaddr); 1812 COND_SET_VALUE(o->b_proc, PTRTOUINT64(i->b_proc), allowaddr); 1813 COND_SET_VALUE(o->b_vp, PTRTOUINT64(i->b_vp), allowaddr); 1814 COND_SET_VALUE(o->b_saveaddr, PTRTOUINT64(i->b_saveaddr), allowaddr); 1815 o->b_lblkno = i->b_lblkno; 1816 } 1817 1818 #define KERN_BUFSLOP 20 1819 static int 1820 sysctl_dobuf(SYSCTLFN_ARGS) 1821 { 1822 buf_t *bp; 1823 struct buf_sysctl bs; 1824 struct bqueue *bq; 1825 char *dp; 1826 u_int i, op, arg; 1827 size_t len, needed, elem_size, out_size; 1828 int error, elem_count, retries; 1829 1830 if (namelen == 1 && name[0] == CTL_QUERY) 1831 return (sysctl_query(SYSCTLFN_CALL(rnode))); 1832 1833 if (namelen != 4) 1834 return (EINVAL); 1835 1836 retries = 100; 1837 retry: 1838 dp = oldp; 1839 len = (oldp != NULL) ? 
*oldlenp : 0; 1840 op = name[0]; 1841 arg = name[1]; 1842 elem_size = name[2]; 1843 elem_count = name[3]; 1844 out_size = MIN(sizeof(bs), elem_size); 1845 1846 /* 1847 * at the moment, these are just "placeholders" to make the 1848 * API for retrieving kern.buf data more extensible in the 1849 * future. 1850 * 1851 * XXX kern.buf currently has "netbsd32" issues. hopefully 1852 * these will be resolved at a later point. 1853 */ 1854 if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL || 1855 elem_size < 1 || elem_count < 0) 1856 return (EINVAL); 1857 1858 error = 0; 1859 needed = 0; 1860 sysctl_unlock(); 1861 mutex_enter(&bufcache_lock); 1862 for (i = 0; i < BQUEUES; i++) { 1863 bq = &bufqueues[i]; 1864 TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) { 1865 bq->bq_marker = bp; 1866 if (len >= elem_size && elem_count > 0) { 1867 sysctl_fillbuf(bp, &bs); 1868 mutex_exit(&bufcache_lock); 1869 error = copyout(&bs, dp, out_size); 1870 mutex_enter(&bufcache_lock); 1871 if (error) 1872 break; 1873 if (bq->bq_marker != bp) { 1874 /* 1875 * This sysctl node is only for 1876 * statistics. Retry; if the 1877 * queue keeps changing, then 1878 * bail out. 1879 */ 1880 if (retries-- == 0) { 1881 error = EAGAIN; 1882 break; 1883 } 1884 mutex_exit(&bufcache_lock); 1885 sysctl_relock(); 1886 goto retry; 1887 } 1888 dp += elem_size; 1889 len -= elem_size; 1890 } 1891 needed += elem_size; 1892 if (elem_count > 0 && elem_count != INT_MAX) 1893 elem_count--; 1894 } 1895 if (error != 0) 1896 break; 1897 } 1898 mutex_exit(&bufcache_lock); 1899 sysctl_relock(); 1900 1901 *oldlenp = needed; 1902 if (oldp == NULL) 1903 *oldlenp += KERN_BUFSLOP * sizeof(buf_t); 1904 1905 return (error); 1906 } 1907 1908 static int 1909 sysctl_bufvm_update(SYSCTLFN_ARGS) 1910 { 1911 int error, rv; 1912 struct sysctlnode node; 1913 unsigned int temp_bufcache; 1914 unsigned long temp_water; 1915 1916 /* Take a copy of the supplied node and its data */ 1917 node = *rnode; 1918 if (node.sysctl_data == &bufcache) { 1919 node.sysctl_data = &temp_bufcache; 1920 temp_bufcache = *(unsigned int *)rnode->sysctl_data; 1921 } else { 1922 node.sysctl_data = &temp_water; 1923 temp_water = *(unsigned long *)rnode->sysctl_data; 1924 } 1925 1926 /* Update the copy */ 1927 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1928 if (error || newp == NULL) 1929 return (error); 1930 1931 if (rnode->sysctl_data == &bufcache) { 1932 if (temp_bufcache > 100) 1933 return (EINVAL); 1934 bufcache = temp_bufcache; 1935 buf_setwm(); 1936 } else if (rnode->sysctl_data == &bufmem_lowater) { 1937 if (bufmem_hiwater - temp_water < 16) 1938 return (EINVAL); 1939 bufmem_lowater = temp_water; 1940 } else if (rnode->sysctl_data == &bufmem_hiwater) { 1941 if (temp_water - bufmem_lowater < 16) 1942 return (EINVAL); 1943 bufmem_hiwater = temp_water; 1944 } else 1945 return (EINVAL); 1946 1947 /* Drain until below new high water mark */ 1948 sysctl_unlock(); 1949 mutex_enter(&bufcache_lock); 1950 while (bufmem > bufmem_hiwater) { 1951 rv = buf_drain((bufmem - bufmem_hiwater) / (2 * 1024)); 1952 if (rv <= 0) 1953 break; 1954 } 1955 mutex_exit(&bufcache_lock); 1956 sysctl_relock(); 1957 1958 return 0; 1959 } 1960 1961 static struct sysctllog *vfsbio_sysctllog; 1962 1963 static void 1964 sysctl_kern_buf_setup(void) 1965 { 1966 1967 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1968 CTLFLAG_PERMANENT, 1969 CTLTYPE_NODE, "buf", 1970 SYSCTL_DESCR("Kernel buffer cache information"), 1971 sysctl_dobuf, 0, NULL, 0, 1972 CTL_KERN, KERN_BUF, CTL_EOL); 1973 } 1974 1975 static void 1976 
sysctl_vm_buf_setup(void) 1977 { 1978 1979 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1980 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1981 CTLTYPE_INT, "bufcache", 1982 SYSCTL_DESCR("Percentage of physical memory to use for " 1983 "buffer cache"), 1984 sysctl_bufvm_update, 0, &bufcache, 0, 1985 CTL_VM, CTL_CREATE, CTL_EOL); 1986 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1987 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 1988 CTLTYPE_LONG, "bufmem", 1989 SYSCTL_DESCR("Amount of kernel memory used by buffer " 1990 "cache"), 1991 NULL, 0, &bufmem, 0, 1992 CTL_VM, CTL_CREATE, CTL_EOL); 1993 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1994 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1995 CTLTYPE_LONG, "bufmem_lowater", 1996 SYSCTL_DESCR("Minimum amount of kernel memory to " 1997 "reserve for buffer cache"), 1998 sysctl_bufvm_update, 0, &bufmem_lowater, 0, 1999 CTL_VM, CTL_CREATE, CTL_EOL); 2000 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 2001 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2002 CTLTYPE_LONG, "bufmem_hiwater", 2003 SYSCTL_DESCR("Maximum amount of kernel memory to use " 2004 "for buffer cache"), 2005 sysctl_bufvm_update, 0, &bufmem_hiwater, 0, 2006 CTL_VM, CTL_CREATE, CTL_EOL); 2007 } 2008 2009 #ifdef DEBUG 2010 /* 2011 * Print out statistics on the current allocation of the buffer pool. 2012 * Can be enabled to print out on every ``sync'' by setting "syncprt" 2013 * in vfs_syscalls.c using sysctl. 2014 */ 2015 void 2016 vfs_bufstats(void) 2017 { 2018 int i, j, count; 2019 buf_t *bp; 2020 struct bqueue *dp; 2021 int counts[MAXBSIZE / MIN_PAGE_SIZE + 1]; 2022 static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" }; 2023 2024 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) { 2025 count = 0; 2026 memset(counts, 0, sizeof(counts)); 2027 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) { 2028 counts[bp->b_bufsize / PAGE_SIZE]++; 2029 count++; 2030 } 2031 printf("%s: total-%d", bname[i], count); 2032 for (j = 0; j <= MAXBSIZE / PAGE_SIZE; j++) 2033 if (counts[j] != 0) 2034 printf(", %d-%d", j * PAGE_SIZE, counts[j]); 2035 printf("\n"); 2036 } 2037 } 2038 #endif /* DEBUG */ 2039 2040 /* ------------------------------ */ 2041 2042 buf_t * 2043 getiobuf(struct vnode *vp, bool waitok) 2044 { 2045 buf_t *bp; 2046 2047 bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT)); 2048 if (bp == NULL) 2049 return bp; 2050 2051 buf_init(bp); 2052 2053 if ((bp->b_vp = vp) != NULL) { 2054 bp->b_objlock = vp->v_interlock; 2055 } else { 2056 KASSERT(bp->b_objlock == &buffer_lock); 2057 } 2058 2059 return bp; 2060 } 2061 2062 void 2063 putiobuf(buf_t *bp) 2064 { 2065 2066 buf_destroy(bp); 2067 pool_cache_put(bufio_cache, bp); 2068 } 2069 2070 /* 2071 * nestiobuf_iodone: b_iodone callback for nested buffers. 2072 */ 2073 2074 void 2075 nestiobuf_iodone(buf_t *bp) 2076 { 2077 buf_t *mbp = bp->b_private; 2078 int error; 2079 int donebytes; 2080 2081 KASSERT(bp->b_bcount <= bp->b_bufsize); 2082 KASSERT(mbp != bp); 2083 2084 error = bp->b_error; 2085 if (bp->b_error == 0 && 2086 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) { 2087 /* 2088 * Not all got transferred, raise an error. We have no way to 2089 * propagate these conditions to mbp. 2090 */ 2091 error = EIO; 2092 } 2093 2094 donebytes = bp->b_bufsize; 2095 2096 putiobuf(bp); 2097 nestiobuf_done(mbp, donebytes, error); 2098 } 2099 2100 /* 2101 * nestiobuf_setup: setup a "nested" buffer. 2102 * 2103 * => 'mbp' is a "master" buffer which is being divided into sub pieces. 2104 * => 'bp' should be a buffer allocated by getiobuf. 
2105 * => 'offset' is a byte offset in the master buffer. 2106 * => 'size' is a size in bytes of this nested buffer. 2107 */ 2108 2109 void 2110 nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size) 2111 { 2112 const int b_pass = mbp->b_flags & (B_READ|B_MEDIA_FLAGS); 2113 struct vnode *vp = mbp->b_vp; 2114 2115 KASSERT(mbp->b_bcount >= offset + size); 2116 bp->b_vp = vp; 2117 bp->b_dev = mbp->b_dev; 2118 bp->b_objlock = mbp->b_objlock; 2119 bp->b_cflags = BC_BUSY; 2120 bp->b_flags = B_ASYNC | b_pass; 2121 bp->b_iodone = nestiobuf_iodone; 2122 bp->b_data = (char *)mbp->b_data + offset; 2123 bp->b_resid = bp->b_bcount = size; 2124 bp->b_bufsize = bp->b_bcount; 2125 bp->b_private = mbp; 2126 BIO_COPYPRIO(bp, mbp); 2127 if (BUF_ISWRITE(bp) && vp != NULL) { 2128 mutex_enter(vp->v_interlock); 2129 vp->v_numoutput++; 2130 mutex_exit(vp->v_interlock); 2131 } 2132 } 2133 2134 /* 2135 * nestiobuf_done: propagate completion to the master buffer. 2136 * 2137 * => 'donebytes' specifies how many bytes in the 'mbp' is completed. 2138 * => 'error' is an errno(2) that 'donebytes' has been completed with. 2139 */ 2140 2141 void 2142 nestiobuf_done(buf_t *mbp, int donebytes, int error) 2143 { 2144 2145 if (donebytes == 0) { 2146 return; 2147 } 2148 mutex_enter(mbp->b_objlock); 2149 KASSERT(mbp->b_resid >= donebytes); 2150 mbp->b_resid -= donebytes; 2151 if (error) 2152 mbp->b_error = error; 2153 if (mbp->b_resid == 0) { 2154 if (mbp->b_error) 2155 mbp->b_resid = mbp->b_bcount; 2156 mutex_exit(mbp->b_objlock); 2157 biodone(mbp); 2158 } else 2159 mutex_exit(mbp->b_objlock); 2160 } 2161 2162 void 2163 buf_init(buf_t *bp) 2164 { 2165 2166 cv_init(&bp->b_busy, "biolock"); 2167 cv_init(&bp->b_done, "biowait"); 2168 bp->b_dev = NODEV; 2169 bp->b_error = 0; 2170 bp->b_flags = 0; 2171 bp->b_cflags = 0; 2172 bp->b_oflags = 0; 2173 bp->b_objlock = &buffer_lock; 2174 bp->b_iodone = NULL; 2175 bp->b_refcnt = 1; 2176 bp->b_dev = NODEV; 2177 bp->b_vnbufs.le_next = NOLIST; 2178 BIO_SETPRIO(bp, BPRIO_DEFAULT); 2179 } 2180 2181 void 2182 buf_destroy(buf_t *bp) 2183 { 2184 2185 cv_destroy(&bp->b_done); 2186 cv_destroy(&bp->b_busy); 2187 } 2188 2189 int 2190 bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock) 2191 { 2192 int error; 2193 2194 KASSERT(mutex_owned(&bufcache_lock)); 2195 2196 SDT_PROBE4(io, kernel, , bbusy__start, bp, intr, timo, interlock); 2197 2198 if ((bp->b_cflags & BC_BUSY) != 0) { 2199 if (curlwp == uvm.pagedaemon_lwp) { 2200 error = EDEADLK; 2201 goto out; 2202 } 2203 bp->b_cflags |= BC_WANTED; 2204 bref(bp); 2205 if (interlock != NULL) 2206 mutex_exit(interlock); 2207 if (intr) { 2208 error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock, 2209 timo); 2210 } else { 2211 error = cv_timedwait(&bp->b_busy, &bufcache_lock, 2212 timo); 2213 } 2214 brele(bp); 2215 if (interlock != NULL) 2216 mutex_enter(interlock); 2217 if (error != 0) 2218 goto out; 2219 error = EPASSTHROUGH; 2220 goto out; 2221 } 2222 bp->b_cflags |= BC_BUSY; 2223 error = 0; 2224 2225 out: SDT_PROBE5(io, kernel, , bbusy__done, 2226 bp, intr, timo, interlock, error); 2227 return error; 2228 } 2229 2230 /* 2231 * Nothing outside this file should really need to know about nbuf, 2232 * but a few things still want to read it, so give them a way to do that. 2233 */ 2234 u_int 2235 buf_nbuf(void) 2236 { 2237 2238 return nbuf; 2239 } 2240
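
/*
 * Illustrative sketch (not part of this file's interfaces): splitting a
 * master buffer into two nested transfers with getiobuf()/nestiobuf_setup().
 * The names "mbp", "vp" and "half" are hypothetical; real callers such as
 * logical-volume drivers pick offsets and block numbers from their own
 * geometry.
 *
 *	size_t half = mbp->b_bcount / 2;
 *	buf_t *b1, *b2;
 *
 *	mbp->b_resid = mbp->b_bcount;	bookkeeping used by nestiobuf_done()
 *	b1 = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, b1, 0, half);
 *	b1->b_blkno = mbp->b_blkno;
 *	b2 = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, b2, half, mbp->b_bcount - half);
 *	b2->b_blkno = mbp->b_blkno + btodb(half);
 *	VOP_STRATEGY(vp, b1);
 *	VOP_STRATEGY(vp, b2);
 *
 * Each child calls nestiobuf_iodone() on completion; when the master's
 * b_resid reaches zero, biodone(mbp) runs and the caller sees a single
 * completion for the whole transfer.
 */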