/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See man buf(9) for more info.  Note that man buf(9) doesn't reflect
 * the actual buf/bio implementation in DragonFly.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/dsched.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * Buffer queues.
 */
enum bufq_type {
	BQUEUE_NONE,		/* not on any queue */
	BQUEUE_LOCKED,		/* locked buffers */
	BQUEUE_CLEAN,		/* non-B_DELWRI buffers */
	BQUEUE_DIRTY,		/* B_DELWRI buffers */
	BQUEUE_DIRTY_HW,	/* B_DELWRI buffers - heavy weight */
	BQUEUE_EMPTY,		/* empty buffer headers */

	BUFFER_QUEUES		/* number of buffer queues */
};

typedef enum bufq_type bufq_type_t;

#define BD_WAKE_SIZE	16384
#define BD_WAKE_MASK	(BD_WAKE_SIZE - 1)

TAILQ_HEAD(bqueues, buf);

struct bufpcpu {
	struct spinlock spin;
	struct bqueues bufqueues[BUFFER_QUEUES];
} __cachealign;

struct bufpcpu bufpcpu[MAXCPU];

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct buf *buf;		/* buffer header pool */

static void vfs_clean_pages(struct buf *bp);
static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m);
#if 0
static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m);
#endif
static void vfs_vmio_release(struct buf *bp);
static int flushbufqueues(struct buf *marker, bufq_type_t q);
static vm_page_t bio_page_alloc(struct buf *bp, vm_object_t obj,
				vm_pindex_t pg, int deficit);

static void bd_signal(long totalspace);
static void buf_daemon(void);
static void buf_daemon_hw(void);

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * These are all static, but make the ones we export globals so we do
 * not need to use compiler magic.
 */
long	bufspace;		/* atomic ops */
long	maxbufspace;
static long bufmallocspace;	/* atomic ops */
long	maxbufmallocspace, lobufspace, hibufspace;
static long lorunningspace;
static long hirunningspace;
static long dirtykvaspace;		/* atomic */
long	dirtybufspace;			/* atomic (global for systat) */
static long dirtybufcount;		/* atomic */
static long dirtybufspacehw;		/* atomic */
static long dirtybufcounthw;		/* atomic */
static long runningbufspace;		/* atomic */
static long runningbufcount;		/* atomic */
long	lodirtybufspace;
long	hidirtybufspace;
static int getnewbufcalls;
static int recoverbufcalls;
static int needsbuffer;			/* atomic */
static int runningbufreq;		/* atomic */
static int bd_request;			/* atomic */
static int bd_request_hw;		/* atomic */
static u_int bd_wake_ary[BD_WAKE_SIZE];
static u_int bd_wake_index;
static u_int vm_cycle_point = 40;	/* 23-36 will migrate more act->inact */
static int debug_commit;
static int debug_bufbio;
static long bufcache_bw = 200 * 1024 * 1024;

static struct thread *bufdaemon_td;
static struct thread *bufdaemonhw_td;
static u_int lowmempgallocs;
static u_int lowmempgfails;
static u_int flushperqueue = 1024;
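/*
 * Illustrative sketch (not compiled): each cpu owns a bufpcpu slot with
 * its own spinlock and queue set, so queue manipulation normally never
 * contends across cpus.  A hypothetical helper that counts the buffers
 * on one per-cpu queue would look like this:
 */
#if 0
static long
example_count_queue(int cpuid, bufq_type_t q)
{
	struct bufpcpu *pcpu = &bufpcpu[cpuid];
	struct buf *bp;
	long count = 0;

	spin_lock(&pcpu->spin);
	TAILQ_FOREACH(bp, &pcpu->bufqueues[q], b_freelist)
		++count;
	spin_unlock(&pcpu->spin);
	return (count);
}
#endif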
/*
 * Sysctls for operational control of the buffer cache.
 */
SYSCTL_UINT(_vfs, OID_AUTO, flushperqueue, CTLFLAG_RW, &flushperqueue, 0,
	"Number of buffers to flush from each per-cpu queue");
SYSCTL_LONG(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0,
	"Number of dirty buffers to flush before bufdaemon becomes inactive");
SYSCTL_LONG(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0,
	"High watermark used to trigger explicit flushing of dirty buffers");
SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
	"Minimum amount of buffer space required for active I/O");
SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
	"Maximum amount of buffer space usable for active I/O");
SYSCTL_LONG(_vfs, OID_AUTO, bufcache_bw, CTLFLAG_RW, &bufcache_bw, 0,
	"Buffer-cache -> VM page cache transfer bandwidth");
SYSCTL_UINT(_vfs, OID_AUTO, lowmempgallocs, CTLFLAG_RW, &lowmempgallocs, 0,
	"Page allocations done during periods of very low free memory");
SYSCTL_UINT(_vfs, OID_AUTO, lowmempgfails, CTLFLAG_RW, &lowmempgfails, 0,
	"Page allocations which failed during periods of very low free memory");
SYSCTL_UINT(_vfs, OID_AUTO, vm_cycle_point, CTLFLAG_RW, &vm_cycle_point, 0,
	"Recycle pages to active or inactive queue transition pt 0-64");
/*
 * Sysctls determining current state of the buffer cache.
 */
SYSCTL_LONG(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0,
	"Total number of buffers in buffer cache");
SYSCTL_LONG(_vfs, OID_AUTO, dirtykvaspace, CTLFLAG_RD, &dirtykvaspace, 0,
	"KVA reserved by dirty buffers (all)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0,
	"Pending bytes of dirty buffers (all)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0,
	"Pending bytes of dirty buffers (heavy weight)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0,
	"Pending number of dirty buffers");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0,
	"Pending number of dirty buffers (heavy weight)");
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
	"I/O bytes currently in progress due to asynchronous writes");
SYSCTL_LONG(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0,
	"I/O buffers currently in progress due to asynchronous writes");
SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
	"Hard limit on maximum amount of memory usable for buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
	"Soft limit on maximum amount of memory usable for buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
	"Minimum amount of memory to reserve for system buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
	"Amount of memory available for buffers");
SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace,
	0, "Maximum amount of memory reserved for buffers using malloc");
SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
	"Amount of memory left for buffers using malloc-scheme");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
	"New buffer header acquisition requests");
SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0,
	"Recover VM space in an emergency");
SYSCTL_INT(_vfs, OID_AUTO, debug_commit, CTLFLAG_RW, &debug_commit, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, debug_bufbio, CTLFLAG_RW, &debug_bufbio, 0, "");
SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf),
	"sizeof(struct buf)");

char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_UNUSED02	0x02
#define VFS_BIO_NEED_UNUSED04	0x04
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

/*
 * Called when buffer space is potentially available for recovery.
 * getnewbuf() will block on this flag when it is unable to free
 * sufficient buffer space.  Buffer space becomes recoverable when
 * bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	for (;;) {
		int flags = needsbuffer;
		cpu_ccfence();
		if ((flags & VFS_BIO_NEED_BUFSPACE) == 0)
			break;
		if (atomic_cmpset_int(&needsbuffer, flags,
				      flags & ~VFS_BIO_NEED_BUFSPACE)) {
			wakeup(&needsbuffer);
			break;
		}
		/* retry */
	}
}

/*
 * runningbufwakeup:
 *
 *	Accounting for I/O in progress.
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
	long totalspace;
	long flags;

	if ((totalspace = bp->b_runningbufspace) != 0) {
		atomic_add_long(&runningbufspace, -totalspace);
		atomic_add_long(&runningbufcount, -1);
		bp->b_runningbufspace = 0;

		/*
		 * see waitrunningbufspace() for limit test.
		 */
		for (;;) {
			flags = runningbufreq;
			cpu_ccfence();
			if (flags == 0)
				break;
			if (atomic_cmpset_int(&runningbufreq, flags, 0)) {
				wakeup(&runningbufreq);
				break;
			}
			/* retry */
		}
		bd_signal(totalspace);
	}
}

/*
 * bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */
static __inline void
bufcountwakeup(void)
{
	long flags;

	for (;;) {
		flags = needsbuffer;
		if (flags == 0)
			break;
		if (atomic_cmpset_int(&needsbuffer, flags,
				      (flags & ~VFS_BIO_NEED_ANY))) {
			wakeup(&needsbuffer);
			break;
		}
		/* retry */
	}
}
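/*
 * Illustrative sketch (not compiled): the wakeup helpers above share a
 * lock-free idiom -- snapshot an atomic flag word, then use a
 * compare-and-set to clear the flag so that exactly one thread issues
 * the wakeup().  The helper name below is hypothetical.
 */
#if 0
static void
example_flag_wakeup(volatile u_int *flagp, u_int bit, void *ident)
{
	for (;;) {
		u_int flags = *flagp;		/* unlocked snapshot */
		cpu_ccfence();			/* force a single fetch */
		if ((flags & bit) == 0)		/* nobody waiting */
			break;
		/* only the cmpset winner issues the wakeup */
		if (atomic_cmpset_int(flagp, flags, flags & ~bit)) {
			wakeup(ident);
			break;
		}
		/* lost a race against another cpu, retry */
	}
}
#endif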
/*
 * waitrunningbufspace()
 *
 * If runningbufspace exceeds 4/6 hirunningspace we block until
 * runningbufspace drops to 3/6 hirunningspace.  We also block if another
 * thread blocked here in order to be fair, even if runningbufspace
 * is now lower than the limit.
 *
 * The caller may be using this function to block in a tight loop; we
 * must block while runningbufspace is greater than at least
 * hirunningspace * 3 / 6.
 */
void
waitrunningbufspace(void)
{
	long limit = hirunningspace * 4 / 6;
	long flags;

	while (runningbufspace > limit || runningbufreq) {
		tsleep_interlock(&runningbufreq, 0);
		flags = atomic_fetchadd_int(&runningbufreq, 1);
		if (runningbufspace > limit || flags)
			tsleep(&runningbufreq, PINTERLOCKED, "wdrn1", hz);
	}
}

/*
 * buf_dirty_count_severe:
 *
 *	Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{
	return (runningbufspace + dirtykvaspace >= hidirtybufspace ||
		dirtybufcount >= nbuf / 2);
}

/*
 * Return true if the amount of running I/O is severe and BIOQ should
 * start bursting.
 */
int
buf_runningbufspace_severe(void)
{
	return (runningbufspace >= hirunningspace * 4 / 6);
}

/*
 * vfs_buf_test_cache:
 *
 * Called when a buffer is extended.  This function clears the B_CACHE
 * bit if the newly extended portion of the buffer does not contain
 * valid data.
 *
 * NOTE! Dirty VM pages are not processed into dirty (B_DELWRI) buffer
 * cache buffers.  The VM pages remain dirty, as someone had mmap()'d
 * them while a clean buffer was present.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		   vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/*
 * bd_speedup()
 *
 * Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the
 * low water mark.
 */
static __inline__
void
bd_speedup(void)
{
	if (dirtykvaspace < lodirtybufspace && dirtybufcount < nbuf / 2)
		return;

	if (bd_request == 0 &&
	    (dirtykvaspace > lodirtybufspace / 2 ||
	     dirtybufcount - dirtybufcounthw >= nbuf / 2)) {
		if (atomic_fetchadd_int(&bd_request, 1) == 0)
			wakeup(&bd_request);
	}
	if (bd_request_hw == 0 &&
	    (dirtykvaspace > lodirtybufspace / 2 ||
	     dirtybufcounthw >= nbuf / 2)) {
		if (atomic_fetchadd_int(&bd_request_hw, 1) == 0)
			wakeup(&bd_request_hw);
	}
}

/*
 * bd_heatup()
 *
 *	Get the buf_daemon heated up when the number of running and dirty
 *	buffers exceeds the mid-point.
 *
 *	Return the total number of dirty bytes past the second mid point
 *	as a measure of how much excess dirty data there is in the system.
 */
long
bd_heatup(void)
{
	long mid1;
	long mid2;
	long totalspace;

	mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2;

	totalspace = runningbufspace + dirtykvaspace;
	if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) {
		bd_speedup();
		mid2 = mid1 + (hidirtybufspace - mid1) / 2;
		if (totalspace >= mid2)
			return(totalspace - mid2);
	}
	return(0);
}
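/*
 * Worked example for the watermark arithmetic above (illustrative
 * values only): with lodirtybufspace = 100MB and hidirtybufspace =
 * 200MB, mid1 = 100 + (200 - 100) / 2 = 150MB and mid2 = 150 +
 * (200 - 150) / 2 = 175MB.  A totalspace of 190MB would wake the
 * daemons via bd_speedup() and report 190 - 175 = 15MB of excess.
 */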
/*
 * bd_wait()
 *
 *	Wait for the buffer cache to flush (totalspace) bytes worth of
 *	buffers, then return.
 *
 *	Regardless, this function blocks while the number of dirty buffers
 *	exceeds hidirtybufspace.
 */
void
bd_wait(long totalspace)
{
	u_int i;
	u_int j;
	u_int mi;
	int count;

	if (curthread == bufdaemonhw_td || curthread == bufdaemon_td)
		return;

	while (totalspace > 0) {
		bd_heatup();

		/*
		 * Order is important.  Suppliers adjust bd_wake_index after
		 * updating runningbufspace/dirtykvaspace.  We want to fetch
		 * bd_wake_index before accessing.  Any error should thus
		 * be in our favor.
		 */
		i = atomic_fetchadd_int(&bd_wake_index, 0);
		if (totalspace > runningbufspace + dirtykvaspace)
			totalspace = runningbufspace + dirtykvaspace;
		count = totalspace / MAXBSIZE;
		if (count >= BD_WAKE_SIZE / 2)
			count = BD_WAKE_SIZE / 2;
		i = i + count;
		mi = i & BD_WAKE_MASK;

		/*
		 * This is not a strict interlock, so we play a bit loose
		 * with locking access to dirtybufspace*.  We have to re-check
		 * bd_wake_index to ensure that it hasn't passed us.
		 */
		tsleep_interlock(&bd_wake_ary[mi], 0);
		atomic_add_int(&bd_wake_ary[mi], 1);
		j = atomic_fetchadd_int(&bd_wake_index, 0);
		if ((int)(i - j) >= 0)
			tsleep(&bd_wake_ary[mi], PINTERLOCKED, "flstik", hz);

		totalspace = runningbufspace + dirtykvaspace - hidirtybufspace;
	}
}

/*
 * bd_signal()
 *
 *	This function is called whenever runningbufspace or dirtykvaspace
 *	is reduced.  Track threads waiting for run+dirty buffer I/O to
 *	complete.
 */
static void
bd_signal(long totalspace)
{
	u_int i;

	if (totalspace > 0) {
		if (totalspace > MAXBSIZE * BD_WAKE_SIZE)
			totalspace = MAXBSIZE * BD_WAKE_SIZE;
		while (totalspace > 0) {
			i = atomic_fetchadd_int(&bd_wake_index, 1);
			i &= BD_WAKE_MASK;
			if (atomic_readandclear_int(&bd_wake_ary[i]))
				wakeup(&bd_wake_ary[i]);
			totalspace -= MAXBSIZE;
		}
	}
}
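/*
 * Illustrative note: bd_wait()/bd_signal() implement a ring of wakeup
 * slots.  A waiter computes how many MAXBSIZE-sized units it wants
 * flushed and parks that many slots ahead of bd_wake_index; bd_signal()
 * advances the index one slot per MAXBSIZE bytes retired, waking each
 * slot it passes.  Assuming MAXBSIZE is 64KB, a thread waiting on
 * 256KB of flushing parks (256KB / 64KB) = 4 slots ahead, roughly:
 *
 *	slot = (bd_wake_index + 4) & BD_WAKE_MASK;
 *	tsleep(&bd_wake_ary[slot], ...);
 */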
/*
 * BIO tracking support routines.
 *
 * Release a ref on a bio_track.  Wakeup requests are atomically released
 * along with the last reference so bk_active will never wind up set to
 * only 0x80000000.
 */
static
void
bio_track_rel(struct bio_track *track)
{
	int active;
	int desired;

	/*
	 * Shortcut
	 */
	active = track->bk_active;
	if (active == 1 && atomic_cmpset_int(&track->bk_active, 1, 0))
		return;

	/*
	 * Full-on.  Note that the wait flag is only atomically released on
	 * the 1->0 count transition.
	 *
	 * We check for a negative count transition using bit 30 since bit 31
	 * has a different meaning.
	 */
	for (;;) {
		desired = (active & 0x7FFFFFFF) - 1;
		if (desired)
			desired |= active & 0x80000000;
		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
			if (desired & 0x40000000)
				panic("bio_track_rel: bad count: %p", track);
			if (active & 0x80000000)
				wakeup(track);
			break;
		}
		active = track->bk_active;
	}
}

/*
 * Wait for the tracking count to reach 0.
 *
 * Use atomic ops such that the wait flag is only set atomically when
 * bk_active is non-zero.
 */
int
bio_track_wait(struct bio_track *track, int slp_flags, int slp_timo)
{
	int active;
	int desired;
	int error;

	/*
	 * Shortcut
	 */
	if (track->bk_active == 0)
		return(0);

	/*
	 * Full-on.  Note that the wait flag may only be atomically set if
	 * the active count is non-zero.
	 *
	 * NOTE: We cannot optimize active == desired since a wakeup could
	 *	 clear active prior to our tsleep_interlock().
	 */
	error = 0;
	while ((active = track->bk_active) != 0) {
		cpu_ccfence();
		desired = active | 0x80000000;
		tsleep_interlock(track, slp_flags);
		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
			error = tsleep(track, slp_flags | PINTERLOCKED,
				       "trwait", slp_timo);
			if (error)
				break;
		}
	}
	return (error);
}
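/*
 * Illustrative sketch (not compiled): bk_active packs a 31-bit I/O
 * reference count with a wait flag in bit 31; bit 30 doubles as a
 * negative-transition canary.  For example, two BIOs in flight with a
 * sleeper waiting encode as 0x80000002, and the final bio_track_rel()
 * transitions 0x80000001 -> 0 and issues the wakeup atomically.  The
 * macro names below are hypothetical.
 */
#if 0
#define BKACT_WAITING	0x80000000	/* a thread is in bio_track_wait() */
#define BKACT_COUNT(v)	((v) & 0x7FFFFFFF)
#endif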
/*
 * bufinit:
 *
 *	Load time initialization of the buffer cache, called from machine-
 *	dependent initialization code.
 */
static
void
bufinit(void *dummy __unused)
{
	struct bufpcpu *pcpu;
	struct buf *bp;
	vm_offset_t bogus_offset;
	int i;
	int j;
	long n;

	/* next, make a null set of free lists */
	for (i = 0; i < ncpus; ++i) {
		pcpu = &bufpcpu[i];
		spin_init(&pcpu->spin, "bufinit");
		for (j = 0; j < BUFFER_QUEUES; j++)
			TAILQ_INIT(&pcpu->bufqueues[j]);
	}

	/*
	 * Finally, initialize each buffer header and stick on empty q.
	 * Each buffer gets its own KVA reservation.
	 */
	i = 0;
	pcpu = &bufpcpu[i];

	for (n = 0; n < nbuf; n++) {
		bp = &buf[n];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_cmd = BUF_CMD_DONE;
		bp->b_qindex = BQUEUE_EMPTY;
		bp->b_qcpu = i;
		bp->b_kvabase = (void *)(vm_map_min(&buffer_map) +
					 MAXBSIZE * n);
		bp->b_kvasize = MAXBSIZE;
		initbufbio(bp);
		xio_init(&bp->b_xio);
		buf_dep_init(bp);
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);

		i = (i + 1) % ncpus;
		pcpu = &bufpcpu[i];
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * Calculate hysteresis (lobufspace, hibufspace).  Don't make it
	 * too large or we might lockup a cpu for too long a period of
	 * time in our tight loop.
	 */
	maxbufspace = nbuf * NBUFCALCSIZE;
	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace * 7 / 8;
	if (hibufspace - lobufspace > 64 * 1024 * 1024)
		lobufspace = hibufspace - 64 * 1024 * 1024;
	if (lobufspace > hibufspace - MAXBSIZE)
		lobufspace = hibufspace - MAXBSIZE;

	lorunningspace = 512 * 1024;
	/* hirunningspace -- see below */

	/*
	 * Limit the amount of malloc memory since it is wired permanently
	 * into the kernel space.  Even though this is accounted for in
	 * the buffer allocation, we don't want the malloced region to grow
	 * uncontrolled.  The malloc scheme improves memory utilization
	 * significantly on average (small) directories.
	 */
	maxbufmallocspace = hibufspace / 20;

	/*
	 * Reduce the chance of a deadlock occurring by limiting the number
	 * of delayed-write dirty buffers we allow to stack up.
	 *
	 * We don't want too much actually queued to the device at once
	 * (XXX this needs to be per-mount!), because the buffers will
	 * wind up locked for a very long period of time while the I/O
	 * drains.
	 */
	hidirtybufspace = hibufspace / 2;	/* dirty + running */
	hirunningspace = hibufspace / 16;	/* locked & queued to device */
	if (hirunningspace < 1024 * 1024)
		hirunningspace = 1024 * 1024;

	dirtykvaspace = 0;
	dirtybufspace = 0;
	dirtybufspacehw = 0;

	lodirtybufspace = hidirtybufspace / 2;

	/*
	 * Maximum number of async ops initiated per buf_daemon loop.  This is
	 * somewhat of a hack at the moment, we really need to limit ourselves
	 * based on the number of bytes of I/O in-transit that were initiated
	 * from buf_daemon.
	 */
	bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE,
					   VM_SUBSYS_BOGUS);
	vm_object_hold(&kernel_object);
	bogus_page = vm_page_alloc(&kernel_object,
				   (bogus_offset >> PAGE_SHIFT),
				   VM_ALLOC_NORMAL);
	vm_object_drop(&kernel_object);
	vmstats.v_wire_count++;
}

SYSINIT(do_bufinit, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, bufinit, NULL);

/*
 * Initialize the embedded bio structures, typically used by
 * deprecated code which tries to allocate its own struct bufs.
 */
void
initbufbio(struct buf *bp)
{
	bp->b_bio1.bio_buf = bp;
	bp->b_bio1.bio_prev = NULL;
	bp->b_bio1.bio_offset = NOOFFSET;
	bp->b_bio1.bio_next = &bp->b_bio2;
	bp->b_bio1.bio_done = NULL;
	bp->b_bio1.bio_flags = 0;

	bp->b_bio2.bio_buf = bp;
	bp->b_bio2.bio_prev = &bp->b_bio1;
	bp->b_bio2.bio_offset = NOOFFSET;
	bp->b_bio2.bio_next = NULL;
	bp->b_bio2.bio_done = NULL;
	bp->b_bio2.bio_flags = 0;

	BUF_LOCKINIT(bp);
}

/*
 * Reinitialize the embedded bio structures as well as any additional
 * translation cache layers.
 */
void
reinitbufbio(struct buf *bp)
{
	struct bio *bio;

	for (bio = &bp->b_bio1; bio; bio = bio->bio_next) {
		bio->bio_done = NULL;
		bio->bio_offset = NOOFFSET;
	}
}

/*
 * Undo the effects of an initbufbio().
 */
void
uninitbufbio(struct buf *bp)
{
	dsched_buf_exit(bp);
	BUF_LOCKFREE(bp);
}

/*
 * Push another BIO layer onto an existing BIO and return it.  The new
 * BIO layer may already exist, holding cached translation data.
 */
struct bio *
push_bio(struct bio *bio)
{
	struct bio *nbio;

	if ((nbio = bio->bio_next) == NULL) {
		int index = bio - &bio->bio_buf->b_bio_array[0];
		if (index >= NBUF_BIO - 1) {
			panic("push_bio: too many layers %d for bp %p",
			      index, bio->bio_buf);
		}
		nbio = &bio->bio_buf->b_bio_array[index + 1];
		bio->bio_next = nbio;
		nbio->bio_prev = bio;
		nbio->bio_buf = bio->bio_buf;
		nbio->bio_offset = NOOFFSET;
		nbio->bio_done = NULL;
		nbio->bio_next = NULL;
	}
	KKASSERT(nbio->bio_done == NULL);
	return(nbio);
}

/*
 * Pop a BIO translation layer, returning the previous layer.  The BIO
 * must have previously been pushed.
 */
struct bio *
pop_bio(struct bio *bio)
{
	return(bio->bio_prev);
}

void
clearbiocache(struct bio *bio)
{
	while (bio) {
		bio->bio_offset = NOOFFSET;
		bio = bio->bio_next;
	}
}
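/*
 * Illustrative sketch (not compiled): a typical translation layer
 * pushes a new BIO to hold its own translated offset while preserving
 * the caller's layer, then pops back on completion.  All names other
 * than push_bio()/pop_bio()/vn_strategy()/biodone() are hypothetical.
 */
#if 0
static void
example_strategy(struct vnode *vp, struct bio *bio)
{
	struct bio *nbio;

	nbio = push_bio(bio);			/* new translation layer */
	nbio->bio_offset = example_translate(bio->bio_offset);
	nbio->bio_done = example_done;		/* completion callback */
	vn_strategy(vp, nbio);
}

static void
example_done(struct bio *nbio)
{
	biodone(pop_bio(nbio));			/* complete original layer */
}
#endif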
/*
 * Remove the buffer from the appropriate free list.
 * (caller must hold the per-cpu spinlock)
 */
static __inline void
_bremfree(struct buf *bp)
{
	struct bufpcpu *pcpu = &bufpcpu[bp->b_qcpu];

	if (bp->b_qindex != BQUEUE_NONE) {
		KASSERT(BUF_LOCKINUSE(bp), ("bremfree: bp %p not locked", bp));
		TAILQ_REMOVE(&pcpu->bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = BQUEUE_NONE;
	} else {
		if (!BUF_LOCKINUSE(bp))
			panic("bremfree: removing a buffer not on a queue");
	}
}

/*
 * bremfree() - must be called with a locked buffer
 */
void
bremfree(struct buf *bp)
{
	struct bufpcpu *pcpu = &bufpcpu[bp->b_qcpu];

	spin_lock(&pcpu->spin);
	_bremfree(bp);
	spin_unlock(&pcpu->spin);
}

/*
 * bremfree_locked - must be called with pcpu->spin locked
 */
static void
bremfree_locked(struct buf *bp)
{
	_bremfree(bp);
}

/*
 * This version of bread issues any required I/O asynchronously and
 * makes a callback on completion.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 */
void
breadcb(struct vnode *vp, off_t loffset, int size, int bflags,
	void (*func)(struct bio *), void *arg)
{
	struct buf *bp;

	bp = getblk(vp, loffset, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = func;
		bp->b_bio1.bio_caller_info1.ptr = arg;
		vfs_busy_pages(vp, bp);
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);
	} else if (func) {
		/*
		 * Since we are issuing the callback synchronously it cannot
		 * race the BIO_DONE, so no need for atomic ops here.
		 */
		/*bp->b_bio1.bio_done = func;*/
		bp->b_bio1.bio_caller_info1.ptr = arg;
		bp->b_bio1.bio_flags |= BIO_DONE;
		func(&bp->b_bio1);
	} else {
		bqrelse(bp);
	}
}
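/*
 * Illustrative sketch (not compiled): a minimal breadcb() consumer.
 * Per the contract above, the callback checks BIO_DONE (issuing
 * bpdone() if the I/O has not been finalized), then clears the flag
 * and disposes of the buffer.  example_read_done() is hypothetical.
 */
#if 0
static void
example_read_done(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);		/* finalize the async I/O */
	bio->bio_flags &= ~BIO_DONE;
	/* ... consume bp->b_data here ... */
	bqrelse(bp);			/* expect to use the data again */
}

	breadcb(vp, loffset, size, 0, example_read_done, NULL);
#endif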
/*
 * breadnx() - Terminal function for bread() and breadn().
 *
 * This function will start asynchronous I/O on read-ahead blocks as well
 * as satisfy the primary request.
 *
 * We must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is
 * set, the buffer is valid and we do not have to do anything.
 */
int
breadnx(struct vnode *vp, off_t loffset, int size, int bflags,
	off_t *raoffset, int *rabsize,
	int cnt, struct buf **bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;
	int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;

	if (*bpp)
		bp = *bpp;
	else
		*bpp = bp = getblk(vp, loffset, size, blkflags, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vfs_busy_pages(vp, bp);
		vn_strategy(vp, &bp->b_bio1);
		++readwait;
	}

	for (i = 0; i < cnt; i++, raoffset++, rabsize++) {
		if (inmem(vp, *raoffset))
			continue;
		rabp = getblk(vp, *raoffset, *rabsize, GETBLK_KVABIO, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			rabp->b_flags &= ~(B_ERROR | B_EINTR |
					   B_INVAL | B_NOTMETA);
			rabp->b_flags |= (bflags & ~B_KVABIO);
			rabp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(vp, rabp);
			BUF_KERNPROC(rabp);
			vn_strategy(vp, &rabp->b_bio1);
		} else {
			brelse(rabp);
		}
	}
	if (readwait)
		rv = biowait(&bp->b_bio1, "biord");
	return (rv);
}

/*
 * bwrite:
 *
 *	Synchronous write, waits for completion.
 *
 *	Write, release buffer on completion.  (Done by iodone
 *	if async).  Do not bother writing anything if the buffer
 *	is invalid.
 *
 *	Note that we set B_CACHE here, indicating that the buffer is
 *	fully valid and thus cacheable.  This is true even of NFS
 *	now so we set it generally.  This could be set either here
 *	or in biodone() since the I/O is synchronous.  We put it
 *	here.
 */
int
bwrite(struct buf *bp)
{
	int error;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bwrite: buffer is not busy???");

	/*
	 * NOTE: We no longer mark the buffer clear prior to the vn_strategy()
	 *	 call because it will remove the buffer from the vnode's
	 *	 dirty buffer list prematurely and possibly cause filesystem
	 *	 checks to race buffer flushes.  This is now handled in
	 *	 bpdone().
	 *
	 *	 bundirty(bp); REMOVED
	 */
	bp->b_flags &= ~(B_ERROR | B_EINTR);
	bp->b_flags |= B_CACHE;
	bp->b_cmd = BUF_CMD_WRITE;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	vfs_busy_pages(bp->b_vp, bp);

	/*
	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
	 * valid for vnode-backed buffers.
	 */
	bsetrunningbufspace(bp, bp->b_bufsize);
	vn_strategy(bp->b_vp, &bp->b_bio1);
	error = biowait(&bp->b_bio1, "biows");
	brelse(bp);

	return (error);
}
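/*
 * Illustrative sketch (not compiled): the classic synchronous
 * read-modify-write cycle a VFS performs with bread()/bwrite().
 * Offsets, sizes and error handling are simplified.
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, loffset, blksize, &bp);
	if (error) {
		brelse(bp);		/* bread returns bp even on error */
		return (error);
	}
	/* ... modify bp->b_data ... */
	error = bwrite(bp);		/* writes and releases bp */
#endif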
	/*
	 * bawrite:
	 *
	 *	Asynchronous write.  Start output on a buffer, but do not wait
	 *	for it to complete.  The buffer is released when the output
	 *	completes.
	 *
	 *	bwrite() ( or the VOP routine anyway ) is responsible for
	 *	handling B_INVAL buffers.  Not us.
	 */
void
bawrite(struct buf *bp)
{
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bawrite: buffer is not busy???");

	/*
	 * NOTE: We no longer mark the buffer clear prior to the vn_strategy()
	 *	 call because it will remove the buffer from the vnode's
	 *	 dirty buffer list prematurely and possibly cause filesystem
	 *	 checks to race buffer flushes.  This is now handled in
	 *	 bpdone().
	 *
	 *	 bundirty(bp); REMOVED
	 */
	bp->b_flags &= ~(B_ERROR | B_EINTR);
	bp->b_flags |= B_CACHE;
	bp->b_cmd = BUF_CMD_WRITE;
	KKASSERT(bp->b_bio1.bio_done == NULL);
	vfs_busy_pages(bp->b_vp, bp);

	/*
	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
	 * valid for vnode-backed buffers.
	 */
	bsetrunningbufspace(bp, bp->b_bufsize);
	BUF_KERNPROC(bp);
	vn_strategy(bp->b_vp, &bp->b_bio1);
}

/*
 * bdwrite:
 *
 *	Delayed write.  (Buffer is marked dirty).  Do not bother writing
 *	anything if the buffer is marked invalid.
 *
 *	Note that since the buffer must be completely valid, we can safely
 *	set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 *	biodone() in order to prevent getblk from writing the buffer
 *	out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bdwrite: buffer is not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	dsched_buf_enter(bp);	/* might stack */

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough
	 * memory to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}

	/*
	 * Because the underlying pages may still be mapped and
	 * writable, trying to set the dirty buffer (b_dirtyoff/end)
	 * range here will be inaccurate.
	 *
	 * However, we must still clean the pages to satisfy the
	 * vnode_pager and pageout daemon, so they think the pages
	 * have been "cleaned".  What has really occurred is that
	 * they've been earmarked for later writing by the buffer
	 * cache.
	 *
	 * So we get the b_dirtyoff/end update but will not actually
	 * depend on it (NFS that is) until the pages are busied for
	 * writing later on.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}
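/*
 * Illustrative summary (not compiled): the three write flavors trade
 * latency against durability.  A hypothetical VFS write path might
 * choose between them like this:
 */
#if 0
	if (ioflag & IO_SYNC)
		error = bwrite(bp);	/* synchronous, waits for media */
	else if (ioflag & IO_ASYNC)
		bawrite(bp);		/* starts I/O, returns immediately */
	else
		bdwrite(bp);		/* marks dirty, flushed later */
#endif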
/*
 * Fake write - return pages to VM system as dirty, leave the buffer clean.
 * This is used by tmpfs.
 *
 * It is important for any VFS using this routine to NOT use it for
 * IO_SYNC or IO_ASYNC operations which occur when the system really
 * wants to flush VM pages to backing store.
 */
void
buwrite(struct buf *bp)
{
	vm_page_t m;
	int i;

	/*
	 * Only works for VMIO buffers.  If the buffer is already
	 * marked for delayed-write we can't avoid the bdwrite().
	 */
	if ((bp->b_flags & B_VMIO) == 0 || (bp->b_flags & B_DELWRI)) {
		bdwrite(bp);
		return;
	}

	/*
	 * Mark as needing a commit.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		vm_page_need_commit(m);
	}
	bqrelse(bp);
}

/*
 * bdirty:
 *
 *	Turn buffer into delayed write request by marking it B_DELWRI.
 *	B_RELBUF and B_NOCACHE must be cleared.
 *
 *	We reassign the buffer to itself to properly update it in the
 *	dirty/clean lists.
 *
 *	Must be called from a critical section.
 *	The buffer must be on BQUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{
	KASSERT(bp->b_qindex == BQUEUE_NONE,
		("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	if (bp->b_flags & B_NOCACHE) {
		kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp);
		bp->b_flags &= ~B_NOCACHE;
	}
	if (bp->b_flags & B_INVAL) {
		kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp);
	}
	bp->b_flags &= ~B_RELBUF;

	if ((bp->b_flags & B_DELWRI) == 0) {
		lwkt_gettoken(&bp->b_vp->v_token);
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp);
		lwkt_reltoken(&bp->b_vp->v_token);

		atomic_add_long(&dirtybufcount, 1);
		atomic_add_long(&dirtykvaspace, bp->b_kvasize);
		atomic_add_long(&dirtybufspace, bp->b_bufsize);
		if (bp->b_flags & B_HEAVY) {
			atomic_add_long(&dirtybufcounthw, 1);
			atomic_add_long(&dirtybufspacehw, bp->b_bufsize);
		}
		bd_heatup();
	}
}

/*
 * Set B_HEAVY, indicating that this is a heavy-weight buffer that
 * needs to be flushed with a different buf_daemon thread to avoid
 * deadlocks.  B_HEAVY also imposes restrictions in getnewbuf().
 */
void
bheavy(struct buf *bp)
{
	if ((bp->b_flags & B_HEAVY) == 0) {
		bp->b_flags |= B_HEAVY;
		if (bp->b_flags & B_DELWRI) {
			atomic_add_long(&dirtybufcounthw, 1);
			atomic_add_long(&dirtybufspacehw, bp->b_bufsize);
		}
	}
}

/*
 * bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Must be called from a critical section.
 *
 *	The buffer is typically on BQUEUE_NONE but there is one case in
 *	brelse() that calls this function after placing the buffer on
 *	a different queue.
 */
void
bundirty(struct buf *bp)
{
	if (bp->b_flags & B_DELWRI) {
		lwkt_gettoken(&bp->b_vp->v_token);
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		lwkt_reltoken(&bp->b_vp->v_token);

		atomic_add_long(&dirtybufcount, -1);
		atomic_add_long(&dirtykvaspace, -bp->b_kvasize);
		atomic_add_long(&dirtybufspace, -bp->b_bufsize);
		if (bp->b_flags & B_HEAVY) {
			atomic_add_long(&dirtybufcounthw, -1);
			atomic_add_long(&dirtybufspacehw, -bp->b_bufsize);
		}
		bd_signal(bp->b_bufsize);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 * Set the b_runningbufspace field, used to track how much I/O is
 * in progress at any given moment.
 */
void
bsetrunningbufspace(struct buf *bp, int bytes)
{
	bp->b_runningbufspace = bytes;
	if (bytes) {
		atomic_add_long(&runningbufspace, bytes);
		atomic_add_long(&runningbufcount, 1);
	}
}
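/*
 * Accounting note (illustrative): bdirty() and bundirty() must remain
 * exact mirrors of each other.  Every increment of dirtybufcount,
 * dirtykvaspace, dirtybufspace and the *hw variants in bdirty() has a
 * matching decrement in bundirty() (and in the B_INVAL path of
 * brelse()); an unbalanced pair would permanently skew the watermarks
 * that drive bd_heatup()/bd_wait().
 */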
/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	struct bufpcpu *pcpu;
#ifdef INVARIANTS
	int saved_flags = bp->b_flags;
#endif

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
		("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	/*
	 * If B_NOCACHE is set we are being asked to destroy the buffer and
	 * its backing store.  Clear B_DELWRI.
	 *
	 * B_NOCACHE is set in two cases: (1) when the caller really wants
	 * to destroy the buffer and backing store and (2) when the caller
	 * wants to destroy the buffer and backing store after a write
	 * completes.
	 */
	if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) {
		bundirty(bp);
	}

	if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) {
		/*
		 * A re-dirtied buffer is only subject to destruction
		 * by B_INVAL.  B_ERROR and B_NOCACHE are ignored.
		 */
		/* leave buffer intact */
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
		   (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed read or we were asked to free or not
		 * cache the buffer.  This path is reached with B_DELWRI
		 * set only if B_INVAL is already set.  B_NOCACHE governs
		 * backing store destruction.
		 *
		 * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the
		 *	 buffer cannot be immediately freed.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			atomic_add_long(&dirtybufcount, -1);
			atomic_add_long(&dirtykvaspace, -bp->b_kvasize);
			atomic_add_long(&dirtybufspace, -bp->b_bufsize);
			if (bp->b_flags & B_HEAVY) {
				atomic_add_long(&dirtybufcounthw, -1);
				atomic_add_long(&dirtybufspacehw,
						-bp->b_bufsize);
			}
			bd_signal(bp->b_bufsize);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set,
	 * or if b_refs is non-zero.
	 *
	 * If vfs_vmio_release() is called with either bit set, the
	 * underlying pages may wind up getting freed causing a previous
	 * write (bdwrite()) to get 'lost' because pages associated with
	 * a B_DELWRI bp are marked clean.  Pages associated with a
	 * B_LOCKED buffer may be mapped by the filesystem.
	 *
	 * If we want to release the buffer ourselves (rather than the
	 * originator asking us to release it), give the originator a
	 * chance to countermand the release by setting B_LOCKED.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if ((bp->b_flags & (B_DELWRI | B_LOCKED)) || bp->b_refs) {
		bp->b_flags &= ~B_RELBUF;
	} else if (vm_page_count_min(0)) {
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);		/* can set B_LOCKED */
		if (bp->b_flags & (B_DELWRI | B_LOCKED))
			bp->b_flags &= ~B_RELBUF;
		else
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * Make sure b_cmd is clear.  It may have already been cleared by
	 * biodone().
	 *
	 * At this point destroying the buffer is governed by the B_INVAL
	 * or B_RELBUF flags.
	 */
	bp->b_cmd = BUF_CMD_DONE;
	dsched_buf_exit(bp);
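
	/*
	 * Flag summary at this point (illustrative): B_INVAL/B_RELBUF
	 * select buffer destruction, B_NOCACHE additionally selects
	 * backing store destruction, and a surviving B_DELWRI guarantees
	 * that B_RELBUF and B_NOCACHE are clear.  The VMIO rundown below
	 * acts on exactly these combinations.
	 */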
	/*
	 * VMIO buffer rundown.  Make sure the VM page array is restored
	 * after an I/O may have replaced some of the pages with bogus pages
	 * in order to not destroy dirty pages in a fill-in read.
	 *
	 * Note that due to the code above, if a buffer is marked B_DELWRI
	 * then the B_RELBUF and B_NOCACHE bits will always be clear.
	 * B_INVAL may still be set, however.
	 *
	 * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer
	 * but not the backing store.  B_NOCACHE will destroy the backing
	 * store.
	 *
	 * Note that dirty NFS buffers contain byte-granular write ranges
	 * and should not be destroyed w/ B_INVAL even if the backing store
	 * is left intact.
	 */
	if (bp->b_flags & B_VMIO) {
		/*
		 * Rundown for VMIO buffers which are not dirty NFS buffers.
		 */
		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then the b_data pointer may not be
		 * page-aligned.  But our b_xio.xio_pages array *IS*
		 * page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_loffset;

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			m = bp->b_xio.xio_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* of them
			 * now.  Note that we left these pages wired
			 * when we removed them so they had better exist,
			 * and they cannot be ripped out from under us so
			 * no critical section protection is necessary.
			 */
			if (m == bogus_page) {
				obj = vp->v_object;
				poff = OFF_TO_IDX(bp->b_loffset);

				vm_object_hold(obj);
				for (j = i; j < bp->b_xio.xio_npages; j++) {
					vm_page_t mtmp;

					mtmp = bp->b_xio.xio_pages[j];
					if (mtmp == bogus_page) {
						if ((bp->b_flags & B_HASBOGUS) == 0)
							panic("brelse: bp %p corrupt bogus", bp);
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp)
							panic("brelse: bp %p page %d missing", bp, j);
						bp->b_xio.xio_pages[j] = mtmp;
					}
				}
				vm_object_drop(obj);

				if ((bp->b_flags & B_HASBOGUS) ||
				    (bp->b_flags & B_INVAL) == 0) {
					pmap_qenter_noinval(
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_xio.xio_pages,
					    bp->b_xio.xio_npages);
					bp->b_flags &= ~B_HASBOGUS;
					bp->b_flags |= B_KVABIO;
					bkvareset(bp);
				}
				m = bp->b_xio.xio_pages[i];
			}

			/*
			 * Invalidate the backing store if B_NOCACHE is set
			 * (e.g. used with vinvalbuf()).  If this is NFS
			 * we impose a requirement that the block size be
			 * a multiple of PAGE_SIZE and create a temporary
			 * hack to basically invalidate the whole page.  The
			 * problem is that NFS uses really odd buffer sizes
			 * especially when tracking piecemeal writes and
			 * it also vinvalbuf()'s a lot, which would result
			 * in only partial page validation and invalidation
			 * here.  If the file page is mmap()'d, however,
			 * all the valid bits get set so after we invalidate
			 * here we would end up with weird m->valid values
			 * like 0xfc.  nfs_getpages() can't handle this so
			 * we clear all the valid bits for the NFS case
			 * instead of just some of them.
			 *
			 * The real bug is the VM system having to set
			 * m->valid to VM_PAGE_BITS_ALL for faulted-in pages,
			 * which itself is an artifact of the whole 512-byte
			 * granular mess that exists to support odd block
			 * sizes and UFS meta-data block sizes (e.g. 6144).
			 * A complete rewrite is required.
			 *
			 * XXX
			 */
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid;

				presid = PAGE_SIZE - poffset;
				if (bp->b_vp->v_tag == VT_NFS &&
				    bp->b_vp->v_type == VREG) {
					; /* entire page */
				} else if (presid > resid) {
					presid = resid;
				}
				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);

				/*
				 * Also make sure any swap cache is removed
				 * as it is now stale (HAMMER in particular
				 * uses B_NOCACHE to deal with buffer
				 * aliasing).
				 */
				swap_pager_unswapped(m);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	} else {
		/*
		 * Rundown for non-VMIO buffers.
		 */
		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	if (bp->b_qindex != BQUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/*
	 * Figure out the correct queue to place the cleaned up buffer on.
	 * Buffers placed in the EMPTY queue had better already be
	 * disassociated from their vnode.
	 *
	 * Return the buffer to its original pcpu area
	 */
	pcpu = &bufpcpu[bp->b_qcpu];
	spin_lock(&pcpu->spin);

	if (bp->b_flags & B_LOCKED) {
		/*
		 * Buffers that are locked are placed in the locked queue
		 * immediately, regardless of their state.
		 */
		bp->b_qindex = BQUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_bufsize == 0) {
		/*
		 * Buffers with no memory.  Due to conditionals near the top
		 * of brelse() such buffers should probably already be
		 * marked B_INVAL and disassociated from their vnode.
		 */
		bp->b_flags |= B_INVAL;
		KASSERT(bp->b_vp == NULL,
			("bp1 %p flags %08x/%08x vnode %p "
			 "unexpectedly still associated!",
			 bp, saved_flags, bp->b_flags, bp->b_vp));
		KKASSERT((bp->b_flags & B_HASHED) == 0);
		bp->b_qindex = BQUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) {
		/*
		 * Buffers with junk contents.  Again these buffers had better
		 * already be disassociated from their vnode.
		 */
		KASSERT(bp->b_vp == NULL,
			("bp2 %p flags %08x/%08x vnode %p unexpectedly "
			 "still associated!",
			 bp, saved_flags, bp->b_flags, bp->b_vp));
		KKASSERT((bp->b_flags & B_HASHED) == 0);
		bp->b_flags |= B_INVAL;
		bp->b_qindex = BQUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else {
		/*
		 * Remaining buffers.  These buffers are still associated with
		 * their vnode.
		 */
		switch(bp->b_flags & (B_DELWRI|B_HEAVY)) {
		case B_DELWRI:
			bp->b_qindex = BQUEUE_DIRTY;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		case B_DELWRI | B_HEAVY:
			bp->b_qindex = BQUEUE_DIRTY_HW;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		default:
			/*
			 * NOTE: Buffers are always placed at the end of the
			 * queue.  If B_AGE is not set the buffer will cycle
			 * through the queue twice.
			 */
			bp->b_qindex = BQUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		}
	}
	spin_unlock(&pcpu->spin);

	/*
	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
	 * on the correct queue but we have not yet unlocked it.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI))
		bundirty(bp);

	/*
	 * The bp is on an appropriate queue unless locked.  If it is not
	 * locked or dirty we can wakeup threads waiting for buffer space.
	 *
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */
	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0)
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	/*
	 * Clean up temporary flags and unlock the buffer.
	 */
	bp->b_flags &= ~(B_NOCACHE | B_RELBUF | B_DIRECT);
	BUF_UNLOCK(bp);
}
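/*
 * Queue placement summary for brelse() (illustrative recap of the
 * decision chain above):
 *
 *	B_LOCKED			-> BQUEUE_LOCKED
 *	b_bufsize == 0			-> BQUEUE_EMPTY
 *	B_INVAL/B_NOCACHE/B_RELBUF	-> BQUEUE_CLEAN (head, junk contents)
 *	B_DELWRI			-> BQUEUE_DIRTY
 *	B_DELWRI | B_HEAVY		-> BQUEUE_DIRTY_HW
 *	otherwise			-> BQUEUE_CLEAN (tail, reusable cache)
 */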
/*
 * bqrelse:
 *
 *	Release a buffer back to the appropriate queue but do not try to free
 *	it.  The buffer is expected to be used again soon.
 *
 *	bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 *	biodone() to requeue an async I/O on completion.  It is also used when
 *	known good buffers need to be requeued but we think we may need the
 *	data again soon.
 *
 *	XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf *bp)
{
	struct bufpcpu *pcpu;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
		("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_qindex != BQUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	buf_act_advance(bp);

	pcpu = &bufpcpu[bp->b_qcpu];
	spin_lock(&pcpu->spin);

	if (bp->b_flags & B_LOCKED) {
		/*
		 * Locked buffers are released to the locked queue.  However,
		 * if the buffer is dirty it will first go into the dirty
		 * queue and later on after the I/O completes successfully it
		 * will be released to the locked queue.
		 */
		bp->b_qindex = BQUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_flags & B_DELWRI) {
		bp->b_qindex = (bp->b_flags & B_HEAVY) ?
			       BQUEUE_DIRTY_HW : BQUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (vm_page_count_min(0)) {
		/*
		 * We are too low on memory, we have to try to free the
		 * buffer (most importantly: the wired pages making up its
		 * backing store) *now*.
		 */
		spin_unlock(&pcpu->spin);
		brelse(bp);
		return;
	} else {
		bp->b_qindex = BQUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	}
	spin_unlock(&pcpu->spin);

	/*
	 * We have now placed the buffer on the proper queue, but have yet
	 * to unlock it.
	 */
	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/*
	 * Final cleanup and unlock.  Clear bits that are only used while a
	 * buffer is actively locked.
	 */
	bp->b_flags &= ~(B_NOCACHE | B_RELBUF);
	dsched_buf_exit(bp);
	BUF_UNLOCK(bp);
}

/*
 * Hold a buffer, preventing it from being reused.  This will prevent
 * normal B_RELBUF operations on the buffer but will not prevent B_INVAL
 * operations.  If a B_INVAL operation occurs the buffer will remain held
 * but the underlying pages may get ripped out.
 *
 * These functions are typically used in VOP_READ/VOP_WRITE functions
 * to hold a buffer during a copyin or copyout, preventing deadlocks
 * or recursive lock panics when read()/write() is used over mmap()'d
 * space.
 *
 * NOTE: bqhold() requires that the buffer be locked at the time of the
 *	 hold.  bqdrop() has no requirements other than the buffer having
 *	 previously been held.
 */
void
bqhold(struct buf *bp)
{
	atomic_add_int(&bp->b_refs, 1);
}

void
bqdrop(struct buf *bp)
{
	KKASSERT(bp->b_refs > 0);
	atomic_add_int(&bp->b_refs, -1);
}
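/*
 * Illustrative sketch (not compiled): holding a buffer across an
 * unlocked uiomove() in a hypothetical VOP_READ path.  The hold keeps
 * the buffer from being reused even though its lock is dropped while
 * copying to a possibly-faulting user address.
 */
#if 0
	bqhold(bp);			/* bp is locked here */
	BUF_UNLOCK(bp);
	error = uiomove(bp->b_data + offset, n, uio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
	bqdrop(bp);
#endif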
/*
 * Return backing pages held by the buffer 'bp' back to the VM system.
 * This routine is called when the bp is invalidated, released, or
 * reused.
 *
 * The KVA mapping (b_data) for the underlying pages is removed by
 * this function.
 *
 * WARNING! This routine is integral to the low memory critical path
 *	    when a buffer is B_RELBUF'd.  If the system has a severe page
 *	    deficit we need to get the page(s) onto the PQ_FREE or PQ_CACHE
 *	    queues so they can be reused in the current pageout daemon
 *	    pass.
 */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		bp->b_xio.xio_pages[i] = NULL;

		/*
		 * We need to own the page in order to safely unwire it.
		 */
		vm_page_busy_wait(m, FALSE, "vmiopg");

		/*
		 * The VFS is telling us this is not a meta-data buffer
		 * even if it is backed by a block device.
		 */
		if (bp->b_flags & B_NOTMETA)
			vm_page_flag_set(m, PG_NOTMETA);

		/*
		 * This is a very important bit of code.  We try to track
		 * VM page use whether the pages are wired into the buffer
		 * cache or not.  While wired into the buffer cache the
		 * bp tracks the act_count.
		 *
		 * We can choose to place unwired pages on the inactive
		 * queue (0) or active queue (1).  If we place too many
		 * on the active queue the queue will cycle the act_count
		 * on pages we'd like to keep, just from single-use pages
		 * (such as when doing a tar-up or file scan).
		 */
		if (bp->b_act_count < vm_cycle_point)
			vm_page_unwire(m, 0);
		else
			vm_page_unwire(m, 1);

		/*
		 * If the wire_count has dropped to 0 we may need to take
		 * further action before unbusying the page.
		 *
		 * WARNING: vm_page_try_*() also checks PG_NEED_COMMIT for us.
		 */
		if (m->wire_count == 0) {
			if (bp->b_flags & B_DIRECT) {
				/*
				 * Attempt to free the page if B_DIRECT is
				 * set, the caller does not desire the page
				 * to be cached.
				 */
				vm_page_wakeup(m);
				vm_page_try_to_free(m);
			} else if ((bp->b_flags & B_NOTMETA) ||
				   vm_page_count_min(0)) {
				/*
				 * Attempt to move the page to PQ_CACHE
				 * if B_NOTMETA is set.  This flag is set
				 * by HAMMER to remove one of the two pages
				 * present when double buffering is enabled.
				 *
				 * Attempt to move the page to PQ_CACHE
				 * if we have a severe page deficit.  This
				 * will cause buffer cache operations related
				 * to pageouts to recycle the related pages
				 * in order to avoid a low memory deadlock.
				 */
				m->act_count = bp->b_act_count;
				vm_page_try_to_cache(m);
			} else {
				/*
				 * Nominal case, leave the page on the
				 * queue the original unwiring placed it on
				 * (active or inactive).
				 */
				m->act_count = bp->b_act_count;
				vm_page_wakeup(m);
			}
		} else {
			vm_page_wakeup(m);
		}
	}

	/*
	 * Zero out the pmap pte's for the mapping, but don't bother
	 * invalidating the TLB.  The range will be properly invalidated
	 * when new pages are entered into the mapping.
	 *
	 * This in particular reduces tmpfs tear-down overhead and reduces
	 * buffer cache re-use overhead (one invalidation sequence instead
	 * of two per re-use).
	 */
	pmap_qremove_noinval(trunc_page((vm_offset_t)bp->b_data),
			     bp->b_xio.xio_npages);
	CPUMASK_ASSZERO(bp->b_cpumask);
	if (bp->b_bufsize) {
		atomic_add_long(&bufspace, -bp->b_bufsize);
		bp->b_bufsize = 0;
		bufspacewakeup();
	}
	bp->b_xio.xio_npages = 0;
	bp->b_flags &= ~B_VMIO;
	KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
	if (bp->b_vp)
		brelvp(bp);
}
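/*
 * Illustrative note: vm_cycle_point (default 40, tunable 0-64 via
 * vfs.vm_cycle_point) decides the unwire destination above.  A page
 * whose buffer saw little reuse (b_act_count < vm_cycle_point) is
 * unwired to the inactive queue and becomes an early reclaim candidate;
 * a hotter page is unwired to the active queue and keeps its act_count.
 */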
1917 */ 1918 1919 ++getnewbufcalls; 1920 nqcpu = mycpu->gd_cpuid; 1921 flushingp = &flushingbufs[nqcpu]; 1922 restart: 1923 if (bufspace < lobufspace) 1924 *flushingp = 0; 1925 1926 if (debug_bufbio && --maxloops == 0) 1927 panic("getnewbuf, excessive loops on cpu %d restart %d (%p)", 1928 mycpu->gd_cpuid, restart_reason, restart_bp); 1929 1930 /* 1931 * Setup for scan. If we do not have enough free buffers, 1932 * we setup a degenerate case that immediately fails. Note 1933 * that if we are specially marked process, we are allowed to 1934 * dip into our reserves. 1935 * 1936 * The scanning sequence is nominally: EMPTY->CLEAN 1937 */ 1938 pcpu = &bufpcpu[nqcpu]; 1939 spin_lock(&pcpu->spin); 1940 1941 /* 1942 * Prime the scan for this cpu. Locate the first buffer to 1943 * check. If we are flushing buffers we must skip the 1944 * EMPTY queue. 1945 */ 1946 nqindex = BQUEUE_EMPTY; 1947 nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_EMPTY]); 1948 if (nbp == NULL || *flushingp) { 1949 nqindex = BQUEUE_CLEAN; 1950 nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_CLEAN]); 1951 } 1952 1953 /* 1954 * Run scan, possibly freeing data and/or kva mappings on the fly, 1955 * depending. 1956 * 1957 * WARNING! spin is held! 1958 */ 1959 while ((bp = nbp) != NULL) { 1960 int qindex = nqindex; 1961 1962 nbp = TAILQ_NEXT(bp, b_freelist); 1963 1964 /* 1965 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 1966 * cycles through the queue twice before being selected. 1967 */ 1968 if (qindex == BQUEUE_CLEAN && 1969 (bp->b_flags & B_AGE) == 0 && nbp) { 1970 bp->b_flags |= B_AGE; 1971 TAILQ_REMOVE(&pcpu->bufqueues[qindex], 1972 bp, b_freelist); 1973 TAILQ_INSERT_TAIL(&pcpu->bufqueues[qindex], 1974 bp, b_freelist); 1975 continue; 1976 } 1977 1978 /* 1979 * Calculate next bp ( we can only use it if we do not block 1980 * or do other fancy things ). 1981 */ 1982 if (nbp == NULL) { 1983 switch(qindex) { 1984 case BQUEUE_EMPTY: 1985 nqindex = BQUEUE_CLEAN; 1986 if ((nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_CLEAN]))) 1987 break; 1988 /* fall through */ 1989 case BQUEUE_CLEAN: 1990 /* 1991 * nbp is NULL. 1992 */ 1993 break; 1994 } 1995 } 1996 1997 /* 1998 * Sanity Checks 1999 */ 2000 KASSERT(bp->b_qindex == qindex, 2001 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 2002 2003 /* 2004 * Note: we no longer distinguish between VMIO and non-VMIO 2005 * buffers. 2006 */ 2007 KASSERT((bp->b_flags & B_DELWRI) == 0, 2008 ("delwri buffer %p found in queue %d", bp, qindex)); 2009 2010 /* 2011 * Do not try to reuse a buffer with a non-zero b_refs. 2012 * This is an unsynchronized test. A synchronized test 2013 * is also performed after we lock the buffer. 2014 */ 2015 if (bp->b_refs) 2016 continue; 2017 2018 /* 2019 * Start freeing the bp. This is somewhat involved. nbp 2020 * remains valid only for BQUEUE_EMPTY bp's. Buffers 2021 * on the clean list must be disassociated from their 2022 * current vnode. Buffers on the empty lists have 2023 * already been disassociated. 2024 * 2025 * b_refs is checked after locking along with queue changes. 2026 * We must check here to deal with zero->nonzero transitions 2027 * made by the owner of the buffer lock, which is used by 2028 * VFS's to hold the buffer while issuing an unlocked 2029 * uiomove()s. We cannot invalidate the buffer's pages 2030 * for this case. Once we successfully lock a buffer the 2031 * only 0->1 transitions of b_refs will occur via findblk(). 
2032		 *
2033		 * We must also check for queue changes after successful
2034		 * locking as the current lock holder may dispose of the
2035		 * buffer and change its queue.
2036		 */
2037		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
2038			spin_unlock(&pcpu->spin);
2039			tsleep(&bd_request, 0, "gnbxxx", (hz + 99) / 100);
2040			restart_reason = 1;
2041			restart_bp = bp;
2042			goto restart;
2043		}
2044		if (bp->b_qindex != qindex || bp->b_refs) {
2045			spin_unlock(&pcpu->spin);
2046			BUF_UNLOCK(bp);
2047			restart_reason = 2;
2048			restart_bp = bp;
2049			goto restart;
2050		}
2051		bremfree_locked(bp);
2052		spin_unlock(&pcpu->spin);
2053
2054		/*
2055		 * Dependencies must be handled before we disassociate the
2056		 * vnode.
2057		 *
2058		 * NOTE: HAMMER will set B_LOCKED if the buffer cannot
2059		 * be immediately disassociated. HAMMER then becomes
2060		 * responsible for releasing the buffer.
2061		 *
2062		 * NOTE: spin is UNLOCKED now.
2063		 */
2064		if (LIST_FIRST(&bp->b_dep) != NULL) {
2065			buf_deallocate(bp);
2066			if (bp->b_flags & B_LOCKED) {
2067				bqrelse(bp);
2068				restart_reason = 3;
2069				restart_bp = bp;
2070				goto restart;
2071			}
2072			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2073		}
2074
2075		/*
2076		 * CLEAN buffers have content or associations that must be
2077		 * cleaned out prior to reuse.
2078		 */
2079		if (qindex == BQUEUE_CLEAN) {
2080			if (bp->b_flags & B_VMIO)
2081				vfs_vmio_release(bp);
2082			if (bp->b_vp)
2083				brelvp(bp);
2084		}
2085
2086		/*
2087		 * NOTE: nbp is now entirely invalid. We can only restart
2088		 * the scan from this point on.
2089		 *
2090		 * Get the rest of the buffer freed up. b_kva* is still
2091		 * valid after this operation.
2092		 */
2093		KASSERT(bp->b_vp == NULL,
2094			("bp3 %p flags %08x vnode %p qindex %d "
2095			 "unexpectedly still associated!",
2096			 bp, bp->b_flags, bp->b_vp, qindex));
2097		KKASSERT((bp->b_flags & B_HASHED) == 0);
2098
2099		if (bp->b_bufsize)
2100			allocbuf(bp, 0);
2101
2102		if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN | B_HASHED)) {
2103			kprintf("getnewbuf: caught bug vp queue "
2104				"%p/%08x qidx %d\n",
2105				bp, bp->b_flags, qindex);
2106			brelvp(bp);
2107		}
2108		bp->b_flags = B_BNOCLIP;
2109		bp->b_cmd = BUF_CMD_DONE;
2110		bp->b_vp = NULL;
2111		bp->b_error = 0;
2112		bp->b_resid = 0;
2113		bp->b_bcount = 0;
2114		bp->b_xio.xio_npages = 0;
2115		bp->b_dirtyoff = bp->b_dirtyend = 0;
2116		bp->b_act_count = ACT_INIT;
2117		reinitbufbio(bp);
2118		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2119		buf_dep_init(bp);
2120		if (blkflags & GETBLK_BHEAVY)
2121			bp->b_flags |= B_HEAVY;
2122
2123		if (bufspace >= hibufspace)
2124			*flushingp = 1;
2125		if (bufspace < lobufspace)
2126			*flushingp = 0;
2127		if (*flushingp) {
2128			bp->b_flags |= B_INVAL;
2129			brelse(bp);
2130			restart_reason = 5;
2131			restart_bp = bp;
2132			goto restart;
2133		}
2134
2135		/*
2136		 * b_refs can transition to a non-zero value while we hold
2137		 * the buffer locked due to a findblk(). Our brelvp() above
2138		 * interlocked any future possible transitions due to
2139		 * findblk()s.
2140		 *
2141		 * If we find b_refs to be non-zero we can destroy the
2142		 * buffer's contents but we cannot yet reuse the buffer.
2143		 */
2144		if (bp->b_refs) {
2145			bp->b_flags |= B_INVAL;
2146			brelse(bp);
2147			restart_reason = 6;
2148			restart_bp = bp;
2149
2150			goto restart;
2151		}
2152
2153		/*
2154		 * We found our buffer!
2155		 */
2156		break;
2157	}
2158
2159	/*
2160	 * If we exhausted our list, iterate other cpus. If that fails,
2161	 * sleep as appropriate. We may have to wake up various daemons
2162	 * and write out some dirty buffers.
2163	 *
2164	 * Generally we are sleeping due to insufficient buffer space.
2165	 *
2166	 * NOTE: spin is held if bp is NULL, else it is not held.
2167	 */
2168	if (bp == NULL) {
2169		int flags;
2170		char *waitmsg;
2171
2172		spin_unlock(&pcpu->spin);
2173
2174		nqcpu = (nqcpu + 1) % ncpus;
2175		if (nqcpu != mycpu->gd_cpuid) {
2176			restart_reason = 7;
2177			restart_bp = bp;
2178			goto restart;
2179		}
2180
2181		if (bufspace >= hibufspace) {
2182			waitmsg = "bufspc";
2183			flags = VFS_BIO_NEED_BUFSPACE;
2184		} else {
2185			waitmsg = "newbuf";
2186			flags = VFS_BIO_NEED_ANY;
2187		}
2188
2189		bd_speedup();	/* heeeelp */
2190		atomic_set_int(&needsbuffer, flags);
2191		while (needsbuffer & flags) {
2192			int value;
2193
2194			tsleep_interlock(&needsbuffer, 0);
2195			value = atomic_fetchadd_int(&needsbuffer, 0);
2196			if (value & flags) {
2197				if (tsleep(&needsbuffer, PINTERLOCKED|slpflags,
2198					   waitmsg, slptimeo)) {
2199					return (NULL);
2200				}
2201			}
2202		}
2203	} else {
2204		/*
2205		 * We finally have a valid bp. Reset b_data.
2206		 *
2207		 * (spin is not held)
2208		 */
2209		bp->b_data = bp->b_kvabase;
2210	}
2211	return(bp);
2212 }
2213
2214 /*
2215  * buf_daemon:
2216  *
2217  *	Buffer flushing daemon. Buffers are normally flushed by the
2218  *	update daemon but if it cannot keep up this process starts to
2219  *	take the load in an attempt to prevent getnewbuf() from blocking.
2220  *
2221  *	Once a flush is initiated it does not stop until the amount of
2222  *	dirty buffer space falls below the low water mark (lodirtybufspace),
2223  *	but we will wake up anyone waiting at the mid-point.
2224  */
2225 static struct kproc_desc buf_kp = {
2226	"bufdaemon",
2227	buf_daemon,
2228	&bufdaemon_td
2229 };
2230 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST,
2231	kproc_start, &buf_kp);
2232
2233 static struct kproc_desc bufhw_kp = {
2234	"bufdaemon_hw",
2235	buf_daemon_hw,
2236	&bufdaemonhw_td
2237 };
2238 SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST,
2239	kproc_start, &bufhw_kp);
2240
2241 static void
2242 buf_daemon1(struct thread *td, int queue, int (*buf_limit_fn)(long),
2243	     int *bd_req)
2244 {
2245	long limit;
2246	struct buf *marker;
2247
2248	marker = kmalloc(sizeof(*marker), M_BIOBUF, M_WAITOK | M_ZERO);
2249	marker->b_flags |= B_MARKER;
2250	marker->b_qindex = BQUEUE_NONE;
2251	marker->b_qcpu = 0;
2252
2253	/*
2254	 * This process needs to be suspended prior to shutdown sync.
2255	 */
2256	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
2257			      td, SHUTDOWN_PRI_LAST);
2258	curthread->td_flags |= TDF_SYSTHREAD;
2259
2260	/*
2261	 * This process is allowed to take the buffer cache to the limit.
2262	 */
2263	for (;;) {
2264		kproc_suspend_loop();
2265
2266		/*
2267		 * Do the flush as long as the number of dirty buffers
2268		 * (including those running) exceeds lodirtybufspace.
2269		 *
2270		 * When flushing, limit the amount of in-transit I/O we
2271		 * allow to build up to hirunningspace, otherwise we would
2272		 * completely saturate the I/O system. Wake up any waiting
2273		 * processes before we normally would so they can run in
2274		 * parallel with our drain.
2275		 *
2276		 * Our aggregate normal+HW low water mark is lodirtybufspace,
2277		 * but because we split the operation into two threads we
2278		 * have to cut it in half for each thread.
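		 *
		 * In sketch form (assuming the default limit callbacks
		 * below, and glossing over their buffer-count clauses),
		 * each daemon therefore loops while:
		 *
		 *	runningbufspace + dirtykvaspace > lodirtybufspace / 2
		 *
		 * throttling itself via waitrunningbufspace() whenever the
		 * amount of in-flight write I/O reaches hirunningspace.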
2279		 */
2280		waitrunningbufspace();
2281		limit = lodirtybufspace / 2;
2282		while (buf_limit_fn(limit)) {
2283			if (flushbufqueues(marker, queue) == 0)
2284				break;
2285			if (runningbufspace < hirunningspace)
2286				continue;
2287			waitrunningbufspace();
2288		}
2289
2290		/*
2291		 * We reached our low water mark, reset the
2292		 * request and sleep until we are needed again.
2293		 * The sleep is just so the suspend code works.
2294		 */
2295		tsleep_interlock(bd_req, 0);
2296		if (atomic_swap_int(bd_req, 0) == 0)
2297			tsleep(bd_req, PINTERLOCKED, "psleep", hz);
2298	}
2299	/* NOT REACHED */
2300	/*kfree(marker, M_BIOBUF);*/
2301 }
2302
2303 static int
2304 buf_daemon_limit(long limit)
2305 {
2306	return (runningbufspace + dirtykvaspace > limit ||
2307		dirtybufcount - dirtybufcounthw >= nbuf / 2);
2308 }
2309
2310 static int
2311 buf_daemon_hw_limit(long limit)
2312 {
2313	return (runningbufspace + dirtykvaspace > limit ||
2314		dirtybufcounthw >= nbuf / 2);
2315 }
2316
2317 static void
2318 buf_daemon(void)
2319 {
2320	buf_daemon1(bufdaemon_td, BQUEUE_DIRTY, buf_daemon_limit,
2321		    &bd_request);
2322 }
2323
2324 static void
2325 buf_daemon_hw(void)
2326 {
2327	buf_daemon1(bufdaemonhw_td, BQUEUE_DIRTY_HW, buf_daemon_hw_limit,
2328		    &bd_request_hw);
2329 }
2330
2331 /*
2332  * Flush up to (flushperqueue) buffers in the dirty queue. Each cpu has a
2333  * localized version of the queue. Each call made to this function iterates
2334  * to another cpu. It is desirable to flush several buffers from the same
2335  * cpu's queue at once, as these are likely going to be linear.
2336  *
2337  * We must be careful to free up B_INVAL buffers instead of writing them;
2338  * NFS is particularly sensitive to this.
2339  *
2340  * B_RELBUF may only be set by VFSs. We do set B_AGE to indicate that we
2341  * really want to try to get the buffer out and reuse it due to the write
2342  * load on the machine.
2343  *
2344  * We must lock the buffer in order to check its validity before we can mess
2345  * with its contents. spin isn't enough.
2346  */
2347 static int
2348 flushbufqueues(struct buf *marker, bufq_type_t q)
2349 {
2350	struct bufpcpu *pcpu;
2351	struct buf *bp;
2352	int r = 0;
2353	u_int loops = flushperqueue;
2354	int lcpu = marker->b_qcpu;
2355
2356	KKASSERT(marker->b_qindex == BQUEUE_NONE);
2357	KKASSERT(marker->b_flags & B_MARKER);
2358
2359 again:
2360	/*
2361	 * Spinlock needed to perform operations on the queue and may be
2362	 * held through a non-blocking BUF_LOCK(), but cannot be held when
2363	 * BUF_UNLOCK()ing or through any other major operation.
2364	 */
2365	pcpu = &bufpcpu[marker->b_qcpu];
2366	spin_lock(&pcpu->spin);
2367	marker->b_qindex = q;
2368	TAILQ_INSERT_HEAD(&pcpu->bufqueues[q], marker, b_freelist);
2369	bp = marker;
2370
2371	while ((bp = TAILQ_NEXT(bp, b_freelist)) != NULL) {
2372		/*
2373		 * NOTE: spinlock is always held at the top of the loop
2374		 */
2375		if (bp->b_flags & B_MARKER)
2376			continue;
2377		if ((bp->b_flags & B_DELWRI) == 0) {
2378			kprintf("Unexpected clean buffer %p\n", bp);
2379			continue;
2380		}
2381		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
2382			continue;
2383		KKASSERT(bp->b_qcpu == marker->b_qcpu && bp->b_qindex == q);
2384
2385		/*
2386		 * Once the buffer is locked we will have no choice but to
2387		 * unlock the spinlock around a later BUF_UNLOCK and re-set
2388		 * bp = marker when looping. Move the marker now to make
2389		 * things easier.
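		 *
		 * The marker discipline used by this loop, in sketch form
		 * (pcpu->spin is held except around the real work on bp):
		 *
		 *	TAILQ_INSERT_AFTER(queue, bp, marker, b_freelist);
		 *	spin_unlock(&pcpu->spin);
		 *	... write out or dispose of bp ...
		 *	spin_lock(&pcpu->spin);
		 *	bp = marker;	(resume the scan at the marker)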
2390		 */
2391		TAILQ_REMOVE(&pcpu->bufqueues[q], marker, b_freelist);
2392		TAILQ_INSERT_AFTER(&pcpu->bufqueues[q], bp, marker, b_freelist);
2393
2394		/*
2395		 * Must recheck B_DELWRI after successfully locking
2396		 * the buffer.
2397		 */
2398		if ((bp->b_flags & B_DELWRI) == 0) {
2399			spin_unlock(&pcpu->spin);
2400			BUF_UNLOCK(bp);
2401			spin_lock(&pcpu->spin);
2402			bp = marker;
2403			continue;
2404		}
2405
2406		/*
2407		 * Remove the buffer from its queue. We still own the
2408		 * spinlock here.
2409		 */
2410		_bremfree(bp);
2411
2412		/*
2413		 * Disposing of an invalid buffer counts as a flush op.
2414		 */
2415		if (bp->b_flags & B_INVAL) {
2416			spin_unlock(&pcpu->spin);
2417			brelse(bp);
2418			goto doloop;
2419		}
2420
2421		/*
2422		 * Release the spinlock for the more complex ops we
2423		 * are now going to do.
2424		 */
2425		spin_unlock(&pcpu->spin);
2426		lwkt_yield();
2427
2428		/*
2429		 * This is a bit messy.
2430		 */
2431		if (LIST_FIRST(&bp->b_dep) != NULL &&
2432		    (bp->b_flags & B_DEFERRED) == 0 &&
2433		    buf_countdeps(bp, 0)) {
2434			spin_lock(&pcpu->spin);
2435			TAILQ_INSERT_TAIL(&pcpu->bufqueues[q], bp, b_freelist);
2436			bp->b_qindex = q;
2437			bp->b_flags |= B_DEFERRED;
2438			spin_unlock(&pcpu->spin);
2439			BUF_UNLOCK(bp);
2440			spin_lock(&pcpu->spin);
2441			bp = marker;
2442			continue;
2443		}
2444
2445		/*
2446		 * spinlock not held here.
2447		 *
2448		 * If the buffer has a dependency, buf_checkwrite() must
2449		 * also return 0 for us to be able to initiate the write.
2450		 *
2451		 * If the buffer is flagged B_ERROR it may be requeued
2452		 * over and over again; we try to avoid a livelock.
2453		 */
2454		if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
2455			brelse(bp);
2456		} else if (bp->b_flags & B_ERROR) {
2457			tsleep(bp, 0, "bioer", 1);
2458			bp->b_flags &= ~B_AGE;
2459			cluster_awrite(bp);
2460		} else {
2461			bp->b_flags |= B_AGE | B_KVABIO;
2462			cluster_awrite(bp);
2463		}
2464		/* bp invalid but needs to be NULL-tested if we break out */
2465 doloop:
2466		spin_lock(&pcpu->spin);
2467		++r;
2468		if (--loops == 0)
2469			break;
2470		bp = marker;
2471	}
2472	/* bp is invalid here but can be NULL-tested to advance */
2473
2474	TAILQ_REMOVE(&pcpu->bufqueues[q], marker, b_freelist);
2475	marker->b_qindex = BQUEUE_NONE;
2476	spin_unlock(&pcpu->spin);
2477
2478	/*
2479	 * Advance the marker to be fair.
2480	 */
2481	marker->b_qcpu = (marker->b_qcpu + 1) % ncpus;
2482	if (bp == NULL) {
2483		if (marker->b_qcpu != lcpu)
2484			goto again;
2485	}
2486
2487	return (r);
2488 }
2489
2490 /*
2491  * inmem:
2492  *
2493  *	Returns true if no I/O is needed to access the associated VM object.
2494  *	This is like findblk except it also hunts around in the VM system for
2495  *	the data.
2496  *
2497  *	Note that we ignore vm_page_free() races from interrupts against our
2498  *	lookup, since if the caller is not protected our return value will not
2499  *	be any more valid than otherwise once we exit the critical section.
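 *
 *	In sketch form, inmem() computes (names as in the code below):
 *
 *		findblk(vp, loffset, FINDBLK_TEST) != NULL ||
 *		    (every VM page backing the block is valid over its
 *		     DEV_BSIZE-aligned sub-range)
 *
 *	which lets callers such as clustering heuristics test residency
 *	cheaply, without locking a buffer.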
2500 */ 2501 int 2502 inmem(struct vnode *vp, off_t loffset) 2503 { 2504 vm_object_t obj; 2505 vm_offset_t toff, tinc, size; 2506 vm_page_t m; 2507 int res = 1; 2508 2509 if (findblk(vp, loffset, FINDBLK_TEST)) 2510 return 1; 2511 if (vp->v_mount == NULL) 2512 return 0; 2513 if ((obj = vp->v_object) == NULL) 2514 return 0; 2515 2516 size = PAGE_SIZE; 2517 if (size > vp->v_mount->mnt_stat.f_iosize) 2518 size = vp->v_mount->mnt_stat.f_iosize; 2519 2520 vm_object_hold(obj); 2521 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2522 m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff)); 2523 if (m == NULL) { 2524 res = 0; 2525 break; 2526 } 2527 tinc = size; 2528 if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK)) 2529 tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK); 2530 if (vm_page_is_valid(m, 2531 (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) { 2532 res = 0; 2533 break; 2534 } 2535 } 2536 vm_object_drop(obj); 2537 return (res); 2538 } 2539 2540 /* 2541 * findblk: 2542 * 2543 * Locate and return the specified buffer. Unless flagged otherwise, 2544 * a locked buffer will be returned if it exists or NULL if it does not. 2545 * 2546 * findblk()'d buffers are still on the bufqueues and if you intend 2547 * to use your (locked NON-TEST) buffer you need to bremfree(bp) 2548 * and possibly do other stuff to it. 2549 * 2550 * FINDBLK_TEST - Do not lock the buffer. The caller is responsible 2551 * for locking the buffer and ensuring that it remains 2552 * the desired buffer after locking. 2553 * 2554 * FINDBLK_NBLOCK - Lock the buffer non-blocking. If we are unable 2555 * to acquire the lock we return NULL, even if the 2556 * buffer exists. 2557 * 2558 * FINDBLK_REF - Returns the buffer ref'd, which prevents normal 2559 * reuse by getnewbuf() but does not prevent 2560 * disassociation (B_INVAL). Used to avoid deadlocks 2561 * against random (vp,loffset)s due to reassignment. 2562 * 2563 * FINDBLK_KVABIO - Only applicable when returning a locked buffer. 2564 * Indicates that the caller supports B_KVABIO. 2565 * 2566 * (0) - Lock the buffer blocking. 2567 */ 2568 struct buf * 2569 findblk(struct vnode *vp, off_t loffset, int flags) 2570 { 2571 struct buf *bp; 2572 int lkflags; 2573 2574 lkflags = LK_EXCLUSIVE; 2575 if (flags & FINDBLK_NBLOCK) 2576 lkflags |= LK_NOWAIT; 2577 2578 for (;;) { 2579 /* 2580 * Lookup. Ref the buf while holding v_token to prevent 2581 * reuse (but does not prevent diassociation). 2582 */ 2583 lwkt_gettoken_shared(&vp->v_token); 2584 bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); 2585 if (bp == NULL) { 2586 lwkt_reltoken(&vp->v_token); 2587 return(NULL); 2588 } 2589 bqhold(bp); 2590 lwkt_reltoken(&vp->v_token); 2591 2592 /* 2593 * If testing only break and return bp, do not lock. 2594 */ 2595 if (flags & FINDBLK_TEST) 2596 break; 2597 2598 /* 2599 * Lock the buffer, return an error if the lock fails. 2600 * (only FINDBLK_NBLOCK can cause the lock to fail). 2601 */ 2602 if (BUF_LOCK(bp, lkflags)) { 2603 atomic_subtract_int(&bp->b_refs, 1); 2604 /* bp = NULL; not needed */ 2605 return(NULL); 2606 } 2607 2608 /* 2609 * Revalidate the locked buf before allowing it to be 2610 * returned. 2611 * 2612 * B_KVABIO is only set/cleared when locking. When 2613 * clearing B_KVABIO, we must ensure that the buffer 2614 * is synchronized to all cpus. 
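		 *
		 * For reference, a hedged sketch of the ref-then-lock
		 * discipline FINDBLK_REF consumers use to avoid blocking
		 * on a recycled (vp,loffset); getblk() below implements
		 * the full version, and the retry label is illustrative:
		 *
		 *	bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST);
		 *	if (bp && BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
		 *		bqdrop(bp);
		 *		if (bp->b_vp != vp ||
		 *		    bp->b_loffset != loffset) {
		 *			BUF_UNLOCK(bp);	(lost a recycle race)
		 *			goto retry;
		 *		}
		 *	}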
2615		 */
2616		if (bp->b_vp == vp && bp->b_loffset == loffset) {
2617			if (flags & FINDBLK_KVABIO)
2618				bp->b_flags |= B_KVABIO;
2619			else
2620				bkvasync_all(bp);
2621			break;
2622		}
2623		atomic_subtract_int(&bp->b_refs, 1);
2624		BUF_UNLOCK(bp);
2625	}
2626
2627	/*
2628	 * Success
2629	 */
2630	if ((flags & FINDBLK_REF) == 0)
2631		atomic_subtract_int(&bp->b_refs, 1);
2632	return(bp);
2633 }
2634
2635 /*
2636  * getcacheblk:
2637  *
2638  *	Similar to getblk() except only returns the buffer if it is
2639  *	B_CACHE and requires no other manipulation. Otherwise NULL
2640  *	is returned. NULL is also returned if GETBLK_NOWAIT is set
2641  *	and the getblk() would block.
2642  *
2643  *	If B_RAM is set the buffer might be just fine, but we return
2644  *	NULL anyway because we want the code to fall through to the
2645  *	cluster read to issue more read-aheads. Otherwise read-ahead breaks.
2646  *
2647  *	If blksize is 0 the buffer cache buffer must already be fully
2648  *	cached.
2649  *
2650  *	If blksize is non-zero getblk() will be used, allowing a buffer
2651  *	to be reinstantiated from its VM backing store. The buffer must
2652  *	still be fully cached after reinstantiation to be returned.
2653  */
2654 struct buf *
2655 getcacheblk(struct vnode *vp, off_t loffset, int blksize, int blkflags)
2656 {
2657	struct buf *bp;
2658	int fndflags = 0;
2659
2660	if (blkflags & GETBLK_NOWAIT)
2661		fndflags |= FINDBLK_NBLOCK;
2662	if (blkflags & GETBLK_KVABIO)
2663		fndflags |= FINDBLK_KVABIO;
2664
2665	if (blksize) {
2666		bp = getblk(vp, loffset, blksize, blkflags, 0);
2667		if (bp) {
2668			if ((bp->b_flags & (B_INVAL | B_CACHE)) == B_CACHE) {
2669				bp->b_flags &= ~B_AGE;
2670				if (bp->b_flags & B_RAM) {
2671					bqrelse(bp);
2672					bp = NULL;
2673				}
2674			} else {
2675				brelse(bp);
2676				bp = NULL;
2677			}
2678		}
2679	} else {
2680		bp = findblk(vp, loffset, fndflags);
2681		if (bp) {
2682			if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) ==
2683			    B_CACHE) {
2684				bp->b_flags &= ~B_AGE;
2685				bremfree(bp);
2686			} else {
2687				BUF_UNLOCK(bp);
2688				bp = NULL;
2689			}
2690		}
2691	}
2692	return (bp);
2693 }
2694
2695 /*
2696  * getblk:
2697  *
2698  *	Get a block given a specified block and offset into a file/device.
2699  *	B_INVAL may or may not be set on return. The caller should clear
2700  *	B_INVAL prior to initiating a READ.
2701  *
2702  *	IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE
2703  *	IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ,
2704  *	OR SET B_INVAL BEFORE RETIRING IT. If you retire a getblk'd buffer
2705  *	without doing any of those things the system will likely believe
2706  *	the buffer to be valid (especially if it is not B_VMIO), and the
2707  *	next getblk() will return the buffer with B_CACHE set.
2708  *
2709  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2710  *	an existing buffer.
2711  *
2712  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2713  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2714  *	and then cleared based on the backing VM. If the previous buffer is
2715  *	non-0-sized but invalid, B_CACHE will be cleared.
2716  *
2717  *	If getblk() must create a new buffer, the new buffer is returned with
2718  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2719  *	case it is returned with B_INVAL clear and B_CACHE set based on the
2720  *	backing VM.
2721  *
2722  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2723  *	B_CACHE bit is clear.
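 *
 *	For reference, the canonical read pattern built on getblk() looks
 *	like the sketch below (this is essentially what bread() does;
 *	error handling elided):
 *
 *		bp = getblk(vp, loffset, size, 0, 0);
 *		if ((bp->b_flags & B_CACHE) == 0) {
 *			bp->b_flags &= ~(B_INVAL | B_ERROR);
 *			bp->b_cmd = BUF_CMD_READ;
 *			vfs_busy_pages(vp, bp);
 *			vn_strategy(vp, &bp->b_bio1);
 *			error = biowait(&bp->b_bio1, "biord");
 *		}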
2724 * 2725 * What this means, basically, is that the caller should use B_CACHE to 2726 * determine whether the buffer is fully valid or not and should clear 2727 * B_INVAL prior to issuing a read. If the caller intends to validate 2728 * the buffer by loading its data area with something, the caller needs 2729 * to clear B_INVAL. If the caller does this without issuing an I/O, 2730 * the caller should set B_CACHE ( as an optimization ), else the caller 2731 * should issue the I/O and biodone() will set B_CACHE if the I/O was 2732 * a write attempt or if it was a successfull read. If the caller 2733 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR 2734 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 2735 * 2736 * getblk flags: 2737 * 2738 * GETBLK_PCATCH - catch signal if blocked, can cause NULL return 2739 * GETBLK_BHEAVY - heavy-weight buffer cache buffer 2740 */ 2741 struct buf * 2742 getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo) 2743 { 2744 struct buf *bp; 2745 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 2746 int error; 2747 int lkflags; 2748 2749 if (size > MAXBSIZE) 2750 panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE); 2751 if (vp->v_object == NULL) 2752 panic("getblk: vnode %p has no object!", vp); 2753 2754 /* 2755 * NOTE: findblk does not try to resolve KVABIO in REF-only mode. 2756 * we still have to handle that ourselves. 2757 */ 2758 loop: 2759 if ((bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST)) != NULL) { 2760 /* 2761 * The buffer was found in the cache, but we need to lock it. 2762 * We must acquire a ref on the bp to prevent reuse, but 2763 * this will not prevent disassociation (brelvp()) so we 2764 * must recheck (vp,loffset) after acquiring the lock. 2765 * 2766 * Without the ref the buffer could potentially be reused 2767 * before we acquire the lock and create a deadlock 2768 * situation between the thread trying to reuse the buffer 2769 * and us due to the fact that we would wind up blocking 2770 * on a random (vp,loffset). 2771 */ 2772 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 2773 if (blkflags & GETBLK_NOWAIT) { 2774 bqdrop(bp); 2775 return(NULL); 2776 } 2777 lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 2778 if (blkflags & GETBLK_PCATCH) 2779 lkflags |= LK_PCATCH; 2780 error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo); 2781 if (error) { 2782 bqdrop(bp); 2783 if (error == ENOLCK) 2784 goto loop; 2785 return (NULL); 2786 } 2787 /* buffer may have changed on us */ 2788 } 2789 bqdrop(bp); 2790 2791 /* 2792 * Once the buffer has been locked, make sure we didn't race 2793 * a buffer recyclement. Buffers that are no longer hashed 2794 * will have b_vp == NULL, so this takes care of that check 2795 * as well. 2796 */ 2797 if (bp->b_vp != vp || bp->b_loffset != loffset) { 2798 #if 0 2799 kprintf("Warning buffer %p (vp %p loffset %lld) " 2800 "was recycled\n", 2801 bp, vp, (long long)loffset); 2802 #endif 2803 BUF_UNLOCK(bp); 2804 goto loop; 2805 } 2806 2807 /* 2808 * If SZMATCH any pre-existing buffer must be of the requested 2809 * size or NULL is returned. The caller absolutely does not 2810 * want getblk() to bwrite() the buffer on a size mismatch. 2811 */ 2812 if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) { 2813 BUF_UNLOCK(bp); 2814 return(NULL); 2815 } 2816 2817 /* 2818 * All vnode-based buffers must be backed by a VM object. 2819 * 2820 * Set B_KVABIO for any incidental work, we will fix it 2821 * up later. 
2822		 */
2823		KKASSERT(bp->b_flags & B_VMIO);
2824		KKASSERT(bp->b_cmd == BUF_CMD_DONE);
2825		bp->b_flags &= ~B_AGE;
2826		bp->b_flags |= B_KVABIO;
2827
2828		/*
2829		 * Make sure that B_INVAL buffers do not have a cached
2830		 * block number translation.
2831		 */
2832		if ((bp->b_flags & B_INVAL) &&
2833		    (bp->b_bio2.bio_offset != NOOFFSET)) {
2834			kprintf("Warning: invalid buffer %p (vp %p loffset %lld)"
2835				" did not have cleared bio_offset cache\n",
2836				bp, vp, (long long)loffset);
2837			clearbiocache(&bp->b_bio2);
2838		}
2839
2840		/*
2841		 * The buffer is locked. B_CACHE is cleared if the buffer is
2842		 * invalid.
2843		 *
2844		 * After the bremfree(), disposals must use b[q]relse().
2845		 */
2846		if (bp->b_flags & B_INVAL)
2847			bp->b_flags &= ~B_CACHE;
2848		bremfree(bp);
2849
2850		/*
2851		 * Any size inconsistency with a dirty buffer or a buffer
2852		 * with a softupdates dependency must be resolved. Resizing
2853		 * the buffer in such circumstances can lead to problems.
2854		 *
2855		 * Dirty or dependent buffers are written synchronously.
2856		 * Other types of buffers are simply released and
2857		 * reconstituted as they may be backed by valid, dirty VM
2858		 * pages (but not marked B_DELWRI).
2859		 *
2860		 * NFS NOTE: NFS buffers which straddle EOF are oddly-sized
2861		 * and may be left over from a prior truncation (and thus
2862		 * no longer represent the actual EOF point), so we
2863		 * definitely do not want to B_NOCACHE the backing store.
2864		 */
2865		if (size != bp->b_bcount) {
2866			if (bp->b_flags & B_DELWRI) {
2867				bp->b_flags |= B_RELBUF;
2868				bwrite(bp);
2869			} else if (LIST_FIRST(&bp->b_dep)) {
2870				bp->b_flags |= B_RELBUF;
2871				bwrite(bp);
2872			} else {
2873				bp->b_flags |= B_RELBUF;
2874				brelse(bp);
2875			}
2876			goto loop;
2877		}
2878		KKASSERT(size <= bp->b_kvasize);
2879		KASSERT(bp->b_loffset != NOOFFSET,
2880			("getblk: no buffer offset"));
2881
2882		/*
2883		 * A buffer with B_DELWRI set and B_CACHE clear must
2884		 * be committed before we can return the buffer in
2885		 * order to prevent the caller from issuing a read
2886		 * ( due to B_CACHE not being set ) and overwriting
2887		 * it.
2888		 *
2889		 * Most callers, including NFS and FFS, need this to
2890		 * operate properly either because they assume they
2891		 * can issue a read if B_CACHE is not set, or because
2892		 * ( for example ) an uncached B_DELWRI might loop due
2893		 * to softupdates re-dirtying the buffer. In the latter
2894		 * case, B_CACHE is set after the first write completes,
2895		 * preventing further loops.
2896		 *
2897		 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE
2898		 * above while extending the buffer, we cannot allow the
2899		 * buffer to remain with B_CACHE set after the write
2900		 * completes or it will represent a corrupt state. To
2901		 * deal with this we set B_NOCACHE to scrap the buffer
2902		 * after the write.
2903		 *
2904		 * XXX Should this be B_RELBUF instead of B_NOCACHE?
2905		 *     I'm not even sure this state is still possible
2906		 *     now that getblk() writes out any dirty buffers
2907		 *     on size changes.
2908		 *
2909		 * We might be able to do something fancy, like setting
2910		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2911		 * so the below call doesn't set B_CACHE, but that gets real
2912		 * confusing. This is much easier.
2913 */ 2914 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 2915 kprintf("getblk: Warning, bp %p loff=%jx DELWRI set " 2916 "and CACHE clear, b_flags %08x\n", 2917 bp, (uintmax_t)bp->b_loffset, bp->b_flags); 2918 bp->b_flags |= B_NOCACHE; 2919 bwrite(bp); 2920 goto loop; 2921 } 2922 } else { 2923 /* 2924 * Buffer is not in-core, create new buffer. The buffer 2925 * returned by getnewbuf() is locked. Note that the returned 2926 * buffer is also considered valid (not marked B_INVAL). 2927 * 2928 * Calculating the offset for the I/O requires figuring out 2929 * the block size. We use DEV_BSIZE for VBLK or VCHR and 2930 * the mount's f_iosize otherwise. If the vnode does not 2931 * have an associated mount we assume that the passed size is 2932 * the block size. 2933 * 2934 * Note that vn_isdisk() cannot be used here since it may 2935 * return a failure for numerous reasons. Note that the 2936 * buffer size may be larger then the block size (the caller 2937 * will use block numbers with the proper multiple). Beware 2938 * of using any v_* fields which are part of unions. In 2939 * particular, in DragonFly the mount point overloading 2940 * mechanism uses the namecache only and the underlying 2941 * directory vnode is not a special case. 2942 */ 2943 int bsize, maxsize; 2944 2945 if (vp->v_type == VBLK || vp->v_type == VCHR) 2946 bsize = DEV_BSIZE; 2947 else if (vp->v_mount) 2948 bsize = vp->v_mount->mnt_stat.f_iosize; 2949 else 2950 bsize = size; 2951 2952 maxsize = size + (loffset & PAGE_MASK); 2953 maxsize = imax(maxsize, bsize); 2954 2955 bp = getnewbuf(blkflags, slptimeo, size, maxsize); 2956 if (bp == NULL) { 2957 if (slpflags || slptimeo) 2958 return NULL; 2959 goto loop; 2960 } 2961 2962 /* 2963 * Atomically insert the buffer into the hash, so that it can 2964 * be found by findblk(). 2965 * 2966 * If bgetvp() returns non-zero a collision occured, and the 2967 * bp will not be associated with the vnode. 2968 * 2969 * Make sure the translation layer has been cleared. 2970 */ 2971 bp->b_loffset = loffset; 2972 bp->b_bio2.bio_offset = NOOFFSET; 2973 /* bp->b_bio2.bio_next = NULL; */ 2974 2975 if (bgetvp(vp, bp, size)) { 2976 bp->b_flags |= B_INVAL; 2977 brelse(bp); 2978 goto loop; 2979 } 2980 2981 /* 2982 * All vnode-based buffers must be backed by a VM object. 2983 * 2984 * Set B_KVABIO for incidental work 2985 */ 2986 KKASSERT(vp->v_object != NULL); 2987 bp->b_flags |= B_VMIO | B_KVABIO; 2988 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 2989 2990 allocbuf(bp, size); 2991 } 2992 2993 /* 2994 * Do the nasty smp broadcast (if the buffer needs it) when KVABIO 2995 * is not supported. 2996 */ 2997 if (bp && (blkflags & GETBLK_KVABIO) == 0) { 2998 bkvasync_all(bp); 2999 } 3000 return (bp); 3001 } 3002 3003 /* 3004 * regetblk(bp) 3005 * 3006 * Reacquire a buffer that was previously released to the locked queue, 3007 * or reacquire a buffer which is interlocked by having bioops->io_deallocate 3008 * set B_LOCKED (which handles the acquisition race). 3009 * 3010 * To this end, either B_LOCKED must be set or the dependancy list must be 3011 * non-empty. 3012 */ 3013 void 3014 regetblk(struct buf *bp) 3015 { 3016 KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL); 3017 BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY); 3018 bremfree(bp); 3019 } 3020 3021 /* 3022 * allocbuf: 3023 * 3024 * This code constitutes the buffer memory from either anonymous system 3025 * memory (in the case of non-VMIO operations) or from an associated 3026 * VM object (in the case of VMIO operations). 
This code is able to 3027 * resize a buffer up or down. 3028 * 3029 * Note that this code is tricky, and has many complications to resolve 3030 * deadlock or inconsistant data situations. Tread lightly!!! 3031 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3032 * the caller. Calling this code willy nilly can result in the loss of 3033 * data. 3034 * 3035 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3036 * B_CACHE for the non-VMIO case. 3037 * 3038 * This routine does not need to be called from a critical section but you 3039 * must own the buffer. 3040 */ 3041 void 3042 allocbuf(struct buf *bp, int size) 3043 { 3044 vm_page_t m; 3045 int newbsize; 3046 int desiredpages; 3047 int i; 3048 3049 if (BUF_LOCKINUSE(bp) == 0) 3050 panic("allocbuf: buffer not busy"); 3051 3052 if (bp->b_kvasize < size) 3053 panic("allocbuf: buffer too small"); 3054 3055 KKASSERT(bp->b_flags & B_VMIO); 3056 3057 newbsize = roundup2(size, DEV_BSIZE); 3058 desiredpages = ((int)(bp->b_loffset & PAGE_MASK) + 3059 newbsize + PAGE_MASK) >> PAGE_SHIFT; 3060 KKASSERT(desiredpages <= XIO_INTERNAL_PAGES); 3061 3062 /* 3063 * Set B_CACHE initially if buffer is 0 length or will become 3064 * 0-length. 3065 */ 3066 if (size == 0 || bp->b_bufsize == 0) 3067 bp->b_flags |= B_CACHE; 3068 3069 if (newbsize < bp->b_bufsize) { 3070 /* 3071 * DEV_BSIZE aligned new buffer size is less then the 3072 * DEV_BSIZE aligned existing buffer size. Figure out 3073 * if we have to remove any pages. 3074 */ 3075 if (desiredpages < bp->b_xio.xio_npages) { 3076 for (i = desiredpages; i < bp->b_xio.xio_npages; i++) { 3077 /* 3078 * the page is not freed here -- it 3079 * is the responsibility of 3080 * vnode_pager_setsize 3081 */ 3082 m = bp->b_xio.xio_pages[i]; 3083 KASSERT(m != bogus_page, 3084 ("allocbuf: bogus page found")); 3085 vm_page_busy_wait(m, TRUE, "biodep"); 3086 bp->b_xio.xio_pages[i] = NULL; 3087 vm_page_unwire(m, 0); 3088 vm_page_wakeup(m); 3089 } 3090 pmap_qremove_noinval((vm_offset_t) 3091 trunc_page((vm_offset_t)bp->b_data) + 3092 (desiredpages << PAGE_SHIFT), 3093 (bp->b_xio.xio_npages - desiredpages)); 3094 bp->b_xio.xio_npages = desiredpages; 3095 3096 /* 3097 * Don't bother invalidating the pmap changes 3098 * (which wastes global SMP invalidation IPIs) 3099 * when setting the size to 0. This case occurs 3100 * when called via getnewbuf() during buffer 3101 * recyclement. 3102 */ 3103 if (desiredpages == 0) { 3104 CPUMASK_ASSZERO(bp->b_cpumask); 3105 } else { 3106 bkvareset(bp); 3107 } 3108 } 3109 } else if (size > bp->b_bcount) { 3110 /* 3111 * We are growing the buffer, possibly in a 3112 * byte-granular fashion. 3113 */ 3114 struct vnode *vp; 3115 vm_object_t obj; 3116 vm_offset_t toff; 3117 vm_offset_t tinc; 3118 3119 /* 3120 * Step 1, bring in the VM pages from the object, 3121 * allocating them if necessary. We must clear 3122 * B_CACHE if these pages are not valid for the 3123 * range covered by the buffer. 
3124 */ 3125 vp = bp->b_vp; 3126 obj = vp->v_object; 3127 3128 vm_object_hold(obj); 3129 while (bp->b_xio.xio_npages < desiredpages) { 3130 vm_page_t m; 3131 vm_pindex_t pi; 3132 int error; 3133 3134 pi = OFF_TO_IDX(bp->b_loffset) + 3135 bp->b_xio.xio_npages; 3136 3137 /* 3138 * Blocking on m->busy_count might lead to a 3139 * deadlock: 3140 * 3141 * vm_fault->getpages->cluster_read->allocbuf 3142 */ 3143 m = vm_page_lookup_busy_try(obj, pi, FALSE, 3144 &error); 3145 if (error) { 3146 vm_page_sleep_busy(m, FALSE, "pgtblk"); 3147 continue; 3148 } 3149 if (m == NULL) { 3150 /* 3151 * note: must allocate system pages 3152 * since blocking here could intefere 3153 * with paging I/O, no matter which 3154 * process we are. 3155 */ 3156 m = bio_page_alloc(bp, obj, pi, 3157 desiredpages - 3158 bp->b_xio.xio_npages); 3159 if (m) { 3160 vm_page_wire(m); 3161 vm_page_wakeup(m); 3162 bp->b_flags &= ~B_CACHE; 3163 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3164 ++bp->b_xio.xio_npages; 3165 } 3166 continue; 3167 } 3168 3169 /* 3170 * We found a page and were able to busy it. 3171 */ 3172 vm_page_wire(m); 3173 vm_page_wakeup(m); 3174 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3175 ++bp->b_xio.xio_npages; 3176 if (bp->b_act_count < m->act_count) 3177 bp->b_act_count = m->act_count; 3178 } 3179 vm_object_drop(obj); 3180 3181 /* 3182 * Step 2. We've loaded the pages into the buffer, 3183 * we have to figure out if we can still have B_CACHE 3184 * set. Note that B_CACHE is set according to the 3185 * byte-granular range ( bcount and size ), not the 3186 * aligned range ( newbsize ). 3187 * 3188 * The VM test is against m->valid, which is DEV_BSIZE 3189 * aligned. Needless to say, the validity of the data 3190 * needs to also be DEV_BSIZE aligned. Note that this 3191 * fails with NFS if the server or some other client 3192 * extends the file's EOF. If our buffer is resized, 3193 * B_CACHE may remain set! XXX 3194 */ 3195 3196 toff = bp->b_bcount; 3197 tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK); 3198 3199 while ((bp->b_flags & B_CACHE) && toff < size) { 3200 vm_pindex_t pi; 3201 3202 if (tinc > (size - toff)) 3203 tinc = size - toff; 3204 3205 pi = ((bp->b_loffset & PAGE_MASK) + toff) >> 3206 PAGE_SHIFT; 3207 3208 vfs_buf_test_cache( 3209 bp, 3210 bp->b_loffset, 3211 toff, 3212 tinc, 3213 bp->b_xio.xio_pages[pi] 3214 ); 3215 toff += tinc; 3216 tinc = PAGE_SIZE; 3217 } 3218 3219 /* 3220 * Step 3, fixup the KVM pmap. Remember that 3221 * bp->b_data is relative to bp->b_loffset, but 3222 * bp->b_loffset may be offset into the first page. 3223 */ 3224 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data); 3225 pmap_qenter_noinval((vm_offset_t)bp->b_data, 3226 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3227 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 3228 (vm_offset_t)(bp->b_loffset & PAGE_MASK)); 3229 bkvareset(bp); 3230 } 3231 atomic_add_long(&bufspace, newbsize - bp->b_bufsize); 3232 3233 /* adjust space use on already-dirty buffer */ 3234 if (bp->b_flags & B_DELWRI) { 3235 /* dirtykvaspace unchanged */ 3236 atomic_add_long(&dirtybufspace, newbsize - bp->b_bufsize); 3237 if (bp->b_flags & B_HEAVY) { 3238 atomic_add_long(&dirtybufspacehw, 3239 newbsize - bp->b_bufsize); 3240 } 3241 } 3242 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3243 bp->b_bcount = size; /* requested buffer size */ 3244 bufspacewakeup(); 3245 } 3246 3247 /* 3248 * biowait: 3249 * 3250 * Wait for buffer I/O completion, returning error status. 
B_EINTR 3251 * is converted into an EINTR error but not cleared (since a chain 3252 * of biowait() calls may occur). 3253 * 3254 * On return bpdone() will have been called but the buffer will remain 3255 * locked and will not have been brelse()'d. 3256 * 3257 * NOTE! If a timeout is specified and ETIMEDOUT occurs the I/O is 3258 * likely still in progress on return. 3259 * 3260 * NOTE! This operation is on a BIO, not a BUF. 3261 * 3262 * NOTE! BIO_DONE is cleared by vn_strategy() 3263 */ 3264 static __inline int 3265 _biowait(struct bio *bio, const char *wmesg, int to) 3266 { 3267 struct buf *bp = bio->bio_buf; 3268 u_int32_t flags; 3269 u_int32_t nflags; 3270 int error; 3271 3272 KKASSERT(bio == &bp->b_bio1); 3273 for (;;) { 3274 flags = bio->bio_flags; 3275 if (flags & BIO_DONE) 3276 break; 3277 nflags = flags | BIO_WANT; 3278 tsleep_interlock(bio, 0); 3279 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3280 if (wmesg) 3281 error = tsleep(bio, PINTERLOCKED, wmesg, to); 3282 else if (bp->b_cmd == BUF_CMD_READ) 3283 error = tsleep(bio, PINTERLOCKED, "biord", to); 3284 else 3285 error = tsleep(bio, PINTERLOCKED, "biowr", to); 3286 if (error) { 3287 kprintf("tsleep error biowait %d\n", error); 3288 return (error); 3289 } 3290 } 3291 } 3292 3293 /* 3294 * Finish up. 3295 */ 3296 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 3297 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); 3298 if (bp->b_flags & B_EINTR) 3299 return (EINTR); 3300 if (bp->b_flags & B_ERROR) 3301 return (bp->b_error ? bp->b_error : EIO); 3302 return (0); 3303 } 3304 3305 int 3306 biowait(struct bio *bio, const char *wmesg) 3307 { 3308 return(_biowait(bio, wmesg, 0)); 3309 } 3310 3311 int 3312 biowait_timeout(struct bio *bio, const char *wmesg, int to) 3313 { 3314 return(_biowait(bio, wmesg, to)); 3315 } 3316 3317 /* 3318 * This associates a tracking count with an I/O. vn_strategy() and 3319 * dev_dstrategy() do this automatically but there are a few cases 3320 * where a vnode or device layer is bypassed when a block translation 3321 * is cached. In such cases bio_start_transaction() may be called on 3322 * the bypassed layers so the system gets an I/O in progress indication 3323 * for those higher layers. 3324 */ 3325 void 3326 bio_start_transaction(struct bio *bio, struct bio_track *track) 3327 { 3328 bio->bio_track = track; 3329 bio_track_ref(track); 3330 dsched_buf_enter(bio->bio_buf); /* might stack */ 3331 } 3332 3333 /* 3334 * Initiate I/O on a vnode. 3335 * 3336 * SWAPCACHE OPERATION: 3337 * 3338 * Real buffer cache buffers have a non-NULL bp->b_vp. Unfortunately 3339 * devfs also uses b_vp for fake buffers so we also have to check 3340 * that B_PAGING is 0. In this case the passed 'vp' is probably the 3341 * underlying block device. The swap assignments are related to the 3342 * buffer cache buffer's b_vp, not the passed vp. 3343 * 3344 * The passed vp == bp->b_vp only in the case where the strategy call 3345 * is made on the vp itself for its own buffers (a regular file or 3346 * block device vp). The filesystem usually then re-calls vn_strategy() 3347 * after translating the request to an underlying device. 3348 * 3349 * Cluster buffers set B_CLUSTER and the passed vp is the vp of the 3350 * underlying buffer cache buffers. 3351 * 3352 * We can only deal with page-aligned buffers at the moment, because 3353 * we can't tell what the real dirty state for pages straddling a buffer 3354 * are. 
3355 * 3356 * In order to call swap_pager_strategy() we must provide the VM object 3357 * and base offset for the underlying buffer cache pages so it can find 3358 * the swap blocks. 3359 */ 3360 void 3361 vn_strategy(struct vnode *vp, struct bio *bio) 3362 { 3363 struct bio_track *track; 3364 struct buf *bp = bio->bio_buf; 3365 3366 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 3367 3368 /* 3369 * Set when an I/O is issued on the bp. Cleared by consumers 3370 * (aka HAMMER), allowing the consumer to determine if I/O had 3371 * actually occurred. 3372 */ 3373 bp->b_flags |= B_IOISSUED; 3374 3375 /* 3376 * Handle the swapcache intercept. 3377 * 3378 * NOTE: The swapcache itself always supports KVABIO and will 3379 * do the right thing if its underlying devices do not. 3380 */ 3381 if (vn_cache_strategy(vp, bio)) 3382 return; 3383 3384 /* 3385 * If the vnode does not support KVABIO and the buffer is using 3386 * KVABIO, we must synchronize b_data to all cpus before dispatching. 3387 */ 3388 if ((vp->v_flag & VKVABIO) == 0 && (bp->b_flags & B_KVABIO)) 3389 bkvasync_all(bp); 3390 3391 /* 3392 * Otherwise do the operation through the filesystem 3393 */ 3394 if (bp->b_cmd == BUF_CMD_READ) 3395 track = &vp->v_track_read; 3396 else 3397 track = &vp->v_track_write; 3398 KKASSERT((bio->bio_flags & BIO_DONE) == 0); 3399 bio->bio_track = track; 3400 bio_track_ref(track); 3401 dsched_buf_enter(bp); /* might stack */ 3402 vop_strategy(*vp->v_ops, vp, bio); 3403 } 3404 3405 /* 3406 * vn_cache_strategy() 3407 * 3408 * NOTE: This function supports the KVABIO API wherein b_data might not 3409 * be synchronized to the current cpu. 3410 */ 3411 static void vn_cache_strategy_callback(struct bio *bio); 3412 3413 int 3414 vn_cache_strategy(struct vnode *vp, struct bio *bio) 3415 { 3416 struct buf *bp = bio->bio_buf; 3417 struct bio *nbio; 3418 vm_object_t object; 3419 vm_page_t m; 3420 int i; 3421 3422 /* 3423 * Stop using swapcache if paniced, dumping, or dumped 3424 */ 3425 if (panicstr || dumping) 3426 return(0); 3427 3428 /* 3429 * Is this buffer cache buffer suitable for reading from 3430 * the swap cache? 3431 */ 3432 if (vm_swapcache_read_enable == 0 || 3433 bp->b_cmd != BUF_CMD_READ || 3434 ((bp->b_flags & B_CLUSTER) == 0 && 3435 (bp->b_vp == NULL || (bp->b_flags & B_PAGING))) || 3436 ((int)bp->b_loffset & PAGE_MASK) != 0 || 3437 (bp->b_bcount & PAGE_MASK) != 0) { 3438 return(0); 3439 } 3440 3441 /* 3442 * Figure out the original VM object (it will match the underlying 3443 * VM pages). Note that swap cached data uses page indices relative 3444 * to that object, not relative to bio->bio_offset. 3445 */ 3446 if (bp->b_flags & B_CLUSTER) 3447 object = vp->v_object; 3448 else 3449 object = bp->b_vp->v_object; 3450 3451 /* 3452 * In order to be able to use the swap cache all underlying VM 3453 * pages must be marked as such, and we can't have any bogus pages. 3454 */ 3455 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 3456 m = bp->b_xio.xio_pages[i]; 3457 if ((m->flags & PG_SWAPPED) == 0) 3458 break; 3459 if (m == bogus_page) 3460 break; 3461 } 3462 3463 /* 3464 * If we are good then issue the I/O using swap_pager_strategy(). 3465 * 3466 * We can only do this if the buffer actually supports object-backed 3467 * I/O. If it doesn't npages will be 0. 
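	 *
	 * In sketch form the test below reduces to:
	 *
	 *	npages != 0 &&
	 *	(every page is PG_SWAPPED && none is bogus_page)
	 *
	 * i.e. a buffer with no object-backed pages can never be
	 * redirected to the swap cache.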
3468	 */
3469	if (i && i == bp->b_xio.xio_npages) {
3470		m = bp->b_xio.xio_pages[0];
3471		nbio = push_bio(bio);
3472		nbio->bio_done = vn_cache_strategy_callback;
3473		nbio->bio_offset = ptoa(m->pindex);
3474		KKASSERT(m->object == object);
3475		swap_pager_strategy(object, nbio);
3476		return(1);
3477	}
3478	return(0);
3479 }
3480
3481 /*
3482  * This is a bit of a hack but since the vn_cache_strategy() function can
3483  * override a VFS's strategy function we must make sure that the bio, which
3484  * is probably bio2, doesn't leak an unexpected offset value back to the
3485  * filesystem. The filesystem (e.g. UFS) might otherwise assume that the
3486  * bio went through its own file strategy function and the bio2 offset
3487  * is a cached disk offset when, in fact, it isn't.
3488  */
3489 static void
3490 vn_cache_strategy_callback(struct bio *bio)
3491 {
3492	bio->bio_offset = NOOFFSET;
3493	biodone(pop_bio(bio));
3494 }
3495
3496 /*
3497  * bpdone:
3498  *
3499  *	Finish I/O on a buffer after all BIOs have been processed.
3500  *	Called when the bio chain is exhausted or by biowait. If called
3501  *	by biowait, elseit is typically 0.
3502  *
3503  *	bpdone is also responsible for setting B_CACHE in a B_VMIO bp.
3504  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3505  *	assuming B_INVAL is clear.
3506  *
3507  *	For the VMIO case, we set B_CACHE if the op was a read and no
3508  *	read error occurred, or if the op was a write. B_CACHE is never
3509  *	set if the buffer is invalid or otherwise uncacheable.
3510  *
3511  *	bpdone does not mess with B_INVAL, allowing the I/O routine or the
3512  *	initiator to leave B_INVAL set to brelse the buffer out of existence
3513  *	in the biodone routine.
3514  *
3515  *	bpdone is responsible for calling bundirty() on the buffer after a
3516  *	successful write. We previously did this prior to initiating the
3517  *	write under the assumption that the buffer might be dirtied again
3518  *	while the write was in progress, however doing it beforehand creates
3519  *	a race condition prior to the call to vn_strategy() where the
3520  *	filesystem may not be aware that a dirty buffer is present.
3521  *	It should not be possible for the buffer or its underlying pages to
3522  *	be redirtied prior to bpdone()'s unbusying of the underlying VM
3523  *	pages.
3524  */
3525 void
3526 bpdone(struct buf *bp, int elseit)
3527 {
3528	buf_cmd_t cmd;
3529
3530	KASSERT(BUF_LOCKINUSE(bp), ("bpdone: bp %p not busy", bp));
3531	KASSERT(bp->b_cmd != BUF_CMD_DONE,
3532		("bpdone: bp %p already done!", bp));
3533
3534	/*
3535	 * No more BIOs are left. All completion functions have been dealt
3536	 * with, now we clean up the buffer.
3537	 */
3538	cmd = bp->b_cmd;
3539	bp->b_cmd = BUF_CMD_DONE;
3540
3541	/*
3542	 * Only reads and writes are processed past this point.
3543	 */
3544	if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) {
3545		if (cmd == BUF_CMD_FREEBLKS)
3546			bp->b_flags |= B_NOCACHE;
3547		if (elseit)
3548			brelse(bp);
3549		return;
3550	}
3551
3552	/*
3553	 * A failed write must re-dirty the buffer unless B_INVAL
3554	 * was set.
3555	 *
3556	 * A successful write must clear the dirty flag. This is done after
3557	 * the write to ensure that the buffer remains on the vnode's dirty
3558	 * list for filesystem interlocks / checks until the write is actually
3559	 * complete. HAMMER2 is sensitive to this issue.
3560	 *
3561	 * Only applicable to normal buffers (with VPs). vinum buffers may
3562	 * not have a vp.
3563 * 3564 * Must be done prior to calling buf_complete() as the callback might 3565 * re-dirty the buffer. 3566 */ 3567 if (cmd == BUF_CMD_WRITE) { 3568 if ((bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) { 3569 bp->b_flags &= ~B_NOCACHE; 3570 if (bp->b_vp) 3571 bdirty(bp); 3572 } else { 3573 if (bp->b_vp) 3574 bundirty(bp); 3575 } 3576 } 3577 3578 /* 3579 * Warning: softupdates may re-dirty the buffer, and HAMMER can do 3580 * a lot worse. XXX - move this above the clearing of b_cmd 3581 */ 3582 if (LIST_FIRST(&bp->b_dep) != NULL) 3583 buf_complete(bp); 3584 3585 if (bp->b_flags & B_VMIO) { 3586 int i; 3587 vm_ooffset_t foff; 3588 vm_page_t m; 3589 vm_object_t obj; 3590 int iosize; 3591 struct vnode *vp = bp->b_vp; 3592 3593 obj = vp->v_object; 3594 3595 #if defined(VFS_BIO_DEBUG) 3596 if (vp->v_auxrefs == 0) 3597 panic("bpdone: zero vnode hold count"); 3598 if ((vp->v_flag & VOBJBUF) == 0) 3599 panic("bpdone: vnode is not setup for merged cache"); 3600 #endif 3601 3602 foff = bp->b_loffset; 3603 KASSERT(foff != NOOFFSET, ("bpdone: no buffer offset")); 3604 KASSERT(obj != NULL, ("bpdone: missing VM object")); 3605 3606 #if defined(VFS_BIO_DEBUG) 3607 if (obj->paging_in_progress < bp->b_xio.xio_npages) { 3608 kprintf("bpdone: paging in progress(%d) < " 3609 "bp->b_xio.xio_npages(%d)\n", 3610 obj->paging_in_progress, 3611 bp->b_xio.xio_npages); 3612 } 3613 #endif 3614 3615 /* 3616 * Set B_CACHE if the op was a normal read and no error 3617 * occured. B_CACHE is set for writes in the b*write() 3618 * routines. 3619 */ 3620 iosize = bp->b_bcount - bp->b_resid; 3621 if (cmd == BUF_CMD_READ && 3622 (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) { 3623 bp->b_flags |= B_CACHE; 3624 } 3625 3626 vm_object_hold(obj); 3627 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3628 int resid; 3629 int isbogus; 3630 3631 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3632 if (resid > iosize) 3633 resid = iosize; 3634 3635 /* 3636 * cleanup bogus pages, restoring the originals. Since 3637 * the originals should still be wired, we don't have 3638 * to worry about interrupt/freeing races destroying 3639 * the VM object association. 3640 */ 3641 m = bp->b_xio.xio_pages[i]; 3642 if (m == bogus_page) { 3643 if ((bp->b_flags & B_HASBOGUS) == 0) 3644 panic("bpdone: bp %p corrupt bogus", bp); 3645 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3646 if (m == NULL) 3647 panic("bpdone: page disappeared"); 3648 bp->b_xio.xio_pages[i] = m; 3649 isbogus = 1; 3650 } else { 3651 isbogus = 0; 3652 } 3653 #if defined(VFS_BIO_DEBUG) 3654 if (OFF_TO_IDX(foff) != m->pindex) { 3655 kprintf("bpdone: foff(%lu)/m->pindex(%ld) " 3656 "mismatch\n", 3657 (unsigned long)foff, (long)m->pindex); 3658 } 3659 #endif 3660 3661 /* 3662 * In the write case, the valid and clean bits are 3663 * already changed correctly (see bdwrite()), so we 3664 * only need to do this here in the read case. 3665 */ 3666 vm_page_busy_wait(m, FALSE, "bpdpgw"); 3667 if (cmd == BUF_CMD_READ && isbogus == 0 && resid > 0) 3668 vfs_clean_one_page(bp, i, m); 3669 3670 /* 3671 * when debugging new filesystems or buffer I/O 3672 * methods, this is the most common error that pops 3673 * up. if you see this, you have not set the page 3674 * busy flag correctly!!! 
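		 *
		 * For reference, a correctly paired initiation sequence
		 * looks like the following sketch (write case shown; the
		 * read case is analogous):
		 *
		 *	bp->b_cmd = BUF_CMD_WRITE;
		 *	vfs_busy_pages(bp->b_vp, bp);
		 *		(vm_page_io_start() on each page)
		 *	vn_strategy(vp, &bp->b_bio1);
		 *	...
		 *	biodone() -> bpdone() -> vm_page_io_finish()
		 *		on each page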
3675 */ 3676 if ((m->busy_count & PBUSY_MASK) == 0) { 3677 kprintf("bpdone: page busy < 0, " 3678 "pindex: %d, foff: 0x(%x,%x), " 3679 "resid: %d, index: %d\n", 3680 (int) m->pindex, (int)(foff >> 32), 3681 (int) foff & 0xffffffff, resid, i); 3682 if (!vn_isdisk(vp, NULL)) 3683 kprintf(" iosize: %ld, loffset: %lld, " 3684 "flags: 0x%08x, npages: %d\n", 3685 bp->b_vp->v_mount->mnt_stat.f_iosize, 3686 (long long)bp->b_loffset, 3687 bp->b_flags, bp->b_xio.xio_npages); 3688 else 3689 kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n", 3690 (long long)bp->b_loffset, 3691 bp->b_flags, bp->b_xio.xio_npages); 3692 kprintf(" valid: 0x%x, dirty: 0x%x, " 3693 "wired: %d\n", 3694 m->valid, m->dirty, 3695 m->wire_count); 3696 panic("bpdone: page busy < 0"); 3697 } 3698 vm_page_io_finish(m); 3699 vm_page_wakeup(m); 3700 vm_object_pip_wakeup(obj); 3701 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3702 iosize -= resid; 3703 } 3704 if (bp->b_flags & B_HASBOGUS) { 3705 pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data), 3706 bp->b_xio.xio_pages, 3707 bp->b_xio.xio_npages); 3708 bp->b_flags &= ~B_HASBOGUS; 3709 bkvareset(bp); 3710 } 3711 vm_object_drop(obj); 3712 } 3713 3714 /* 3715 * Finish up by releasing the buffer. There are no more synchronous 3716 * or asynchronous completions, those were handled by bio_done 3717 * callbacks. 3718 */ 3719 if (elseit) { 3720 if (bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR|B_RELBUF)) 3721 brelse(bp); 3722 else 3723 bqrelse(bp); 3724 } 3725 } 3726 3727 /* 3728 * Normal biodone. 3729 */ 3730 void 3731 biodone(struct bio *bio) 3732 { 3733 struct buf *bp = bio->bio_buf; 3734 3735 runningbufwakeup(bp); 3736 3737 /* 3738 * Run up the chain of BIO's. Leave b_cmd intact for the duration. 3739 */ 3740 while (bio) { 3741 biodone_t *done_func; 3742 struct bio_track *track; 3743 3744 /* 3745 * BIO tracking. Most but not all BIOs are tracked. 3746 */ 3747 if ((track = bio->bio_track) != NULL) { 3748 bio_track_rel(track); 3749 bio->bio_track = NULL; 3750 } 3751 3752 /* 3753 * A bio_done function terminates the loop. The function 3754 * will be responsible for any further chaining and/or 3755 * buffer management. 3756 * 3757 * WARNING! The done function can deallocate the buffer! 3758 */ 3759 if ((done_func = bio->bio_done) != NULL) { 3760 bio->bio_done = NULL; 3761 done_func(bio); 3762 return; 3763 } 3764 bio = bio->bio_prev; 3765 } 3766 3767 /* 3768 * If we've run out of bio's do normal [a]synchronous completion. 3769 */ 3770 bpdone(bp, 1); 3771 } 3772 3773 /* 3774 * Synchronous biodone - this terminates a synchronous BIO. 3775 * 3776 * bpdone() is called with elseit=FALSE, leaving the buffer completed 3777 * but still locked. The caller must brelse() the buffer after waiting 3778 * for completion. 3779 */ 3780 void 3781 biodone_sync(struct bio *bio) 3782 { 3783 struct buf *bp = bio->bio_buf; 3784 int flags; 3785 int nflags; 3786 3787 KKASSERT(bio == &bp->b_bio1); 3788 bpdone(bp, 0); 3789 3790 for (;;) { 3791 flags = bio->bio_flags; 3792 nflags = (flags | BIO_DONE) & ~BIO_WANT; 3793 3794 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3795 if (flags & BIO_WANT) 3796 wakeup(bio); 3797 break; 3798 } 3799 } 3800 } 3801 3802 /* 3803 * vfs_unbusy_pages: 3804 * 3805 * This routine is called in lieu of iodone in the case of 3806 * incomplete I/O. This keeps the busy status for pages 3807 * consistant. 
 */
void
vfs_unbusy_pages(struct buf *bp)
{
	int i;

	runningbufwakeup(bp);

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj;

		obj = vp->v_object;
		vm_object_hold(obj);

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			/*
			 * When restoring bogus-page substitutions the
			 * original pages should still be wired, so we are
			 * in no danger of losing the object association
			 * and do not particularly need critical section
			 * protection.
			 */
			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_xio.xio_pages[i] = m;
			}
			vm_page_busy_wait(m, FALSE, "bpdpgw");
			vm_page_io_finish(m);
			vm_page_wakeup(m);
			vm_object_pip_wakeup(obj);
		}
		if (bp->b_flags & B_HASBOGUS) {
			pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
					    bp->b_xio.xio_pages,
					    bp->b_xio.xio_npages);
			bp->b_flags &= ~B_HASBOGUS;
			bkvareset(bp);
		}
		vm_object_drop(obj);
	}
}

/*
 * vfs_busy_pages:
 *
 *	This routine is called before a device strategy routine.
 *	It is used to tell the VM system that paging I/O is in
 *	progress, and treat the pages associated with the buffer
 *	almost as being PBUSY_LOCKED.  Also the object 'paging_in_progress'
 *	flag is handled to make sure that the object doesn't become
 *	inconsistent.
 *
 *	Since I/O has not been initiated yet, certain buffer flags
 *	such as B_ERROR or B_INVAL may be in an inconsistent state
 *	and should be ignored.
 */
void
vfs_busy_pages(struct vnode *vp, struct buf *bp)
{
	int i, bogus;
	struct lwp *lp = curthread->td_lwp;

	/*
	 * The buffer's I/O command must already be set.  If reading,
	 * B_CACHE must be 0 (double check against callers only doing
	 * I/O when B_CACHE is 0).
	 */
	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
	KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0);

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj;

		obj = vp->v_object;
		KASSERT(bp->b_loffset != NOOFFSET,
			("vfs_busy_pages: no buffer offset"));

		/*
		 * Busy all the pages.  We have to busy them all at once
		 * to avoid deadlocks.
		 */
retry:
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			if (vm_page_busy_try(m, FALSE)) {
				vm_page_sleep_busy(m, FALSE, "vbpage");
				while (--i >= 0)
					vm_page_wakeup(bp->b_xio.xio_pages[i]);
				goto retry;
			}
		}

		/*
		 * Set up for I/O; soft-busy the page right now because
		 * the next loop may block.
		 */
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}
		}

		/*
		 * Adjust protections for I/O and do bogus-page mapping.
		 * Assume that vm_page_protect() can block (it can block
		 * if VM_PROT_NONE, don't take any chances regardless).
		 *
		 * In particular note that for writes we must incorporate
		 * page dirtiness from the VM system into the buffer's
		 * dirty range.
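		 * (For example, and purely as illustration: a page that
		 * userland dirtied through an mmap() leaves pmap-level
		 * modified bits; vfs_clean_one_page() below calls
		 * vm_page_test_dirty() to fold those into m->dirty and
		 * then widens b_dirtyoff/b_dirtyend to cover them.)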
		 *
		 * For reads we theoretically must incorporate page dirtiness
		 * from the VM system to determine if the page needs bogus
		 * replacement, but we shortcut the test by simply checking
		 * that all m->valid bits are set, indicating that the page
		 * is fully valid and does not need to be re-read.  For any
		 * VM system dirtiness the page will also be fully valid
		 * since it was mapped at one point.
		 */
		bogus = 0;
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			if (bp->b_cmd == BUF_CMD_WRITE) {
				/*
				 * When readying a vnode-backed buffer for
				 * a write we must zero-fill any invalid
				 * portions of the backing VM pages, mark
				 * them valid and clear related dirty bits.
				 *
				 * vfs_clean_one_page() incorporates any
				 * VM dirtiness and updates the b_dirtyoff
				 * range (after we've made the page RO).
				 *
				 * It is also expected that the pmap modified
				 * bit has already been cleared by the
				 * vm_page_protect().  We may not be able
				 * to clear all dirty bits for a page if it
				 * was also memory mapped (NFS).
				 *
				 * Finally be sure to unassign any swap-cache
				 * backing store as it is now stale.
				 */
				vm_page_protect(m, VM_PROT_READ);
				vfs_clean_one_page(bp, i, m);
				swap_pager_unswapped(m);
			} else if (m->valid == VM_PAGE_BITS_ALL) {
				/*
				 * When readying a vnode-backed buffer for
				 * read we must replace any dirty pages with
				 * a bogus page so dirty data is not destroyed
				 * when filling gaps.
				 *
				 * To avoid testing whether the page is
				 * dirty we instead test that the page was
				 * at some point mapped (m->valid fully
				 * valid) with the understanding that
				 * this also covers the dirty case.
				 */
				bp->b_xio.xio_pages[i] = bogus_page;
				bp->b_flags |= B_HASBOGUS;
				bogus++;
			} else if (m->valid & m->dirty) {
				/*
				 * This case should not occur as partial
				 * dirtiness can only happen if the buffer
				 * is B_CACHE, and this code is not entered
				 * if the buffer is B_CACHE.
				 */
				kprintf("Warning: vfs_busy_pages - page not "
					"fully valid! loff=%jx bpf=%08x "
					"idx=%d val=%02x dir=%02x\n",
					(uintmax_t)bp->b_loffset, bp->b_flags,
					i, m->valid, m->dirty);
				vm_page_protect(m, VM_PROT_NONE);
			} else {
				/*
				 * The page is not valid and can be made
				 * part of the read.
				 */
				vm_page_protect(m, VM_PROT_NONE);
			}
			vm_page_wakeup(m);
		}
		if (bogus) {
			pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
					    bp->b_xio.xio_pages,
					    bp->b_xio.xio_npages);
			bkvareset(bp);
		}
	}

	/*
	 * This is the easiest place to put the process accounting for the I/O
	 * for now.
	 */
	if (lp != NULL) {
		if (bp->b_cmd == BUF_CMD_READ)
			lp->lwp_ru.ru_inblock++;
		else
			lp->lwp_ru.ru_oublock++;
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 *
 * NOTE: While we only really need to clean through to b_bcount, we
 *	 just go ahead and clean through to b_bufsize.
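 *
 * Simplified sketch of a typical delayed-write caller (illustrative
 * only, not a verbatim copy of any routine in this file):
 *
 *	bdirty(bp);		- mark B_DELWRI; the buffer owns the data
 *	vfs_clean_pages(bp);	- VM pages become "clean"; the dirty state
 *				  lives in the buffer until it is flushed
 *	bqrelse(bp);		- queue it for the buffer daemon to write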
 */
static void
vfs_clean_pages(struct buf *bp)
{
	vm_page_t m;
	int i;

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	KASSERT(bp->b_loffset != NOOFFSET,
		("vfs_clean_pages: no buffer offset"));

	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		vfs_clean_one_page(bp, i, m);
	}
}

/*
 * vfs_clean_one_page:
 *
 *	Set the valid bits and clear the dirty bits in a page within a
 *	buffer.  The range is restricted to the buffer's size and the
 *	buffer's logical offset might index into the first page.
 *
 *	The caller has busied or soft-busied the page and it is not mapped;
 *	we test and incorporate the dirty bits into b_dirtyoff/end before
 *	clearing them.  Note that we need to clear the pmap modified bits
 *	after determining that the page was dirty; vm_page_set_validclean()
 *	does not do it for us.
 *
 *	This routine is typically called after a read completes (dirty should
 *	be zero in that case as we are not called on bogus-replace pages),
 *	or before a write is initiated.
 */
static void
vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m)
{
	int bcount;
	int xoff;
	int soff;
	int eoff;

	/*
	 * Calculate offset range within the page but relative to buffer's
	 * loffset.  loffset might be offset into the first page.
	 */
	xoff = (int)bp->b_loffset & PAGE_MASK;	/* loffset offset into pg 0 */
	bcount = bp->b_bcount + xoff;		/* offset adjusted */

	if (pageno == 0) {
		soff = xoff;
		eoff = PAGE_SIZE;
	} else {
		soff = (pageno << PAGE_SHIFT);
		eoff = soff + PAGE_SIZE;
	}
	if (eoff > bcount)
		eoff = bcount;
	if (soff >= eoff)
		return;

	/*
	 * Test dirty bits and adjust b_dirtyoff/end.
	 *
	 * If dirty pages are incorporated into the bp any prior
	 * B_NEEDCOMMIT state (NFS) must be cleared because the
	 * caller has not taken into account the new dirty data.
	 *
	 * If the page was memory mapped the dirty bits might go beyond the
	 * end of the buffer, but we can't really make the assumption that
	 * a file EOF straddles the buffer (even though this is the case for
	 * NFS if B_NEEDCOMMIT is also set).  So for the purposes of clearing
	 * B_NEEDCOMMIT we only test the dirty bits covered by the buffer.
	 * This also saves some console spam.
	 *
	 * When clearing B_NEEDCOMMIT we must also clear B_CLUSTEROK,
	 * NFS can handle huge commits but not huge writes.
	 */
	vm_page_test_dirty(m);
	if (m->dirty) {
		if ((bp->b_flags & B_NEEDCOMMIT) &&
		    (m->dirty & vm_page_bits(soff & PAGE_MASK, eoff - soff))) {
			if (debug_commit)
				kprintf("Warning: vfs_clean_one_page: bp %p "
					"loff=%jx,%d flgs=%08x clr B_NEEDCOMMIT"
					" cmd %d vd %02x/%02x x/s/e %d %d %d "
					"doff/end %d %d\n",
					bp, (uintmax_t)bp->b_loffset, bp->b_bcount,
					bp->b_flags, bp->b_cmd,
					m->valid, m->dirty, xoff, soff, eoff,
					bp->b_dirtyoff, bp->b_dirtyend);
			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			if (debug_commit)
				print_backtrace(-1);
		}
		/*
		 * Only clear the pmap modified bits if ALL the dirty bits
		 * are set, otherwise the system might mis-clear portions
		 * of a page.
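		 *
		 * (Concrete illustration: with DEV_BSIZE 512 and PAGE_SIZE
		 * 4096, m->dirty == 0x0f records only the first 2KB as
		 * dirty.  Clearing the pmap modified bit in that state
		 * would discard the only remaining evidence that the rest
		 * of the page was written through a mapping.)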
		 */
		if (m->dirty == VM_PAGE_BITS_ALL &&
		    (bp->b_flags & B_NEEDCOMMIT) == 0) {
			pmap_clear_modify(m);
		}
		if (bp->b_dirtyoff > soff - xoff)
			bp->b_dirtyoff = soff - xoff;
		if (bp->b_dirtyend < eoff - xoff)
			bp->b_dirtyend = eoff - xoff;
	}

	/*
	 * Set related valid bits, clear related dirty bits.
	 * Does not mess with the pmap modified bit.
	 *
	 * WARNING!  We cannot just clear all of m->dirty here as the
	 *	     buffer cache buffers may use a DEV_BSIZE'd aligned
	 *	     block size, or have an odd size (e.g. NFS at file EOF).
	 *	     The putpages code can clear m->dirty to 0.
	 *
	 *	     If a VOP_WRITE generates a buffer cache buffer which
	 *	     covers the same space as mapped writable pages the
	 *	     buffer flush might not be able to clear all the dirty
	 *	     bits and still require a putpages from the VM system
	 *	     to finish it off.
	 *
	 * WARNING!  vm_page_set_validclean() currently assumes vm_token
	 *	     is held.  The page might not be busied (bdwrite() case).
	 *	     XXX remove this comment once we've validated that this
	 *	     is no longer an issue.
	 */
	vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff);
}

#if 0
/*
 * Similar to vfs_clean_one_page() but sets the bits to valid and dirty.
 * The page data is assumed to be valid (there is no zeroing here).
 */
static void
vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m)
{
	int bcount;
	int xoff;
	int soff;
	int eoff;

	/*
	 * Calculate offset range within the page but relative to buffer's
	 * loffset.  loffset might be offset into the first page.
	 */
	xoff = (int)bp->b_loffset & PAGE_MASK;	/* loffset offset into pg 0 */
	bcount = bp->b_bcount + xoff;		/* offset adjusted */

	if (pageno == 0) {
		soff = xoff;
		eoff = PAGE_SIZE;
	} else {
		soff = (pageno << PAGE_SHIFT);
		eoff = soff + PAGE_SIZE;
	}
	if (eoff > bcount)
		eoff = bcount;
	if (soff >= eoff)
		return;
	vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff);
}
#endif

/*
 * vfs_bio_clrbuf:
 *
 *	Clear a buffer.  This routine essentially fakes an I/O, so we need
 *	to clear B_ERROR and B_INVAL.
 *
 *	Note that while we only theoretically need to clear through b_bcount,
 *	we go ahead and clear through b_bufsize.
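 *
 *	Worked example (illustrative, DEV_BSIZE = 512): a page-aligned
 *	2048-byte buffer gives mask = (1 << (2048 / 512)) - 1 = 0x0f.
 *	If (xio_pages[0]->valid & 0x0f) == 0x0f the data is already
 *	valid and only b_resid needs to be cleared; if it is 0 the
 *	whole buffer is bzero()'d in one shot.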
 */
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i, mask = 0;
	caddr_t sa, ea;

	KKASSERT(bp->b_flags & B_VMIO);

	bp->b_flags &= ~(B_INVAL | B_EINTR | B_ERROR);
	bkvasync(bp);

	if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
	    (bp->b_loffset & PAGE_MASK) == 0) {
		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
		if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) {
			bp->b_resid = 0;
			return;
		}
		if ((bp->b_xio.xio_pages[0]->valid & mask) == 0) {
			bzero(bp->b_data, bp->b_bufsize);
			bp->b_xio.xio_pages[0]->valid |= mask;
			bp->b_resid = 0;
			return;
		}
	}
	sa = bp->b_data;
	for (i = 0; i < bp->b_xio.xio_npages; i++, sa = ea) {
		int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;

		ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
		ea = (caddr_t)(vm_offset_t)ulmin(
			    (u_long)(vm_offset_t)ea,
			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
		if ((bp->b_xio.xio_pages[i]->valid & mask) == mask)
			continue;
		if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) {
			bzero(sa, ea - sa);
		} else {
			for (; sa < ea; sa += DEV_BSIZE, j++) {
				if ((bp->b_xio.xio_pages[i]->valid &
				    (1<<j)) == 0) {
					bzero(sa, DEV_BSIZE);
				}
			}
		}
		bp->b_xio.xio_pages[i]->valid |= mask;
	}
	bp->b_resid = 0;
}

/*
 * Allocate a page for a buffer cache buffer.
 *
 * If NULL is returned the caller is expected to retry (typically check if
 * the page already exists on retry before trying to allocate one).
 *
 * NOTE! Low-memory handling is dealt with in b[q]relse(), not here.  This
 *	 function will use the system reserve with the hope that the page
 *	 allocations can be returned to PQ_CACHE/PQ_FREE when the caller
 *	 is done with the buffer.
 *
 * NOTE! However, TMPFS is a special case because flushing a dirty buffer
 *	 to TMPFS doesn't clean the page.  For TMPFS, only the pagedaemon
 *	 is capable of retiring pages (to swap).  For TMPFS we don't dig
 *	 into the system reserve because doing so could stall out pretty
 *	 much every process running on the system.
 */
static
vm_page_t
bio_page_alloc(struct buf *bp, vm_object_t obj, vm_pindex_t pg, int deficit)
{
	int vmflags = VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK;
	vm_page_t p;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(obj));

	/*
	 * Try a normal allocation first.
	 */
	p = vm_page_alloc(obj, pg, vmflags);
	if (p)
		return(p);
	if (vm_page_lookup(obj, pg))
		return(NULL);
	vm_pageout_deficit += deficit;

	/*
	 * Try again, digging into the system reserve.
	 *
	 * Trying to recover pages from the buffer cache here can deadlock
	 * against other threads trying to busy underlying pages so we
	 * depend on the code in brelse() and bqrelse() to free/cache the
	 * underlying buffer cache pages when memory is low.
	 */
	if (curthread->td_flags & TDF_SYSTHREAD)
		vmflags |= VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
	else if (bp->b_vp && bp->b_vp->v_tag == VT_TMPFS)
		vmflags |= 0;
	else
		vmflags |= VM_ALLOC_SYSTEM;

	/*recoverbufpages();*/
	p = vm_page_alloc(obj, pg, vmflags);
	if (p)
		return(p);
	if (vm_page_lookup(obj, pg))
		return(NULL);

	/*
	 * Wait for memory to free up and try again
	 */
	if (vm_page_count_severe())
		++lowmempgallocs;
	vm_wait(hz / 20 + 1);

	p = vm_page_alloc(obj, pg, vmflags);
	if (p)
		return(p);
	if (vm_page_lookup(obj, pg))
		return(NULL);

	/*
	 * Ok, now we are really in trouble.
	 */
	if (bootverbose) {
		static struct krate biokrate = { .freq = 1 };

		krateprintf(&biokrate,
			    "Warning: bio_page_alloc: memory exhausted "
			    "during buffer cache page allocation from %s\n",
			    curthread->td_comm);
	}
	if (curthread->td_flags & TDF_SYSTHREAD)
		vm_wait(hz / 20 + 1);
	else
		vm_wait(hz / 2 + 1);
	return (NULL);
}

/*
 * The buffer's mapping has changed.  Adjust the buffer's memory
 * synchronization.  The caller is the exclusive holder of the buffer
 * and has set or cleared B_KVABIO according to preference.
 *
 * WARNING! If the caller is using B_KVABIO mode, this function will
 *	    not map the data to the current cpu.  The caller must also
 *	    call bkvasync(bp).
 */
void
bkvareset(struct buf *bp)
{
	if (bp->b_flags & B_KVABIO) {
		CPUMASK_ASSZERO(bp->b_cpumask);
	} else {
		CPUMASK_ORMASK(bp->b_cpumask, smp_active_mask);
		smp_invltlb();
		cpu_invltlb();
	}
}

/*
 * The buffer will be used by the caller on the caller's cpu, synchronize
 * its data to the current cpu.
 *
 * If B_KVABIO is not set, the buffer is already fully synchronized.
 */
void
bkvasync(struct buf *bp)
{
	int cpuid = mycpu->gd_cpuid;
	char *bdata;

	if ((bp->b_flags & B_KVABIO) &&
	    CPUMASK_TESTBIT(bp->b_cpumask, cpuid) == 0) {
		bdata = bp->b_data;
		while (bdata < bp->b_data + bp->b_bufsize) {
			cpu_invlpg(bdata);
			bdata += PAGE_SIZE -
				 ((intptr_t)bdata & PAGE_MASK);
		}
		ATOMIC_CPUMASK_ORBIT(bp->b_cpumask, cpuid);
	}
}

/*
 * The buffer will be used by a subsystem that does not understand
 * the KVABIO API.  Make sure its data is synchronized to all cpus.
 *
 * If B_KVABIO is not set, the buffer is already fully synchronized.
 *
 * NOTE! This is the only safe way to clear B_KVABIO on a buffer.
 */
void
bkvasync_all(struct buf *bp)
{
	if ((bp->b_flags & B_KVABIO) &&
	    CPUMASK_CMPMASKNEQ(bp->b_cpumask, smp_active_mask)) {
		smp_invltlb();
		cpu_invltlb();
		ATOMIC_CPUMASK_ORMASK(bp->b_cpumask, smp_active_mask);
	}
	bp->b_flags &= ~B_KVABIO;
}

/*
 * Scan all buffers in the system and issue the callback.
 */
int
scan_all_buffers(int (*callback)(struct buf *, void *), void *info)
{
	int count = 0;
	int error;
	long n;

	for (n = 0; n < nbuf; ++n) {
		if ((error = callback(&buf[n], info)) < 0) {
			count = error;
			break;
		}
		count += error;
	}
	return (count);
}

/*
 * nestiobuf_iodone: biodone callback for nested buffers and propagate
 * completion to the master buffer.
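 *
 * Overall pattern (illustrative sketch; using getpbuf_kva() as the
 * sub-buffer source is an assumption, but it matches the relpbuf()
 * in this callback):
 *
 *	nestiobuf_init(mbio);
 *	for (off = 0; off < mbp->b_bcount; off += chunk) {
 *		bp = getpbuf_kva(NULL);
 *		nestiobuf_add(mbio, bp, off, chunk, NULL);
 *		vn_strategy(vp, &bp->b_bio1);
 *	}
 *	nestiobuf_start(mbio);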
 */
static void
nestiobuf_iodone(struct bio *bio)
{
	struct bio *mbio;
	struct buf *mbp, *bp;
	struct devstat *stats;
	int error;
	int donebytes;

	bp = bio->bio_buf;
	mbio = bio->bio_caller_info1.ptr;
	stats = bio->bio_caller_info2.ptr;
	mbp = mbio->bio_buf;

	KKASSERT(bp->b_bcount <= bp->b_bufsize);
	KKASSERT(mbp != bp);

	error = bp->b_error;
	if (bp->b_error == 0 &&
	    (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
		/*
		 * Not all got transferred, raise an error.  We have no way
		 * to propagate these conditions to mbp.
		 */
		error = EIO;
	}

	donebytes = bp->b_bufsize;

	relpbuf(bp, NULL);

	nestiobuf_done(mbio, donebytes, error, stats);
}

void
nestiobuf_done(struct bio *mbio, int donebytes, int error, struct devstat *stats)
{
	struct buf *mbp;

	mbp = mbio->bio_buf;

	KKASSERT((int)(intptr_t)mbio->bio_driver_info > 0);

	/*
	 * If an error occurred, propagate it to the master buffer.
	 *
	 * Several biodone()s may wind up running concurrently so
	 * use an atomic op to adjust b_flags.
	 */
	if (error) {
		mbp->b_error = error;
		atomic_set_int(&mbp->b_flags, B_ERROR);
	}

	/*
	 * Decrement the operations in progress counter and terminate the
	 * I/O if this was the last bit.
	 */
	if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) {
		mbp->b_resid = 0;
		if (stats)
			devstat_end_transaction_buf(stats, mbp);
		biodone(mbio);
	}
}

/*
 * Initialize a nestiobuf for use.  Set an initial count of 1 to prevent
 * the mbio from being biodone()'d while we are still adding sub-bios to
 * it.
 */
void
nestiobuf_init(struct bio *bio)
{
	bio->bio_driver_info = (void *)1;
}

/*
 * The BIOs added to the nested I/O have already been started; remove the
 * placeholder count on our mbio and biodone() it if the count would
 * transition to 0.
 */
void
nestiobuf_start(struct bio *mbio)
{
	struct buf *mbp = mbio->bio_buf;

	/*
	 * Decrement the operations in progress counter and terminate the
	 * I/O if this was the last bit.
	 */
	if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) {
		if (mbp->b_flags & B_ERROR)
			mbp->b_resid = mbp->b_bcount;
		else
			mbp->b_resid = 0;
		biodone(mbio);
	}
}

/*
 * Set an intermediate error prior to calling nestiobuf_start().
 */
void
nestiobuf_error(struct bio *mbio, int error)
{
	struct buf *mbp = mbio->bio_buf;

	if (error) {
		mbp->b_error = error;
		atomic_set_int(&mbp->b_flags, B_ERROR);
	}
}

/*
 * nestiobuf_add: set up a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
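 * => 'stats' is optional; when non-NULL the final completion runs
 *    devstat_end_transaction_buf() on the master buffer.
 *
 * Error-path sketch (illustrative): if sub-buffer setup fails partway
 * through, record the error and still call nestiobuf_start() so the
 * placeholder count is released:
 *
 *	nestiobuf_error(mbio, error);
 *	nestiobuf_start(mbio);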
 */
void
nestiobuf_add(struct bio *mbio, struct buf *bp, int offset, size_t size, struct devstat *stats)
{
	struct buf *mbp = mbio->bio_buf;
	struct vnode *vp = mbp->b_vp;

	KKASSERT(mbp->b_bcount >= offset + size);

	atomic_add_int((int *)&mbio->bio_driver_info, 1);

	/* kernel needs to own the lock for it to be released in biodone */
	BUF_KERNPROC(bp);
	bp->b_vp = vp;
	bp->b_cmd = mbp->b_cmd;
	bp->b_bio1.bio_done = nestiobuf_iodone;
	bp->b_data = (char *)mbp->b_data + offset;
	bp->b_resid = bp->b_bcount = size;
	bp->b_bufsize = bp->b_bcount;

	bp->b_bio1.bio_track = NULL;
	bp->b_bio1.bio_caller_info1.ptr = mbio;
	bp->b_bio1.bio_caller_info2.ptr = stats;
}

#ifdef DDB

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_cmd = %d\n", bp->b_cmd);
	db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, "
		  "b_resid = %d, b_data = %p,\n"
		  "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_data,
		  (long long)bp->b_bio2.bio_offset,
		  (long long)(bp->b_bio2.bio_next ?
			bp->b_bio2.bio_next->bio_offset : (off_t)-1));
	if (bp->b_xio.xio_npages) {
		int i;

		db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ",
			  bp->b_xio.xio_npages);
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m;

			m = bp->b_xio.xio_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
				  (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_xio.xio_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
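
/*
 * Usage example for the DDB command above (illustrative; the address
 * shown is hypothetical):
 *
 *	db> show buffer 0xffffffff8abcd120
 *
 * dumps b_flags, b_cmd, the size/offset fields, and the backing VM
 * page list for the given struct buf address.
 */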