/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.32 2008/05/18 01:48:50 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
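 *
 * (Descriptive note, not from the original comment:) reinitialization
 * typically occurs when a hammer_buffer is rebound to a different blockmap
 * zone, e.g. a buffer previously tracked as HAMMER_STRUCTURE_DATA_BUFFER
 * becoming HAMMER_STRUCTURE_META_BUFFER.  If the io is dirty,
 * hammer_io_reinit() moves it to the dirty list appropriate for the new
 * type and keeps the locked_dirty_count bookkeeping consistent.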
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
        io->hmp = hmp;
        io->type = type;
}

void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
        hammer_mount_t hmp = io->hmp;

        if (io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &hmp->volu_list ||
                    io->mod_list == &hmp->meta_list) {
                        --hmp->locked_dirty_count;
                        --hammer_count_dirtybufs;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
        }
        io->type = type;
        if (io->modified) {
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
        }
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.modified == 0);
        buf_dep_init(bp);
        iou->io.bp = NULL;
        bp->b_flags &= ~B_LOCKED;
        if (elseit) {
                KKASSERT(iou->io.released == 0);
                iou->io.released = 1;
                bqrelse(bp);
        } else {
                KKASSERT(iou->io.released);
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        }
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
        if (io->running) {
                crit_enter();
                tsleep_interlock(io);
                io->waiting = 1;
                for (;;) {
                        tsleep(io, 0, "hmrflw", 0);
                        if (io->running == 0)
                                break;
                        tsleep_interlock(io);
                        io->waiting = 1;
                        if (io->running == 0)
                                break;
                }
                crit_exit();
        }
}

#define HAMMER_MAXRA    4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * Generally speaking HAMMER assumes either an optimized layout or that
 * typical access patterns will be close to the original layout when the
 * information was written.  For this reason we try to cluster all reads.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
#if 1
                error = cluster_read(devvp, limit, io->offset,
                                     HAMMER_BUFSIZE, MAXBSIZE, 16, &io->bp);
#else
                error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
#endif

                if (error == 0) {
                        bp = io->bp;
                        bp->b_ops = &hammer_bioops;
                        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                        BUF_KERNPROC(bp);
                }
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return;

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * Meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (flush) {
                        hammer_io_flush(io);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io);
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regard to
         * the buffer).
         */
        if (flush && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
                hammer_io_disassociate((hammer_io_structure_t)io, 1);
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel.
                 * Volume and meta-data IO types must be explicitly flushed
                 * by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 */
                io->released = 1;
                bqrelse(bp);
        }
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
        struct buf *bp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0) {
                return;
        }

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs == 0);

        /*
         * Acquire ownership of the bp, particularly before we clear our
         * modified flag.
         *
         * We are going to bawrite() this bp.  Don't leave a window where
         * io->released is set, we actually own the bp rather than our
         * buffer.
         */
        bp = io->bp;
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                /* io->released = 0; */
                KKASSERT(io->released);
                KKASSERT(io->bp == bp);
        }
        io->released = 1;

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                --io->hmp->locked_dirty_count;
                --hammer_count_dirtybufs;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;

        /*
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->running = 1;
        ++io->hmp->io_running_count;
        bawrite(bp);
}

/************************************************************************
 *                            BUFFER DIRTYING                           *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
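 *
 * A typical caller brackets its edits to the on-disk image, for example
 * (illustrative sketch only; the field and value names are placeholders):
 *
 *      hammer_modify_buffer(trans, buffer, &ondisk->field,
 *                           sizeof(ondisk->field));
 *      ondisk->field = new_value;
 *      hammer_modify_buffer_done(buffer);
 *
 * The modify call generates the UNDO record covering the range being
 * changed and reacquires the bp if it had been released; the _done call
 * simply drops modify_refs.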
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
        struct hammer_mount *hmp = io->hmp;

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(io->lock.refs != 0 && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        hammer_lock_ex(&io->lock);
        if (io->modified == 0) {
                KKASSERT(io->mod_list == NULL);
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
                io->modified = 1;
        }
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
                KKASSERT(io->modified != 0);
        }
        hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
}

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&volume->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &volume->io,
                        HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                        base, len);
        }
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &buffer->io,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
        struct buf *bp;

        io->modified = 0;
        XXX mod_list/entry
        if ((bp = io->bp) != NULL) {
                if (io->released) {
                        regetblk(bp);
                        /* BUF_KERNPROC(io->bp); */
                } else {
                        io->released = 1;
                }
                if (io->modified == 0) {
                        hkprintf("hammer_io_clear_modify: cleared %p\n", io);
                        bundirty(bp);
                        bqrelse(bp);
                } else {
                        bdwrite(bp);
                }
        }
#endif
}

/************************************************************************
 *                             HAMMER_BIOOPS                            *
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT(iou->io.released == 1);

        if (iou->io.running) {
                if (--iou->io.hmp->io_running_count == 0)
                        wakeup(&iou->io.hmp->io_running_count);
                KKASSERT(iou->io.hmp->io_running_count >= 0);
                iou->io.running = 0;
        }

        /*
         * Wake up anyone sleeping in hammer_io_wait().
         */
        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * If no lock references remain and someone at some point wanted
         * us to flush (B_LOCKED test), try to dispose of the IO now.
         */
        if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
                KKASSERT(iou->io.modified == 0);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (iou->io.lock.refs > 0 || iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.
                 */
                hammer_io_disassociate(iou, 0);
                if (iou->io.bp == NULL &&
                    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.mod_list == NULL);
                        iou->io.mod_list = &iou->io.hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
                }
        }
}

static int
hammer_io_fsync(struct vnode *vp)
{
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
        return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
        return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

        /*
         * This shouldn't happen under normal operation.
         */
        if (io->type == HAMMER_STRUCTURE_VOLUME ||
            io->type == HAMMER_STRUCTURE_META_BUFFER) {
                if (!panicstr)
                        panic("hammer_io_checkwrite: illegal buffer");
                hkprintf("x");
                bp->b_flags |= B_LOCKED;
                return(1);
        }

        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
         */
        if (io->modify_refs == 0 && io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &io->hmp->volu_list ||
                    io->mod_list == &io->hmp->meta_list) {
                        --io->hmp->locked_dirty_count;
                        --hammer_count_dirtybufs;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                io->modified = 0;
        }

        /*
         * The kernel is going to start the IO, set io->running.
         */
        KKASSERT(io->running == 0);
        io->running = 1;
        ++io->hmp->io_running_count;
        return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
        if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        return(0);
}

struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};