/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.47 2008/06/28 23:50:37 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
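 *
 * As the casts in this file assume, each HAMMER structure type (volume,
 * data/meta/undo buffer) embeds its struct hammer_io first, and
 * union hammer_io_structure overlays them, so the worklist entry hung
 * off bp->b_dep can be mapped back to the owning structure by checking
 * io.type.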
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}

	/*
	 * elseit is 0 when called from the kernel path when the io
	 * might have no references.
	 */
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		if (iou->io.reclaim)
			bp->b_flags |= B_NOCACHE|B_RELBUF;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}
	iou->io.reclaim = 0;

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

/*
 * Wait for all hammer_io-initiated write I/Os to complete.  This is not
 * supposed to count direct I/Os but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
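 *
 * In practice the cluster_read() below may pull in up to
 * HAMMER_CLUSTER_SIZE bytes (HAMMER_CLUSTER_BUFS device buffers) around
 * the requested buffer; the extra bps simply sit in the buffer cache,
 * unassociated, until a later hammer_io_read() finds them already
 * resident.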
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
#if 1
		error = cluster_read(devvp, limit, io->offset, io->bytes,
				     HAMMER_CLUSTER_SIZE,
				     HAMMER_CLUSTER_BUFS, &io->bp);
#else
		error = bread(devvp, io->offset, io->bytes, &io->bp);
#endif
		hammer_count_io_running_read -= io->bytes;
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
		if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
			hammer_io_clear_modify(&iou->io, 1);
			bundirty(bp);
			iou->io.reclaim = 1;
			hammer_io_deallocate(bp);
		} else {
			KKASSERT((bp->b_flags & B_LOCKED) == 0);
			bundirty(bp);
			bp->b_flags |= B_NOCACHE|B_RELBUF;
			brelse(bp);
		}
	}
	crit_exit();
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
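 *
 * In rough outline of the cases below: a dirty io is flushed now (or,
 * for data/undo types only, on the kernel's behalf when B_LOCKED is
 * set); a clean io is disassociated outright on an explicit flush or
 * reclaim; a clean, still-owned bp is bqrelse()'d into passive
 * association; and an already-released bp is either destroyed (B_LOCKED
 * or reclaim) or cycled through regetblk()/bqrelse() to refresh its
 * position in the kernel's LRU.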
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * Volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
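		 *
		 * Per the switch below, only DATA and UNDO buffers may be
		 * handed to bdwrite() here; dirty volume and meta-data
		 * buffers stay on HAMMER's mod-lists until the flusher
		 * writes them.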
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou, 1);
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou, 1);
			} else {
				io->released = 1;
				bqrelse(bp);
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		crit_enter();
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				/*regetblk(bp);*/
				io->released = 0;
				hammer_io_disassociate(iou, 1);
			} else {
				bqrelse(bp);
			}
		}
		crit_exit();
	}
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			hmp->locked_dirty_space += io->bytes;
			hammer_count_dirtybufspace += io->bytes;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			hmp->locked_dirty_space += io->bytes;
			hammer_count_dirtybufspace += io->bytes;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
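 *
 * The usual pairing, mirroring hammer_io_modify()/hammer_io_modify_done():
 * hammer_modify_buffer(trans, buffer, ptr, len) generates UNDO for the
 * range and holds modify_refs; the caller then edits the bytes at ptr and
 * calls hammer_modify_buffer_done() so a pending write interlock can
 * proceed.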
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
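		 *
		 * Setting B_LOCKED parks the bp instead; hammer_io_complete()
		 * re-attempts the deallocation once the last reference drains
		 * and any running write has finished.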
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.
 *
 * A second-level bio already resolved to a zone-2 offset (typically by
 * the BMAP code, or by a previous hammer_io_direct_write()) is passed in.
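 *
 * The translation below runs through the bio levels: the caller's bio
 * arrives carrying the zone-2 (raw-buffer) offset, and a push_bio()'d
 * third-level bio is given the raw volume-relative offset (vol_buf_beg
 * plus the short-masked offset) before vn_strategy() hands it to the
 * device.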
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio)
{
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	zone2_offset = bio->bio_offset;

	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	/*
	 * Third level bio - raw offset specific to the
	 * correct volume.
	 */
	if (error == 0) {
		zone2_offset &= HAMMER_OFF_SHORT_MASK;

		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   zone2_offset;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);

	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp = bio->bio_buf;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
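 *
 * Two paths follow: a buffer-aligned write of at least HAMMER_BUFSIZE
 * goes straight to the device, after hammer_del_buffers() destroys any
 * stale hammer_buffer aliases at the same offset; smaller writes are
 * copied into a regular hammer_buffer under hammer_io_modify() and
 * completed locally with biodone().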
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf,
		       struct bio *bio)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			/*
			 * Second level bio - cached zone2 offset.
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/* must fit in a standard HAMMER buffer */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error) {
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 *
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}