xref: /dflybsd-src/sys/vfs/hammer/hammer_io.c (revision e469566bdb7b27c44c1b3d092f7d780c94ce8f1f)
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.52 2008/07/31 22:30:33 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriately for
 * reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
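		/*
		 * Note: tsleep_interlock() queues us on the sleep queue
		 * before io->running is re-tested, so a wakeup issued by
		 * biodone() between the test and the tsleep() cannot be
		 * lost; we re-arm the interlock on every iteration.
		 */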
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

/*
 * Wait for all hammer_io-initiated write I/Os to complete.  This is not
 * supposed to count direct I/Os but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking, HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
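		/*
		 * BUF_KERNPROC() hands ownership of the buffer lock over
		 * to the kernel so the bp can later be released from a
		 * thread other than the one that acquired it.
		 */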
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

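	/*
	 * Translate the zone-2 offset into a raw byte offset within the
	 * volume: strip the zone bits and add the base of the volume's
	 * buffer area.
	 */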
	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.reclaim = 1;
		hammer_io_deallocate(bp);
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
	}
	brelse(bp);
	crit_exit();
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still
 * be passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * Volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
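				/*
				 * bdwrite() marks the bp dirty and releases
				 * it; the actual write is initiated later
				 * by the kernel via our io_checkwrite bioop.
				 */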
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set while we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
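	/*
	 * bawrite() issues an asynchronous write.  hammer_io_complete()
	 * clears io->running, reverses the accounting above, and wakes
	 * any waiters when the write finishes.
	 */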
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
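	/*
	 * If the bp had been handed back to the kernel, reacquire it
	 * (regetblk) so the upcoming modification is made on a buffer
	 * we own.
	 */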
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

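/*
 * modify_refs doubles as a write interlock: a positive count means
 * modifications are in progress, zero means idle, and -1 (set below)
 * means the write interlock is held exclusively.
 */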
void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
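	/*
	 * Record the pre-modification image of the range in the UNDO
	 * FIFO so crash recovery can roll the change back.
	 */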
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
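			/*
			 * hammer_rel_node() may modify clist (it can drop
			 * the last ref on the node), so rescan from the
			 * head rather than continuing a stale iterator.
			 */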
			goto restart;
		}
	}
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
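		/*
		 * Wake up hammer_io_wait_all() once the last write in
		 * progress drains.
		 */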
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
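	/*
	 * Returning 0 tells the kernel to go ahead with the write; the
	 * accounting above is reversed in hammer_io_complete().
	 */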
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

10841b0ab2c3SMatthew Dillon #if 0
10851b0ab2c3SMatthew Dillon /*
10861b0ab2c3SMatthew Dillon  * On completion of the BIO this callback must check the data CRC
10871b0ab2c3SMatthew Dillon  * and chain to the previous bio.
10881b0ab2c3SMatthew Dillon  */
10891b0ab2c3SMatthew Dillon static
10901b0ab2c3SMatthew Dillon void
10911b0ab2c3SMatthew Dillon hammer_io_direct_read_complete(struct bio *nbio)
10921b0ab2c3SMatthew Dillon {
10931b0ab2c3SMatthew Dillon 	struct bio *obio;
10941b0ab2c3SMatthew Dillon 	struct buf *bp;
10951b0ab2c3SMatthew Dillon 	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
10961b0ab2c3SMatthew Dillon 
10971b0ab2c3SMatthew Dillon 	bp = nbio->bio_buf;
10981b0ab2c3SMatthew Dillon 	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
10991b0ab2c3SMatthew Dillon 		kprintf("HAMMER: data_crc error @%016llx/%d\n",
11001b0ab2c3SMatthew Dillon 			nbio->bio_offset, bp->b_bufsize);
11011b0ab2c3SMatthew Dillon 		if (hammer_debug_debug)
11021b0ab2c3SMatthew Dillon 			Debugger("");
11031b0ab2c3SMatthew Dillon 		bp->b_flags |= B_ERROR;
11041b0ab2c3SMatthew Dillon 		bp->b_error = EIO;
11051b0ab2c3SMatthew Dillon 	}
11061b0ab2c3SMatthew Dillon 	obio = pop_bio(nbio);
11071b0ab2c3SMatthew Dillon 	biodone(obio);
11081b0ab2c3SMatthew Dillon }
11091b0ab2c3SMatthew Dillon #endif
11101b0ab2c3SMatthew Dillon 
111147637bffSMatthew Dillon /*
111247637bffSMatthew Dillon  * Write a buffer associated with a front-end vnode directly to the
111347637bffSMatthew Dillon  * disk media.  The bio may be issued asynchronously.
11141b0ab2c3SMatthew Dillon  *
11151b0ab2c3SMatthew Dillon  * The BIO is associated with the specified record and RECF_DIRECT_IO
1116*e469566bSMatthew Dillon  * is set.  The recorded is added to its object.
111747637bffSMatthew Dillon  */
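/*
 * (Summary of the two paths below: a HAMMER-buffer-aligned write of
 * at least HAMMER_BUFSIZE bytes is issued directly to the device via
 * the translated bio chain; anything smaller must fit within a single
 * HAMMER buffer and is copied through the normal HAMMER buffer
 * system, completing the bio immediately.)
 */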
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			/*
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			*/

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error == 0) {
		/*
		 * The record is all set up now, add it.  Potential conflicts
		 * have already been dealt with.
		 */
		error = hammer_mem_add(record);
		KKASSERT(error == 0);
	} else {
		/*
		 * Major suckage occurred.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			(long long)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}

/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
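/*
 * The flag handshake between issuer, waiter, and completion, roughly
 * (a restatement of the code here and in
 * hammer_io_direct_write_complete(), not additional logic):
 *
 *	issue:	  record->flags |= HAMMER_RECF_DIRECT_IO
 *	wait:	  while (DIRECT_IO set) { set DIRECT_WAIT; tsleep(); }
 *	complete: clear DIRECT_IO; if (DIRECT_WAIT) clear it, wakeup()
 */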
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp,
				   record->leaf.data_offset,
				   record->zone2_offset,
				   record->leaf.data_len);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
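/*
 * (Mechanism note, inferred from the callback below: the cached
 * translation lives in bp->b_bio2.bio_offset on the front-end buffer,
 * and resetting it to NOOFFSET forces the next strategy call to
 * re-resolve the offset through the blockmap.)
 */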
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}