/* xref: /dflybsd-src/sys/vfs/hammer/hammer_io.c (revision 2faf07370caaeaef6f64df1610d34b421946c538) */
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);


/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriately for
 * reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		for (;;) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running == 0)
				break;
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
			if (io->running == 0)
				break;
		}
	}
}

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	hammer_io_flush_sync(hmp);
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_unref(&io->lock);
		KKASSERT(io->lock.refs > 0);
	}
}


#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int   error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 */
void
hammer_io_advance(struct hammer_io *io)
{
	if (io->bp)
		buf_act_advance(io->bp);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(iou->io.lock.refs == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	crit_exit();
	return(error);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.  Disposal
 * simply means the caller finishes decrementing the ref-count on the
 * IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
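/*
 * Caller pattern sketch (illustrative only): finish dropping the ref on
 * the structure, then dispose of any bp handed back, e.g.
 *
 *	bp = hammer_io_release(&buffer->io, flush);
 *	... finish decrementing the ref-count on the structure ...
 *	if (bp)
 *		brelse(bp);
 *
 * The "buffer" and "flush" names above are hypothetical.
 */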
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_unref(&io->lock);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

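/*
 * Typical caller pattern (illustrative sketch only, not copied from a
 * specific call site): bracket every on-disk change with a modify call
 * and the matching *_done() call so modify_refs stays balanced, e.g.
 *
 *	hammer_modify_buffer(trans, buffer, ptr, len);
 *	... mutate the len bytes at ptr within buffer->ondisk ...
 *	hammer_modify_buffer_done(buffer);
 *
 * "trans", "buffer", "ptr" and "len" above are placeholder names.
 */
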
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(io->lock.refs > 0);
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

829055f5ff8SMatthew Dillon /*
8307bc5b8c2SMatthew Dillon  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
831b33e2cc0SMatthew Dillon  *
832b33e2cc0SMatthew Dillon  * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
833b33e2cc0SMatthew Dillon  * may also be set if we were marking a cluster header open.  Only remove
834b33e2cc0SMatthew Dillon  * our dependancy if the modified bit is clear.
835055f5ff8SMatthew Dillon  */
83666325755SMatthew Dillon static void
83766325755SMatthew Dillon hammer_io_complete(struct buf *bp)
83866325755SMatthew Dillon {
839055f5ff8SMatthew Dillon 	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
840fbc6e32aSMatthew Dillon 
841055f5ff8SMatthew Dillon 	KKASSERT(iou->io.released == 1);
842055f5ff8SMatthew Dillon 
843bf3b416bSMatthew Dillon 	/*
844bf3b416bSMatthew Dillon 	 * Deal with people waiting for I/O to drain
845bf3b416bSMatthew Dillon 	 */
846f90dde4cSMatthew Dillon 	if (iou->io.running) {
847cdb6e4e6SMatthew Dillon 		/*
848cdb6e4e6SMatthew Dillon 		 * Deal with critical write errors.  Once a critical error
849cdb6e4e6SMatthew Dillon 		 * has been flagged in hmp the UNDO FIFO will not be updated.
850cdb6e4e6SMatthew Dillon 		 * That way crash recover will give us a consistent
851cdb6e4e6SMatthew Dillon 		 * filesystem.
852cdb6e4e6SMatthew Dillon 		 *
853cdb6e4e6SMatthew Dillon 		 * Because of this we can throw away failed UNDO buffers.  If
854cdb6e4e6SMatthew Dillon 		 * we throw away META or DATA buffers we risk corrupting
855cdb6e4e6SMatthew Dillon 		 * the now read-only version of the filesystem visible to
856cdb6e4e6SMatthew Dillon 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
857cdb6e4e6SMatthew Dillon 		 * by the kernel and ref the io so it doesn't get thrown
858cdb6e4e6SMatthew Dillon 		 * away.
859cdb6e4e6SMatthew Dillon 		 */
860cdb6e4e6SMatthew Dillon 		if (bp->b_flags & B_ERROR) {
861cdb6e4e6SMatthew Dillon 			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
862cdb6e4e6SMatthew Dillon 					      "while flushing meta-data");
863cdb6e4e6SMatthew Dillon 			switch(iou->io.type) {
864cdb6e4e6SMatthew Dillon 			case HAMMER_STRUCTURE_UNDO_BUFFER:
865cdb6e4e6SMatthew Dillon 				break;
866cdb6e4e6SMatthew Dillon 			default:
867cdb6e4e6SMatthew Dillon 				if (iou->io.ioerror == 0) {
868cdb6e4e6SMatthew Dillon 					iou->io.ioerror = 1;
869cdb6e4e6SMatthew Dillon 					if (iou->io.lock.refs == 0)
870cdb6e4e6SMatthew Dillon 						++hammer_count_refedbufs;
871cdb6e4e6SMatthew Dillon 					hammer_ref(&iou->io.lock);
872cdb6e4e6SMatthew Dillon 				}
873cdb6e4e6SMatthew Dillon 				break;
874cdb6e4e6SMatthew Dillon 			}
875cdb6e4e6SMatthew Dillon 			bp->b_flags &= ~B_ERROR;
876cdb6e4e6SMatthew Dillon 			bundirty(bp);
877cdb6e4e6SMatthew Dillon #if 0
878cdb6e4e6SMatthew Dillon 			hammer_io_set_modlist(&iou->io);
879cdb6e4e6SMatthew Dillon 			iou->io.modified = 1;
880cdb6e4e6SMatthew Dillon #endif
881cdb6e4e6SMatthew Dillon 		}
882ce0138a6SMatthew Dillon 		hammer_stats_disk_write += iou->io.bytes;
883f5a07a7aSMatthew Dillon 		hammer_count_io_running_write -= iou->io.bytes;
884f5a07a7aSMatthew Dillon 		iou->io.hmp->io_running_space -= iou->io.bytes;
885f5a07a7aSMatthew Dillon 		if (iou->io.hmp->io_running_space == 0)
886f5a07a7aSMatthew Dillon 			wakeup(&iou->io.hmp->io_running_space);
887f5a07a7aSMatthew Dillon 		KKASSERT(iou->io.hmp->io_running_space >= 0);
888f90dde4cSMatthew Dillon 		iou->io.running = 0;
889ce0138a6SMatthew Dillon 	} else {
890ce0138a6SMatthew Dillon 		hammer_stats_disk_read += iou->io.bytes;
891f90dde4cSMatthew Dillon 	}
892f90dde4cSMatthew Dillon 
893055f5ff8SMatthew Dillon 	if (iou->io.waiting) {
894055f5ff8SMatthew Dillon 		iou->io.waiting = 0;
895055f5ff8SMatthew Dillon 		wakeup(iou);
896055f5ff8SMatthew Dillon 	}
897055f5ff8SMatthew Dillon 
898055f5ff8SMatthew Dillon 	/*
899bf3b416bSMatthew Dillon 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
900bf3b416bSMatthew Dillon 	 * point, do it now if refs has become zero.
901055f5ff8SMatthew Dillon 	 */
902055f5ff8SMatthew Dillon 	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
903b33e2cc0SMatthew Dillon 		KKASSERT(iou->io.modified == 0);
904a99b9ea2SMatthew Dillon 		--hammer_count_io_locked;
905d5ef456eSMatthew Dillon 		bp->b_flags &= ~B_LOCKED;
906055f5ff8SMatthew Dillon 		hammer_io_deallocate(bp);
907055f5ff8SMatthew Dillon 		/* structure may be dead now */
908fbc6e32aSMatthew Dillon 	}
90966325755SMatthew Dillon }
91066325755SMatthew Dillon 
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

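/*
 * bioops fsync callback.  HAMMER does not use it; the stub simply
 * returns success.
 */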
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

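/*
 * bioops callback for migrating dependencies from one buffer to another.
 * Not needed by HAMMER, so this is an empty stub.
 */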
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_unref(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

105266325755SMatthew Dillon struct bio_ops hammer_bioops = {
105366325755SMatthew Dillon 	.io_start	= hammer_io_start,
105466325755SMatthew Dillon 	.io_complete	= hammer_io_complete,
105566325755SMatthew Dillon 	.io_deallocate	= hammer_io_deallocate,
105666325755SMatthew Dillon 	.io_fsync	= hammer_io_fsync,
105766325755SMatthew Dillon 	.io_sync	= hammer_io_sync,
105866325755SMatthew Dillon 	.io_movedeps	= hammer_io_movedeps,
105966325755SMatthew Dillon 	.io_countdeps	= hammer_io_countdeps,
106066325755SMatthew Dillon 	.io_checkread	= hammer_io_checkread,
106166325755SMatthew Dillon 	.io_checkwrite	= hammer_io_checkwrite,
106266325755SMatthew Dillon };
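
#if 0
/*
 * Illustrative sketch only (not part of the original source): the cast in
 * hammer_io_checkwrite() above relies on the worklist node being the FIRST
 * member of struct hammer_io, so the entry the kernel finds on bp->b_dep is
 * also the address of the hammer_io itself.  A minimal association might
 * look roughly like this; the helper name and the worklist member layout
 * are assumptions made for the example.
 */
static void
hammer_io_example_associate(hammer_io_t io, struct buf *bp)
{
	KKASSERT(LIST_EMPTY(&bp->b_dep));
	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
	bp->b_ops = &hammer_bioops;	/* route checkread/checkwrite here */
	io->bp = bp;
}
#endif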
106366325755SMatthew Dillon 
106447637bffSMatthew Dillon /************************************************************************
106547637bffSMatthew Dillon  *				DIRECT IO OPS 				*
106647637bffSMatthew Dillon  ************************************************************************
106747637bffSMatthew Dillon  *
106847637bffSMatthew Dillon  * These functions operate directly on the buffer cache buffer associated
106947637bffSMatthew Dillon  * with a front-end vnode rather than a back-end device vnode.
107047637bffSMatthew Dillon  */
107147637bffSMatthew Dillon 
107247637bffSMatthew Dillon /*
107347637bffSMatthew Dillon  * Read a buffer associated with a front-end vnode directly from the
10741b0ab2c3SMatthew Dillon  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
10751b0ab2c3SMatthew Dillon  * we validate the CRC (currently disabled; see the #if 0 block below).
1076a99b9ea2SMatthew Dillon  *
10771b0ab2c3SMatthew Dillon  * We must check for the presence of a HAMMER buffer to handle the case
10781b0ab2c3SMatthew Dillon  * where the reblocker has rewritten the data (which it does via the HAMMER
10791b0ab2c3SMatthew Dillon  * buffer system, not via the high-level vnode buffer cache), but not yet
10801b0ab2c3SMatthew Dillon  * committed the buffer to the media.
108147637bffSMatthew Dillon  */
108247637bffSMatthew Dillon int
10831b0ab2c3SMatthew Dillon hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
10841b0ab2c3SMatthew Dillon 		      hammer_btree_leaf_elm_t leaf)
108547637bffSMatthew Dillon {
10861b0ab2c3SMatthew Dillon 	hammer_off_t buf_offset;
108747637bffSMatthew Dillon 	hammer_off_t zone2_offset;
108847637bffSMatthew Dillon 	hammer_volume_t volume;
108947637bffSMatthew Dillon 	struct buf *bp;
109047637bffSMatthew Dillon 	struct bio *nbio;
109147637bffSMatthew Dillon 	int vol_no;
109247637bffSMatthew Dillon 	int error;
109347637bffSMatthew Dillon 
10941b0ab2c3SMatthew Dillon 	buf_offset = bio->bio_offset;
10951b0ab2c3SMatthew Dillon 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
10961b0ab2c3SMatthew Dillon 		 HAMMER_ZONE_LARGE_DATA);
10974a2796f3SMatthew Dillon 
10981b0ab2c3SMatthew Dillon 	/*
10991b0ab2c3SMatthew Dillon 	 * The buffer cache may have an aliased buffer (the reblocker can
11001b0ab2c3SMatthew Dillon 	 * write them).  If it does we have to sync any dirty data before
11011b0ab2c3SMatthew Dillon 	 * we can build our direct-read.  This is a non-critical code path.
11021b0ab2c3SMatthew Dillon 	 */
11031b0ab2c3SMatthew Dillon 	bp = bio->bio_buf;
11041b0ab2c3SMatthew Dillon 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
11051b0ab2c3SMatthew Dillon 
11061b0ab2c3SMatthew Dillon 	/*
11071b0ab2c3SMatthew Dillon 	 * Resolve to a zone-2 offset.  The conversion just requires
11081b0ab2c3SMatthew Dillon 	 * munging the top 4 bits but we want to abstract it anyway
11091b0ab2c3SMatthew Dillon 	 * so the blockmap code can verify the zone assignment.
11101b0ab2c3SMatthew Dillon 	 */
11111b0ab2c3SMatthew Dillon 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
11121b0ab2c3SMatthew Dillon 	if (error)
11131b0ab2c3SMatthew Dillon 		goto done;
111443c665aeSMatthew Dillon 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
111543c665aeSMatthew Dillon 		 HAMMER_ZONE_RAW_BUFFER);
111643c665aeSMatthew Dillon 
11171b0ab2c3SMatthew Dillon 	/*
11181b0ab2c3SMatthew Dillon 	 * Resolve volume and raw-offset for 3rd level bio.  The
11191b0ab2c3SMatthew Dillon 	 * offset will be specific to the volume.
11201b0ab2c3SMatthew Dillon 	 */
112147637bffSMatthew Dillon 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
112247637bffSMatthew Dillon 	volume = hammer_get_volume(hmp, vol_no, &error);
112347637bffSMatthew Dillon 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
112447637bffSMatthew Dillon 		error = EIO;
112543c665aeSMatthew Dillon 
112647637bffSMatthew Dillon 	if (error == 0) {
1127e469566bSMatthew Dillon 		/*
1128e469566bSMatthew Dillon 		 * 3rd level bio
1129e469566bSMatthew Dillon 		 */
113047637bffSMatthew Dillon 		nbio = push_bio(bio);
113147637bffSMatthew Dillon 		nbio->bio_offset = volume->ondisk->vol_buf_beg +
1132e469566bSMatthew Dillon 				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
11331b0ab2c3SMatthew Dillon #if 0
11341b0ab2c3SMatthew Dillon 		/*
11351b0ab2c3SMatthew Dillon 		 * XXX disabled - our CRC check doesn't work if the OS
11361b0ab2c3SMatthew Dillon 		 * does bogus_page replacement on the direct-read.
11371b0ab2c3SMatthew Dillon 		 */
11381b0ab2c3SMatthew Dillon 		if (leaf && hammer_verify_data) {
11391b0ab2c3SMatthew Dillon 			nbio->bio_done = hammer_io_direct_read_complete;
11401b0ab2c3SMatthew Dillon 			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
11411b0ab2c3SMatthew Dillon 		}
11421b0ab2c3SMatthew Dillon #endif
1143ce0138a6SMatthew Dillon 		hammer_stats_disk_read += bp->b_bufsize;
114447637bffSMatthew Dillon 		vn_strategy(volume->devvp, nbio);
114547637bffSMatthew Dillon 	}
114647637bffSMatthew Dillon 	hammer_rel_volume(volume, 0);
11471b0ab2c3SMatthew Dillon done:
114847637bffSMatthew Dillon 	if (error) {
1149cebe9493SMatthew Dillon 		kprintf("hammer_direct_read: failed @ %016llx\n",
1150973c11b9SMatthew Dillon 			(long long)zone2_offset);
115147637bffSMatthew Dillon 		bp->b_error = error;
115247637bffSMatthew Dillon 		bp->b_flags |= B_ERROR;
115347637bffSMatthew Dillon 		biodone(bio);
115447637bffSMatthew Dillon 	}
115547637bffSMatthew Dillon 	return(error);
115647637bffSMatthew Dillon }
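
#if 0
/*
 * Illustrative sketch (not part of the original source): the offset
 * arithmetic performed above, reduced to a helper.  A zone-2 offset keeps
 * the zone in its top 4 bits and the volume number below that; only the
 * low bits (HAMMER_OFF_SHORT_MASK) form a byte offset within the volume,
 * which is what the 3rd level bio needs.  The helper name is an assumption
 * made for the example.
 */
static hammer_off_t
hammer_example_zone2_to_device(hammer_volume_t volume,
			       hammer_off_t zone2_offset)
{
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	KKASSERT(HAMMER_VOL_DECODE(zone2_offset) == volume->vol_no);

	/* device-relative byte offset used for the 3rd level bio */
	return(volume->ondisk->vol_buf_beg +
	       (zone2_offset & HAMMER_OFF_SHORT_MASK));
}
#endif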
115747637bffSMatthew Dillon 
11581b0ab2c3SMatthew Dillon #if 0
11591b0ab2c3SMatthew Dillon /*
11601b0ab2c3SMatthew Dillon  * On completion of the BIO this callback must check the data CRC
11611b0ab2c3SMatthew Dillon  * and chain to the previous bio.
11621b0ab2c3SMatthew Dillon  */
11631b0ab2c3SMatthew Dillon static
11641b0ab2c3SMatthew Dillon void
11651b0ab2c3SMatthew Dillon hammer_io_direct_read_complete(struct bio *nbio)
11661b0ab2c3SMatthew Dillon {
11671b0ab2c3SMatthew Dillon 	struct bio *obio;
11681b0ab2c3SMatthew Dillon 	struct buf *bp;
11691b0ab2c3SMatthew Dillon 	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
11701b0ab2c3SMatthew Dillon 
11711b0ab2c3SMatthew Dillon 	bp = nbio->bio_buf;
11721b0ab2c3SMatthew Dillon 	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
11731b0ab2c3SMatthew Dillon 		kprintf("HAMMER: data_crc error @%016llx/%d\n",
11741b0ab2c3SMatthew Dillon 			nbio->bio_offset, bp->b_bufsize);
1175fc73edd8SMatthew Dillon 		if (hammer_debug_critical)
1176fc73edd8SMatthew Dillon 			Debugger("data_crc on read");
11771b0ab2c3SMatthew Dillon 		bp->b_flags |= B_ERROR;
11781b0ab2c3SMatthew Dillon 		bp->b_error = EIO;
11791b0ab2c3SMatthew Dillon 	}
11801b0ab2c3SMatthew Dillon 	obio = pop_bio(nbio);
11811b0ab2c3SMatthew Dillon 	biodone(obio);
11821b0ab2c3SMatthew Dillon }
11831b0ab2c3SMatthew Dillon #endif
11841b0ab2c3SMatthew Dillon 
118547637bffSMatthew Dillon /*
118647637bffSMatthew Dillon  * Write a buffer associated with a front-end vnode directly to the
118747637bffSMatthew Dillon  * disk media.  The bio may be issued asynchronously.
11881b0ab2c3SMatthew Dillon  *
11891b0ab2c3SMatthew Dillon  * The BIO is associated with the specified record and RECF_DIRECT_IO
1190e469566bSMatthew Dillon  * is set.  The record is added to its object.
119147637bffSMatthew Dillon  */
119247637bffSMatthew Dillon int
11931b0ab2c3SMatthew Dillon hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
119447637bffSMatthew Dillon 		       struct bio *bio)
119547637bffSMatthew Dillon {
11961b0ab2c3SMatthew Dillon 	hammer_btree_leaf_elm_t leaf = &record->leaf;
11970832c9bbSMatthew Dillon 	hammer_off_t buf_offset;
119847637bffSMatthew Dillon 	hammer_off_t zone2_offset;
119947637bffSMatthew Dillon 	hammer_volume_t volume;
12000832c9bbSMatthew Dillon 	hammer_buffer_t buffer;
120147637bffSMatthew Dillon 	struct buf *bp;
120247637bffSMatthew Dillon 	struct bio *nbio;
12030832c9bbSMatthew Dillon 	char *ptr;
120447637bffSMatthew Dillon 	int vol_no;
120547637bffSMatthew Dillon 	int error;
120647637bffSMatthew Dillon 
12070832c9bbSMatthew Dillon 	buf_offset = leaf->data_offset;
12080832c9bbSMatthew Dillon 
12090832c9bbSMatthew Dillon 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
121047637bffSMatthew Dillon 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
121147637bffSMatthew Dillon 
12120832c9bbSMatthew Dillon 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
12134a2796f3SMatthew Dillon 	    leaf->data_len >= HAMMER_BUFSIZE) {
12140832c9bbSMatthew Dillon 		/*
12150832c9bbSMatthew Dillon 		 * We are using the vnode's bio to write directly to the
12160832c9bbSMatthew Dillon 		 * media, any hammer_buffer at the same zone-X offset will
12170832c9bbSMatthew Dillon 		 * now have stale data.
12180832c9bbSMatthew Dillon 		 */
12190832c9bbSMatthew Dillon 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
122047637bffSMatthew Dillon 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
122147637bffSMatthew Dillon 		volume = hammer_get_volume(hmp, vol_no, &error);
122247637bffSMatthew Dillon 
122347637bffSMatthew Dillon 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
122447637bffSMatthew Dillon 			error = EIO;
122547637bffSMatthew Dillon 		if (error == 0) {
12260832c9bbSMatthew Dillon 			bp = bio->bio_buf;
12274a2796f3SMatthew Dillon 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1228e469566bSMatthew Dillon 			/*
12294a2796f3SMatthew Dillon 			hammer_del_buffers(hmp, buf_offset,
12304a2796f3SMatthew Dillon 					   zone2_offset, bp->b_bufsize);
1231e469566bSMatthew Dillon 			*/
12321b0ab2c3SMatthew Dillon 
123343c665aeSMatthew Dillon 			/*
123443c665aeSMatthew Dillon 			 * Second level bio - cached zone2 offset.
12351b0ab2c3SMatthew Dillon 			 *
12361b0ab2c3SMatthew Dillon 			 * (We can put our bio_done function in either the
12371b0ab2c3SMatthew Dillon 			 *  2nd or 3rd level).
123843c665aeSMatthew Dillon 			 */
123947637bffSMatthew Dillon 			nbio = push_bio(bio);
124043c665aeSMatthew Dillon 			nbio->bio_offset = zone2_offset;
12411b0ab2c3SMatthew Dillon 			nbio->bio_done = hammer_io_direct_write_complete;
12421b0ab2c3SMatthew Dillon 			nbio->bio_caller_info1.ptr = record;
1243e469566bSMatthew Dillon 			record->zone2_offset = zone2_offset;
1244e469566bSMatthew Dillon 			record->flags |= HAMMER_RECF_DIRECT_IO |
1245e469566bSMatthew Dillon 					 HAMMER_RECF_DIRECT_INVAL;
124643c665aeSMatthew Dillon 
124743c665aeSMatthew Dillon 			/*
124843c665aeSMatthew Dillon 			 * Third level bio - raw offset specific to the
124943c665aeSMatthew Dillon 			 * correct volume.
125043c665aeSMatthew Dillon 			 */
125143c665aeSMatthew Dillon 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
125243c665aeSMatthew Dillon 			nbio = push_bio(nbio);
125347637bffSMatthew Dillon 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
12540832c9bbSMatthew Dillon 					   zone2_offset;
1255ce0138a6SMatthew Dillon 			hammer_stats_disk_write += bp->b_bufsize;
125647637bffSMatthew Dillon 			vn_strategy(volume->devvp, nbio);
1257748efb59SMatthew Dillon 			hammer_io_flush_mark(volume);
125847637bffSMatthew Dillon 		}
125947637bffSMatthew Dillon 		hammer_rel_volume(volume, 0);
12600832c9bbSMatthew Dillon 	} else {
12611b0ab2c3SMatthew Dillon 		/*
12621b0ab2c3SMatthew Dillon 		 * Must fit in a standard HAMMER buffer.  In this case all
12631b0ab2c3SMatthew Dillon 		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
12641b0ab2c3SMatthew Dillon 		 * does not need to be set up.
12651b0ab2c3SMatthew Dillon 		 */
12660832c9bbSMatthew Dillon 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
12670832c9bbSMatthew Dillon 		buffer = NULL;
12680832c9bbSMatthew Dillon 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
12690832c9bbSMatthew Dillon 		if (error == 0) {
12700832c9bbSMatthew Dillon 			bp = bio->bio_buf;
12717bc5b8c2SMatthew Dillon 			bp->b_flags |= B_AGE;
12720832c9bbSMatthew Dillon 			hammer_io_modify(&buffer->io, 1);
12730832c9bbSMatthew Dillon 			bcopy(bp->b_data, ptr, leaf->data_len);
12740832c9bbSMatthew Dillon 			hammer_io_modify_done(&buffer->io);
12757bc5b8c2SMatthew Dillon 			hammer_rel_buffer(buffer, 0);
12760832c9bbSMatthew Dillon 			bp->b_resid = 0;
12770832c9bbSMatthew Dillon 			biodone(bio);
12780832c9bbSMatthew Dillon 		}
127947637bffSMatthew Dillon 	}
1280e469566bSMatthew Dillon 	if (error == 0) {
1281e469566bSMatthew Dillon 		/*
1282e469566bSMatthew Dillon 		 * The record is all set up now; add it.  Potential conflicts
1283e469566bSMatthew Dillon 		 * have already been dealt with.
1284e469566bSMatthew Dillon 		 */
1285e469566bSMatthew Dillon 		error = hammer_mem_add(record);
1286e469566bSMatthew Dillon 		KKASSERT(error == 0);
1287e469566bSMatthew Dillon 	} else {
1288e469566bSMatthew Dillon 		/*
12893214ade6SMatthew Dillon 		 * Major suckage occurred.  Also note: the record was never added
12903214ade6SMatthew Dillon 		 * to the tree so we do not have to worry about the backend.
1291e469566bSMatthew Dillon 		 */
1292cebe9493SMatthew Dillon 		kprintf("hammer_direct_write: failed @ %016llx\n",
1293973c11b9SMatthew Dillon 			(long long)leaf->data_offset);
129447637bffSMatthew Dillon 		bp = bio->bio_buf;
129547637bffSMatthew Dillon 		bp->b_resid = 0;
129647637bffSMatthew Dillon 		bp->b_error = EIO;
129747637bffSMatthew Dillon 		bp->b_flags |= B_ERROR;
129847637bffSMatthew Dillon 		biodone(bio);
1299e469566bSMatthew Dillon 		record->flags |= HAMMER_RECF_DELETED_FE;
1300e469566bSMatthew Dillon 		hammer_rel_mem_record(record);
130147637bffSMatthew Dillon 	}
130247637bffSMatthew Dillon 	return(error);
130347637bffSMatthew Dillon }
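
#if 0
/*
 * Illustrative sketch (not part of the original source): the test that
 * selects the true direct-write path above.  Only bulk data that begins on
 * a HAMMER buffer boundary and spans at least one full buffer bypasses the
 * HAMMER buffer system; anything smaller is copied through hammer_bread()
 * and hammer_io_modify() instead.  The predicate name is an assumption
 * made for the example.
 */
static int
hammer_example_can_direct_write(hammer_btree_leaf_elm_t leaf)
{
	return((leaf->data_offset & HAMMER_BUFMASK) == 0 &&
	       leaf->data_len >= HAMMER_BUFSIZE);
}
#endif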
130447637bffSMatthew Dillon 
130543c665aeSMatthew Dillon /*
13061b0ab2c3SMatthew Dillon  * On completion of the BIO this callback must disconnect
13071b0ab2c3SMatthew Dillon  * it from the hammer_record and chain to the previous bio.
1308cdb6e4e6SMatthew Dillon  *
1309cdb6e4e6SMatthew Dillon  * An I/O error forces the mount to read-only.  Data buffers
1310cdb6e4e6SMatthew Dillon  * are not B_LOCKED like meta-data buffers are, so we have to
1311cdb6e4e6SMatthew Dillon  * throw the buffer away to prevent the kernel from retrying.
13121b0ab2c3SMatthew Dillon  */
13131b0ab2c3SMatthew Dillon static
13141b0ab2c3SMatthew Dillon void
13151b0ab2c3SMatthew Dillon hammer_io_direct_write_complete(struct bio *nbio)
13161b0ab2c3SMatthew Dillon {
13171b0ab2c3SMatthew Dillon 	struct bio *obio;
1318e469566bSMatthew Dillon 	struct buf *bp;
13191b0ab2c3SMatthew Dillon 	hammer_record_t record = nbio->bio_caller_info1.ptr;
13201b0ab2c3SMatthew Dillon 
1321e469566bSMatthew Dillon 	bp = nbio->bio_buf;
13221b0ab2c3SMatthew Dillon 	obio = pop_bio(nbio);
1323e469566bSMatthew Dillon 	if (bp->b_flags & B_ERROR) {
1324cdb6e4e6SMatthew Dillon 		hammer_critical_error(record->ip->hmp, record->ip,
1325e469566bSMatthew Dillon 				      bp->b_error,
1326cdb6e4e6SMatthew Dillon 				      "while writing bulk data");
1327e469566bSMatthew Dillon 		bp->b_flags |= B_INVAL;
1328cdb6e4e6SMatthew Dillon 	}
13291b0ab2c3SMatthew Dillon 	biodone(obio);
1330e469566bSMatthew Dillon 
1331e469566bSMatthew Dillon 	KKASSERT(record != NULL);
1332e469566bSMatthew Dillon 	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
13331b0ab2c3SMatthew Dillon 	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
1334de996e86SMatthew Dillon 		record->flags &= ~(HAMMER_RECF_DIRECT_IO |
1335de996e86SMatthew Dillon 				   HAMMER_RECF_DIRECT_WAIT);
1336de996e86SMatthew Dillon 		/* record can disappear once DIRECT_IO flag is cleared */
13371b0ab2c3SMatthew Dillon 		wakeup(&record->flags);
1338de996e86SMatthew Dillon 	} else {
1339de996e86SMatthew Dillon 		record->flags &= ~HAMMER_RECF_DIRECT_IO;
1340de996e86SMatthew Dillon 		/* record can disappear once DIRECT_IO flag is cleared */
13411b0ab2c3SMatthew Dillon 	}
13421b0ab2c3SMatthew Dillon }
13431b0ab2c3SMatthew Dillon 
13441b0ab2c3SMatthew Dillon 
13451b0ab2c3SMatthew Dillon /*
13461b0ab2c3SMatthew Dillon  * This is called before a record is either committed to the B-Tree
1347e469566bSMatthew Dillon  * or destroyed, to resolve any associated direct-IO.
13481b0ab2c3SMatthew Dillon  *
1349e469566bSMatthew Dillon  * (1) We must wait for any direct-IO related to the record to complete.
1350e469566bSMatthew Dillon  *
1351e469566bSMatthew Dillon  * (2) We must remove any buffer cache aliases for data accessed via
1352e469566bSMatthew Dillon  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1353e469566bSMatthew Dillon  *     (the mirroring and reblocking code) do not see stale data.
13541b0ab2c3SMatthew Dillon  */
13551b0ab2c3SMatthew Dillon void
13561b0ab2c3SMatthew Dillon hammer_io_direct_wait(hammer_record_t record)
13571b0ab2c3SMatthew Dillon {
1358e469566bSMatthew Dillon 	/*
1359e469566bSMatthew Dillon 	 * Wait for I/O to complete
1360e469566bSMatthew Dillon 	 */
1361e469566bSMatthew Dillon 	if (record->flags & HAMMER_RECF_DIRECT_IO) {
13621b0ab2c3SMatthew Dillon 		crit_enter();
13631b0ab2c3SMatthew Dillon 		while (record->flags & HAMMER_RECF_DIRECT_IO) {
13641b0ab2c3SMatthew Dillon 			record->flags |= HAMMER_RECF_DIRECT_WAIT;
13651b0ab2c3SMatthew Dillon 			tsleep(&record->flags, 0, "hmdiow", 0);
13661b0ab2c3SMatthew Dillon 		}
13671b0ab2c3SMatthew Dillon 		crit_exit();
13681b0ab2c3SMatthew Dillon 	}
13691b0ab2c3SMatthew Dillon 
13701b0ab2c3SMatthew Dillon 	/*
1371362ec2dcSMatthew Dillon 	 * Invalidate any related buffer cache aliases associated with the
1372362ec2dcSMatthew Dillon 	 * backing device.  This is needed because the buffer cache buffer
1373362ec2dcSMatthew Dillon 	 * for file data is associated with the file vnode, not the backing
1374362ec2dcSMatthew Dillon 	 * device vnode.
1375362ec2dcSMatthew Dillon 	 *
1376362ec2dcSMatthew Dillon 	 * XXX I do not think this case can occur any more now that
1377362ec2dcSMatthew Dillon 	 * reservations ensure that all such buffers are removed before
1378362ec2dcSMatthew Dillon 	 * an area can be reused.
1379e469566bSMatthew Dillon 	 */
1380e469566bSMatthew Dillon 	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
1381e469566bSMatthew Dillon 		KKASSERT(record->leaf.data_offset);
1382362ec2dcSMatthew Dillon 		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
1383362ec2dcSMatthew Dillon 				   record->zone2_offset, record->leaf.data_len,
1384362ec2dcSMatthew Dillon 				   1);
1385e469566bSMatthew Dillon 		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
1386e469566bSMatthew Dillon 	}
1387e469566bSMatthew Dillon }
1388e469566bSMatthew Dillon 
1389e469566bSMatthew Dillon /*
139043c665aeSMatthew Dillon  * This is called to remove the second-level cached zone-2 offset from
139143c665aeSMatthew Dillon  * frontend buffer cache buffers, now stale due to a data relocation.
139243c665aeSMatthew Dillon  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
139343c665aeSMatthew Dillon  * by hammer_vop_strategy_read().
139443c665aeSMatthew Dillon  *
139543c665aeSMatthew Dillon  * This is rather nasty because here we have something like the reblocker
139643c665aeSMatthew Dillon  * scanning the raw B-Tree with no held references on anything, really,
139743c665aeSMatthew Dillon  * other than a shared lock on the B-Tree node, and we have to access the
139843c665aeSMatthew Dillon  * frontend's buffer cache to check for and clean out the association.
139943c665aeSMatthew Dillon  * Specifically, if the reblocker is moving data on the disk, these cached
140043c665aeSMatthew Dillon  * offsets will become invalid.
140143c665aeSMatthew Dillon  *
140243c665aeSMatthew Dillon  * Only data record types associated with the large-data zone are subject
140343c665aeSMatthew Dillon  * to direct-io and need to be checked.
140443c665aeSMatthew Dillon  *
140543c665aeSMatthew Dillon  */
140643c665aeSMatthew Dillon void
140743c665aeSMatthew Dillon hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
140843c665aeSMatthew Dillon {
140943c665aeSMatthew Dillon 	struct hammer_inode_info iinfo;
141043c665aeSMatthew Dillon 	int zone;
141143c665aeSMatthew Dillon 
141243c665aeSMatthew Dillon 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
141343c665aeSMatthew Dillon 		return;
141443c665aeSMatthew Dillon 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
141543c665aeSMatthew Dillon 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
141643c665aeSMatthew Dillon 		return;
141743c665aeSMatthew Dillon 	iinfo.obj_id = leaf->base.obj_id;
141843c665aeSMatthew Dillon 	iinfo.obj_asof = 0;	/* unused */
141943c665aeSMatthew Dillon 	iinfo.obj_localization = leaf->base.localization &
14205a930e66SMatthew Dillon 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
142143c665aeSMatthew Dillon 	iinfo.u.leaf = leaf;
142243c665aeSMatthew Dillon 	hammer_scan_inode_snapshots(hmp, &iinfo,
142343c665aeSMatthew Dillon 				    hammer_io_direct_uncache_callback,
142443c665aeSMatthew Dillon 				    leaf);
142543c665aeSMatthew Dillon }
142643c665aeSMatthew Dillon 
142743c665aeSMatthew Dillon static int
142843c665aeSMatthew Dillon hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
142943c665aeSMatthew Dillon {
143043c665aeSMatthew Dillon 	hammer_inode_info_t iinfo = data;
143143c665aeSMatthew Dillon 	hammer_off_t data_offset;
143243c665aeSMatthew Dillon 	hammer_off_t file_offset;
143343c665aeSMatthew Dillon 	struct vnode *vp;
143443c665aeSMatthew Dillon 	struct buf *bp;
143543c665aeSMatthew Dillon 	int blksize;
143643c665aeSMatthew Dillon 
143743c665aeSMatthew Dillon 	if (ip->vp == NULL)
143843c665aeSMatthew Dillon 		return(0);
143943c665aeSMatthew Dillon 	data_offset = iinfo->u.leaf->data_offset;
144043c665aeSMatthew Dillon 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
144143c665aeSMatthew Dillon 	blksize = iinfo->u.leaf->data_len;
144243c665aeSMatthew Dillon 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
144343c665aeSMatthew Dillon 
144443c665aeSMatthew Dillon 	hammer_ref(&ip->lock);
144543c665aeSMatthew Dillon 	if (hammer_get_vnode(ip, &vp) == 0) {
1446b1c20cfaSMatthew Dillon 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
144743c665aeSMatthew Dillon 		    bp->b_bio2.bio_offset != NOOFFSET) {
144843c665aeSMatthew Dillon 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
144943c665aeSMatthew Dillon 			bp->b_bio2.bio_offset = NOOFFSET;
145043c665aeSMatthew Dillon 			brelse(bp);
145143c665aeSMatthew Dillon 		}
145243c665aeSMatthew Dillon 		vput(vp);
145343c665aeSMatthew Dillon 	}
145443c665aeSMatthew Dillon 	hammer_rel_inode(ip, 0);
145543c665aeSMatthew Dillon 	return(0);
145643c665aeSMatthew Dillon }
145747637bffSMatthew Dillon 
1458748efb59SMatthew Dillon 
1459748efb59SMatthew Dillon /*
1460748efb59SMatthew Dillon  * This function is called when writes may have occurred on the volume,
1461748efb59SMatthew Dillon  * indicating that the device may be holding cached writes.
1462748efb59SMatthew Dillon  */
1463748efb59SMatthew Dillon static void
1464748efb59SMatthew Dillon hammer_io_flush_mark(hammer_volume_t volume)
1465748efb59SMatthew Dillon {
1466748efb59SMatthew Dillon 	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
1467748efb59SMatthew Dillon }
1468748efb59SMatthew Dillon 
1469748efb59SMatthew Dillon /*
1470748efb59SMatthew Dillon  * This function ensures that the device has flushed any cached writes out.
1471748efb59SMatthew Dillon  */
1472748efb59SMatthew Dillon void
1473748efb59SMatthew Dillon hammer_io_flush_sync(hammer_mount_t hmp)
1474748efb59SMatthew Dillon {
1475748efb59SMatthew Dillon 	hammer_volume_t volume;
1476748efb59SMatthew Dillon 	struct buf *bp_base = NULL;
1477748efb59SMatthew Dillon 	struct buf *bp;
1478748efb59SMatthew Dillon 
1479748efb59SMatthew Dillon 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1480748efb59SMatthew Dillon 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1481748efb59SMatthew Dillon 			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
1482748efb59SMatthew Dillon 			bp = getpbuf(NULL);
1483748efb59SMatthew Dillon 			bp->b_bio1.bio_offset = 0;
1484748efb59SMatthew Dillon 			bp->b_bufsize = 0;
1485748efb59SMatthew Dillon 			bp->b_bcount = 0;
1486748efb59SMatthew Dillon 			bp->b_cmd = BUF_CMD_FLUSH;
1487748efb59SMatthew Dillon 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1488ae8e83e6SMatthew Dillon 			bp->b_bio1.bio_done = biodone_sync;
1489ae8e83e6SMatthew Dillon 			bp->b_bio1.bio_flags |= BIO_SYNC;
1490748efb59SMatthew Dillon 			bp_base = bp;
1491748efb59SMatthew Dillon 			vn_strategy(volume->devvp, &bp->b_bio1);
1492748efb59SMatthew Dillon 		}
1493748efb59SMatthew Dillon 	}
1494748efb59SMatthew Dillon 	while ((bp = bp_base) != NULL) {
1495748efb59SMatthew Dillon 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1496ae8e83e6SMatthew Dillon 		biowait(&bp->b_bio1, "hmrFLS");
1497748efb59SMatthew Dillon 		relpbuf(bp, NULL);
1498748efb59SMatthew Dillon 	}
1499748efb59SMatthew Dillon }
1500