xref: /dflybsd-src/sys/vfs/hammer/hammer_io.c (revision 2f85fa4d65e69cdd2ee4ed3da50f49a52009eb4c)
166325755SMatthew Dillon /*
2b84de5afSMatthew Dillon  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
366325755SMatthew Dillon  *
466325755SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
566325755SMatthew Dillon  * by Matthew Dillon <dillon@backplane.com>
666325755SMatthew Dillon  *
766325755SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
866325755SMatthew Dillon  * modification, are permitted provided that the following conditions
966325755SMatthew Dillon  * are met:
1066325755SMatthew Dillon  *
1166325755SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
1266325755SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
1366325755SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
1466325755SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in
1566325755SMatthew Dillon  *    the documentation and/or other materials provided with the
1666325755SMatthew Dillon  *    distribution.
1766325755SMatthew Dillon  * 3. Neither the name of The DragonFly Project nor the names of its
1866325755SMatthew Dillon  *    contributors may be used to endorse or promote products derived
1966325755SMatthew Dillon  *    from this software without specific, prior written permission.
2066325755SMatthew Dillon  *
2166325755SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2266325755SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2366325755SMatthew Dillon  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
2466325755SMatthew Dillon  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
2566325755SMatthew Dillon  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2666325755SMatthew Dillon  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
2766325755SMatthew Dillon  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2866325755SMatthew Dillon  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2966325755SMatthew Dillon  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
3066325755SMatthew Dillon  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
3166325755SMatthew Dillon  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3266325755SMatthew Dillon  * SUCH DAMAGE.
3366325755SMatthew Dillon  *
34*2f85fa4dSMatthew Dillon  * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.32 2008/05/18 01:48:50 dillon Exp $
3566325755SMatthew Dillon  */
3666325755SMatthew Dillon /*
3766325755SMatthew Dillon  * IO Primitives and buffer cache management
3866325755SMatthew Dillon  *
3966325755SMatthew Dillon  * All major data-tracking structures in HAMMER contain a struct hammer_io
4066325755SMatthew Dillon  * which is used to manage their backing store.  We use filesystem buffers
4166325755SMatthew Dillon  * for backing store and we leave them passively associated with their
4266325755SMatthew Dillon  * HAMMER structures.
4366325755SMatthew Dillon  *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
4766325755SMatthew Dillon  */
4866325755SMatthew Dillon 
4966325755SMatthew Dillon #include "hammer.h"
5066325755SMatthew Dillon #include <sys/fcntl.h>
5166325755SMatthew Dillon #include <sys/nlookup.h>
5266325755SMatthew Dillon #include <sys/buf.h>
5366325755SMatthew Dillon #include <sys/buf2.h>
5466325755SMatthew Dillon 
5510a5d1baSMatthew Dillon static void hammer_io_modify(hammer_io_t io, int count);
56055f5ff8SMatthew Dillon static void hammer_io_deallocate(struct buf *bp);
57055f5ff8SMatthew Dillon 
58055f5ff8SMatthew Dillon /*
5910a5d1baSMatthew Dillon  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
6010a5d1baSMatthew Dillon  * an existing hammer_io structure which may have switched to another type.
61055f5ff8SMatthew Dillon  */
62055f5ff8SMatthew Dillon void
6310a5d1baSMatthew Dillon hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
64055f5ff8SMatthew Dillon {
6510a5d1baSMatthew Dillon 	io->hmp = hmp;
66055f5ff8SMatthew Dillon 	io->type = type;
67055f5ff8SMatthew Dillon }
68055f5ff8SMatthew Dillon 
/*
 * Change the type of an existing hammer_io, e.g. when a buffer is
 * switched between meta-data and data roles.
 *
 * If the io is on a modified list it must be moved to the list matching
 * the new type.  Only the volu_list and meta_list contribute to
 * locked_dirty_count / hammer_count_dirtybufs, so the counters are
 * adjusted on removal and re-insertion as appropriate.
 */
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	/*
	 * Pull the io off its current modified list (and drop the dirty
	 * accounting if that list was volu_list or meta_list) before the
	 * type changes.
	 */
	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;

	/*
	 * Re-insert on the modified list appropriate for the new type.
	 * Volume and meta-data buffers are counted as locked-dirty;
	 * undo and data buffers are not.
	 */
	if (io->modified) {
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}
10710a5d1baSMatthew Dillon 
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	/* must not be dirty -- a modified buffer cannot be let go */
	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);	/* sever the b_dep linkage to our worklist */
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;

	/*
	 * elseit != 0: we still own the bp (released == 0) and hand it
	 * back to the kernel here.  elseit == 0: the caller has already
	 * released it and we only verify that state.
	 */
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	/*
	 * Clear the cached ondisk pointer appropriate for the union
	 * member backing this io type.
	 */
	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
146fbc6e32aSMatthew Dillon 
/*
 * Wait for any physical IO to complete
 *
 * Uses the tsleep interlock: the interlock is armed and io->waiting is
 * set *before* each sleep, and io->running is re-tested after re-arming,
 * so a wakeup occurring between the test and the sleep is not lost.
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			/* re-arm before re-testing to avoid a missed wakeup */
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
169055f5ff8SMatthew Dillon 
/* NOTE(review): HAMMER_MAXRA is not referenced in the visible portion of
 * this file -- cluster_read() below uses a literal 16 -- confirm intent. */
#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * Generally speaking HAMMER assumes either an optimized layout or that
 * typical access patterns will be close to the original layout when the
 * information was written.  For this reason we try to cluster all reads.
 *
 * Returns 0 on success or an errno from the read.  On success the bp is
 * actively held by HAMMER (io->released == 0).
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int   error;

	if ((bp = io->bp) == NULL) {
#if 1
		/* clustered read up to 'limit', max 16 read-ahead blocks */
		error = cluster_read(devvp, limit, io->offset,
				     HAMMER_BUFSIZE, MAXBSIZE, 16, &io->bp);
#else
		/* disabled non-clustered fallback */
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
#endif

		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			/* hook our worklist into b_dep so bioops fire */
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		/* a freshly associated io must be in a pristine state */
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		/* already associated with a bp, nothing to read */
		error = 0;
	}
	return(error);
}
20966325755SMatthew Dillon 
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * Always returns 0.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		/* no backing bp yet -- get one without reading the media */
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		/* reacquire ownership of a previously released bp */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	/* mark modified without bumping modify_refs (count == 0) */
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
24666325755SMatthew Dillon 
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	/* nothing to dispose of without a backing bp */
	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past
	 * (B_LOCKED was left set by our deallocate hook).
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if (flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 * Reacquire ownership first if we had released it.
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				/* delayed-write the dirty bp back */
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}
339055f5ff8SMatthew Dillon 
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exists other then ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * On return the io's dirty accounting has been cleared and an
 * asynchronous write is in flight (io->running set).
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);	/* no modification in progress */

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather then our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;	/* bawrite() below hands bp to the kernel */

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 *
	 * Only volu_list/meta_list membership is counted in
	 * locked_dirty_count / hammer_count_dirtybufs.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
		--hammer_count_dirtybufs;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 * hammer_io_complete() decrements io_running_count.
	 */
	io->running = 1;
	++io->hmp->io_running_count;
	bawrite(bp);
}
410055f5ff8SMatthew Dillon 
411055f5ff8SMatthew Dillon /************************************************************************
412055f5ff8SMatthew Dillon  *				BUFFER DIRTYING				*
413055f5ff8SMatthew Dillon  ************************************************************************
414055f5ff8SMatthew Dillon  *
415055f5ff8SMatthew Dillon  * These routines deal with dependancies created when IO buffers get
416055f5ff8SMatthew Dillon  * modified.  The caller must call hammer_modify_*() on a referenced
417055f5ff8SMatthew Dillon  * HAMMER structure prior to modifying its on-disk data.
418055f5ff8SMatthew Dillon  *
419055f5ff8SMatthew Dillon  * Any intent to modify an IO buffer acquires the related bp and imposes
420055f5ff8SMatthew Dillon  * various write ordering dependancies.
421055f5ff8SMatthew Dillon  */
422055f5ff8SMatthew Dillon 
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 *
 * 'count' is added to modify_refs; hammer_io_modify_done() drops it.
 * On the first modification the io is placed on the mount's per-type
 * modified list (volu/meta list membership also bumps the locked-dirty
 * accounting).
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do - already dirty and we still own
	 * the bp.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	/* regain ownership of the bp before the caller scribbles on it */
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
4740b075555SMatthew Dillon 
47510a5d1baSMatthew Dillon static __inline
47610a5d1baSMatthew Dillon void
47710a5d1baSMatthew Dillon hammer_io_modify_done(hammer_io_t io)
47810a5d1baSMatthew Dillon {
47910a5d1baSMatthew Dillon 	KKASSERT(io->modify_refs > 0);
48010a5d1baSMatthew Dillon 	--io->modify_refs;
48110a5d1baSMatthew Dillon }
48210a5d1baSMatthew Dillon 
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 *
 * base/len describe the region about to be modified; when len is
 * non-zero an undo record covering that region is generated first.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		/* region must lie within a single HAMMER buffer */
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}
504055f5ff8SMatthew Dillon 
505055f5ff8SMatthew Dillon /*
506*2f85fa4dSMatthew Dillon  * Caller intends to modify a buffer's ondisk structure.
507*2f85fa4dSMatthew Dillon  *
508*2f85fa4dSMatthew Dillon  * This is only allowed if we are the flusher or we have a ref on the
509*2f85fa4dSMatthew Dillon  * sync_lock.
510055f5ff8SMatthew Dillon  */
511055f5ff8SMatthew Dillon void
51236f82b23SMatthew Dillon hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
51336f82b23SMatthew Dillon 		     void *base, int len)
51446fe7ae1SMatthew Dillon {
515*2f85fa4dSMatthew Dillon 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
516*2f85fa4dSMatthew Dillon 
51710a5d1baSMatthew Dillon 	hammer_io_modify(&buffer->io, 1);
51847197d71SMatthew Dillon 	if (len) {
51947197d71SMatthew Dillon 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
52047197d71SMatthew Dillon 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
521059819e3SMatthew Dillon 		hammer_generate_undo(trans, &buffer->io,
52234d829f7SMatthew Dillon 				     buffer->zone2_offset + rel_offset,
52347197d71SMatthew Dillon 				     base, len);
52447197d71SMatthew Dillon 	}
52546fe7ae1SMatthew Dillon }
52646fe7ae1SMatthew Dillon 
/*
 * End a modification begun by hammer_modify_volume() (drops one
 * modify_refs count on the volume's io).
 */
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}
53210a5d1baSMatthew Dillon 
/*
 * End a modification begun by hammer_modify_buffer() (drops one
 * modify_refs count on the buffer's io).
 */
void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
53810a5d1baSMatthew Dillon 
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 *
 * NOTE(review): the entire body is currently compiled out (#if 0) and the
 * function is a no-op; the disabled code also contains a placeholder
 * ("XXX mod_list/entry") and would not compile if re-enabled as-is.
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			hkprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}
57066325755SMatthew Dillon 
571055f5ff8SMatthew Dillon /************************************************************************
572055f5ff8SMatthew Dillon  *				HAMMER_BIOOPS				*
573055f5ff8SMatthew Dillon  ************************************************************************
574055f5ff8SMatthew Dillon  *
575055f5ff8SMatthew Dillon  */
576055f5ff8SMatthew Dillon 
/*
 * Pre-IO initiation kernel callback - cluster build only
 *
 * Intentionally empty: HAMMER needs no work at IO start, but the hook
 * must exist in hammer_bioops.
 */
static void
hammer_io_start(struct buf *bp)
{
}
584055f5ff8SMatthew Dillon 
/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependancy if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	/* our hammer_io is the first (only) dependency on the bp */
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Clear the running state and wake anyone waiting for the
	 * mount-wide in-flight count to drain.
	 */
	if (iou->io.running) {
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	/*
	 * Wake up anyone sleeping in hammer_io_wait().
	 */
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If no lock references remain and someone at some point wanted
	 * us to flush (B_LOCKED test), try to dispose of the IO.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
62666325755SMatthew Dillon 
62766325755SMatthew Dillon /*
62866325755SMatthew Dillon  * Callback from kernel when it wishes to deallocate a passively
62910a5d1baSMatthew Dillon  * associated structure.  This mostly occurs with clean buffers
63010a5d1baSMatthew Dillon  * but it may be possible for a holding structure to be marked dirty
63110a5d1baSMatthew Dillon  * while its buffer is passively associated.
63266325755SMatthew Dillon  *
63366325755SMatthew Dillon  * If we cannot disassociate we set B_LOCKED to prevent the buffer
63466325755SMatthew Dillon  * from getting reused.
63546fe7ae1SMatthew Dillon  *
63646fe7ae1SMatthew Dillon  * WARNING: Because this can be called directly by getnewbuf we cannot
63746fe7ae1SMatthew Dillon  * recurse into the tree.  If a bp cannot be immediately disassociated
63846fe7ae1SMatthew Dillon  * our only recourse is to set B_LOCKED.
63966325755SMatthew Dillon  */
64066325755SMatthew Dillon static void
64166325755SMatthew Dillon hammer_io_deallocate(struct buf *bp)
64266325755SMatthew Dillon {
643055f5ff8SMatthew Dillon 	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
64366325755SMatthew Dillon 
	/*
	 * Deallocation is only attempted on buffers that are neither
	 * locked down nor still undergoing I/O.
	 */
644055f5ff8SMatthew Dillon 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
64546fe7ae1SMatthew Dillon 	if (iou->io.lock.refs > 0 || iou->io.modified) {
64610a5d1baSMatthew Dillon 		/*
64710a5d1baSMatthew Dillon 		 * It is not legal to disassociate a modified buffer.  This
64810a5d1baSMatthew Dillon 		 * case really shouldn't ever occur.
64910a5d1baSMatthew Dillon 		 */
651055f5ff8SMatthew Dillon 		bp->b_flags |= B_LOCKED;
652055f5ff8SMatthew Dillon 	} else {
65310a5d1baSMatthew Dillon 		/*
65410a5d1baSMatthew Dillon 		 * Disassociate the BP.  If the io has no refs left we
65510a5d1baSMatthew Dillon 		 * have to add it to the loose list.
65610a5d1baSMatthew Dillon 		 */
657055f5ff8SMatthew Dillon 		hammer_io_disassociate(iou, 0);
		/* volumes never go on the loose list; only buffer structures do */
65810a5d1baSMatthew Dillon 		if (iou->io.bp == NULL &&
65910a5d1baSMatthew Dillon 		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
66010a5d1baSMatthew Dillon 			KKASSERT(iou->io.mod_list == NULL);
66110a5d1baSMatthew Dillon 			iou->io.mod_list = &iou->io.hmp->lose_list;
66210a5d1baSMatthew Dillon 			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
66366325755SMatthew Dillon 		}
66466325755SMatthew Dillon 	}
66566325755SMatthew Dillon }
66666325755SMatthew Dillon 
66766325755SMatthew Dillon static int
66866325755SMatthew Dillon hammer_io_fsync(struct vnode *vp)
66966325755SMatthew Dillon {
	/* no-op bioops hook; always reports success */
67066325755SMatthew Dillon 	return(0);
67166325755SMatthew Dillon }
67266325755SMatthew Dillon 
67366325755SMatthew Dillon /*
67466325755SMatthew Dillon  * NOTE: will not be called unless we tell the kernel about the
67566325755SMatthew Dillon  * bioops.  Unused... we use the mount's VFS_SYNC instead.
67666325755SMatthew Dillon  */
67766325755SMatthew Dillon static int
67866325755SMatthew Dillon hammer_io_sync(struct mount *mp)
67966325755SMatthew Dillon {
	/* no-op; see note above - the mount's VFS_SYNC is used instead */
68066325755SMatthew Dillon 	return(0);
68166325755SMatthew Dillon }
68266325755SMatthew Dillon 
68366325755SMatthew Dillon static void
68466325755SMatthew Dillon hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
68566325755SMatthew Dillon {
	/* no-op: HAMMER does not move dependencies between buffers */
68666325755SMatthew Dillon }
68766325755SMatthew Dillon 
68866325755SMatthew Dillon /*
68966325755SMatthew Dillon  * I/O pre-check for reading and writing.  HAMMER only uses this for
69066325755SMatthew Dillon  * B_CACHE buffers so checkread just shouldn't happen, but if it does
69166325755SMatthew Dillon  * allow it.
69266325755SMatthew Dillon  *
693fbc6e32aSMatthew Dillon  * Writing is a different case.  We don't want the kernel to try to write
694fbc6e32aSMatthew Dillon  * out a buffer that HAMMER may be modifying passively or which has a
69510a5d1baSMatthew Dillon  * dependency.  In addition, kernel-demanded writes can only proceed for
69610a5d1baSMatthew Dillon  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
69710a5d1baSMatthew Dillon  * buffer types can only be explicitly written by the flusher.
698fbc6e32aSMatthew Dillon  *
69910a5d1baSMatthew Dillon  * checkwrite will only be called for bdwrite()n buffers.  If we return
70010a5d1baSMatthew Dillon  * success the kernel is guaranteed to initiate the buffer write.
70166325755SMatthew Dillon  */
70266325755SMatthew Dillon static int
70366325755SMatthew Dillon hammer_io_checkread(struct buf *bp)
70466325755SMatthew Dillon {
	/* always allow the read to proceed (0 = ok) */
70566325755SMatthew Dillon 	return(0);
70666325755SMatthew Dillon }
70766325755SMatthew Dillon 
70866325755SMatthew Dillon static int
70966325755SMatthew Dillon hammer_io_checkwrite(struct buf *bp)
71066325755SMatthew Dillon {
71110a5d1baSMatthew Dillon 	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
71266325755SMatthew Dillon 
71377062c8aSMatthew Dillon 	/*
71477062c8aSMatthew Dillon 	 * This shouldn't happen under normal operation.
71577062c8aSMatthew Dillon 	 */
	/* volume and meta-data buffers may only be written by the flusher */
71677062c8aSMatthew Dillon 	if (io->type == HAMMER_STRUCTURE_VOLUME ||
71777062c8aSMatthew Dillon 	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
71877062c8aSMatthew Dillon 		if (!panicstr)
71977062c8aSMatthew Dillon 			panic("hammer_io_checkwrite: illegal buffer");
72077062c8aSMatthew Dillon 		hkprintf("x");
72177062c8aSMatthew Dillon 		bp->b_flags |= B_LOCKED;
		/* 1 = refuse the write */
72277062c8aSMatthew Dillon 		return(1);
72377062c8aSMatthew Dillon 	}
724c9b9e29dSMatthew Dillon 
725fbc6e32aSMatthew Dillon 	/*
72610a5d1baSMatthew Dillon 	 * We can only clear the modified bit if the IO is not currently
72710a5d1baSMatthew Dillon 	 * undergoing modification.  Otherwise we may miss changes.
728b33e2cc0SMatthew Dillon 	 */
72910a5d1baSMatthew Dillon 	if (io->modify_refs == 0 && io->modified) {
73010a5d1baSMatthew Dillon 		KKASSERT(io->mod_list != NULL);
		/*
		 * Buffers on the volume or meta-data lists are counted in
		 * locked_dirty_count; adjust the counters as this buffer
		 * transitions to clean.
		 */
73110a5d1baSMatthew Dillon 		if (io->mod_list == &io->hmp->volu_list ||
73210a5d1baSMatthew Dillon 		    io->mod_list == &io->hmp->meta_list) {
73310a5d1baSMatthew Dillon 			--io->hmp->locked_dirty_count;
7349480ff55SMatthew Dillon 			--hammer_count_dirtybufs;
73510a5d1baSMatthew Dillon 		}
73610a5d1baSMatthew Dillon 		TAILQ_REMOVE(io->mod_list, io, mod_entry);
73710a5d1baSMatthew Dillon 		io->mod_list = NULL;
73810a5d1baSMatthew Dillon 		io->modified = 0;
73966325755SMatthew Dillon 	}
740f90dde4cSMatthew Dillon 
741f90dde4cSMatthew Dillon 	/*
742f90dde4cSMatthew Dillon 	 * The kernel is going to start the IO, set io->running.
743f90dde4cSMatthew Dillon 	 */
744f90dde4cSMatthew Dillon 	KKASSERT(io->running == 0);
745f90dde4cSMatthew Dillon 	io->running = 1;
746f90dde4cSMatthew Dillon 	++io->hmp->io_running_count;
	/* 0 = allow the write; the kernel is now guaranteed to initiate it */
747055f5ff8SMatthew Dillon 	return(0);
748055f5ff8SMatthew Dillon }
74966325755SMatthew Dillon 
7508cd0a023SMatthew Dillon /*
7518cd0a023SMatthew Dillon  * Return non-zero if the caller should flush the structure associated
7528cd0a023SMatthew Dillon  * with this io sub-structure.
7538cd0a023SMatthew Dillon  */
7548cd0a023SMatthew Dillon int
7558cd0a023SMatthew Dillon hammer_io_checkflush(struct hammer_io *io)
7568cd0a023SMatthew Dillon {
	/* flush when no bp is attached, or when the bp is marked B_LOCKED */
757055f5ff8SMatthew Dillon 	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
7588cd0a023SMatthew Dillon 		return(1);
759055f5ff8SMatthew Dillon 	}
7608cd0a023SMatthew Dillon 	return(0);
7618cd0a023SMatthew Dillon }
76266325755SMatthew Dillon 
76366325755SMatthew Dillon /*
76466325755SMatthew Dillon  * Return non-zero if we wish to delay the kernel's attempt to flush
76566325755SMatthew Dillon  * this buffer to disk.
76666325755SMatthew Dillon  */
76766325755SMatthew Dillon static int
76866325755SMatthew Dillon hammer_io_countdeps(struct buf *bp, int n)
76966325755SMatthew Dillon {
	/* 0 = never delay the kernel's flush */
77066325755SMatthew Dillon 	return(0);
77166325755SMatthew Dillon }
77266325755SMatthew Dillon 
/*
 * Buffer-cache callback vector.  Buffers associated with a hammer_io
 * structure (linked through the buffer's b_dep list) are routed through
 * these hooks by the kernel.
 */
77366325755SMatthew Dillon struct bio_ops hammer_bioops = {
77466325755SMatthew Dillon 	.io_start	= hammer_io_start,
77566325755SMatthew Dillon 	.io_complete	= hammer_io_complete,
77666325755SMatthew Dillon 	.io_deallocate	= hammer_io_deallocate,
77766325755SMatthew Dillon 	.io_fsync	= hammer_io_fsync,
77866325755SMatthew Dillon 	.io_sync	= hammer_io_sync,
77966325755SMatthew Dillon 	.io_movedeps	= hammer_io_movedeps,
78066325755SMatthew Dillon 	.io_countdeps	= hammer_io_countdeps,
78166325755SMatthew Dillon 	.io_checkread	= hammer_io_checkread,
78266325755SMatthew Dillon 	.io_checkwrite	= hammer_io_checkwrite,
78366325755SMatthew Dillon };
78466325755SMatthew Dillon 
785