xref: /dflybsd-src/sys/vfs/hammer/hammer_io.c (revision c9b9e29d7630384cf4c416763d41b09bb927bc40)
166325755SMatthew Dillon /*
2b84de5afSMatthew Dillon  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
366325755SMatthew Dillon  *
466325755SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
566325755SMatthew Dillon  * by Matthew Dillon <dillon@backplane.com>
666325755SMatthew Dillon  *
766325755SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
866325755SMatthew Dillon  * modification, are permitted provided that the following conditions
966325755SMatthew Dillon  * are met:
1066325755SMatthew Dillon  *
1166325755SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
1266325755SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
1366325755SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
1466325755SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in
1566325755SMatthew Dillon  *    the documentation and/or other materials provided with the
1666325755SMatthew Dillon  *    distribution.
1766325755SMatthew Dillon  * 3. Neither the name of The DragonFly Project nor the names of its
1866325755SMatthew Dillon  *    contributors may be used to endorse or promote products derived
1966325755SMatthew Dillon  *    from this software without specific, prior written permission.
2066325755SMatthew Dillon  *
2166325755SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2266325755SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2366325755SMatthew Dillon  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
2466325755SMatthew Dillon  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
2566325755SMatthew Dillon  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2666325755SMatthew Dillon  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
2766325755SMatthew Dillon  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2866325755SMatthew Dillon  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2966325755SMatthew Dillon  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
3066325755SMatthew Dillon  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
3166325755SMatthew Dillon  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3266325755SMatthew Dillon  * SUCH DAMAGE.
3366325755SMatthew Dillon  *
34*c9b9e29dSMatthew Dillon  * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.29 2008/05/04 09:06:45 dillon Exp $
3566325755SMatthew Dillon  */
3666325755SMatthew Dillon /*
3766325755SMatthew Dillon  * IO Primitives and buffer cache management
3866325755SMatthew Dillon  *
3966325755SMatthew Dillon  * All major data-tracking structures in HAMMER contain a struct hammer_io
4066325755SMatthew Dillon  * which is used to manage their backing store.  We use filesystem buffers
4166325755SMatthew Dillon  * for backing store and we leave them passively associated with their
4266325755SMatthew Dillon  * HAMMER structures.
4366325755SMatthew Dillon  *
4466325755SMatthew Dillon  * If the kernel tries to release a passively associated buf which we cannot
4566325755SMatthew Dillon  * yet let go we set B_LOCKED in the buffer and then actively released it
4666325755SMatthew Dillon  * later when we can.
4766325755SMatthew Dillon  */
4866325755SMatthew Dillon 
4966325755SMatthew Dillon #include "hammer.h"
5066325755SMatthew Dillon #include <sys/fcntl.h>
5166325755SMatthew Dillon #include <sys/nlookup.h>
5266325755SMatthew Dillon #include <sys/buf.h>
5366325755SMatthew Dillon #include <sys/buf2.h>
5466325755SMatthew Dillon 
5510a5d1baSMatthew Dillon static void hammer_io_modify(hammer_io_t io, int count);
56055f5ff8SMatthew Dillon static void hammer_io_deallocate(struct buf *bp);
57055f5ff8SMatthew Dillon 
58055f5ff8SMatthew Dillon /*
5910a5d1baSMatthew Dillon  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
6010a5d1baSMatthew Dillon  * an existing hammer_io structure which may have switched to another type.
61055f5ff8SMatthew Dillon  */
62055f5ff8SMatthew Dillon void
6310a5d1baSMatthew Dillon hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
64055f5ff8SMatthew Dillon {
6510a5d1baSMatthew Dillon 	io->hmp = hmp;
66055f5ff8SMatthew Dillon 	io->type = type;
67055f5ff8SMatthew Dillon }
68055f5ff8SMatthew Dillon 
/*
 * Switch an existing hammer_io to a new structure type.
 *
 * If the io is currently dirty it sits on one of the mount's modified-io
 * lists keyed by its type, so a type change requires removing it from the
 * old list (and backing out the locked/dirty accounting that only
 * volume and meta-data buffers contribute to) and re-inserting it on the
 * list matching the new type.  A clean io only needs its type updated.
 */
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		/*
		 * Only volume and meta-data buffers are counted in the
		 * locked-dirty statistics; undo and data buffers are not.
		 */
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;
	if (io->modified) {
		/*
		 * Re-queue on the list matching the new type, restoring
		 * the dirty accounting for the counted types.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}
10710a5d1baSMatthew Dillon 
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer:
 * the io must not be modified, and HAMMER must own the buffer
 * (released == 0) since we mess around with it.
 *
 * If 'elseit' is non-zero we also hand the buffer back to the kernel
 * via bqrelse(), transitioning released 0 -> 1; otherwise the caller
 * must already have released it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);		/* sever the b_dep worklist linkage */
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;	/* no longer pinned on our behalf */
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	/*
	 * The ondisk mapping pointed into the bp's data; it is invalid
	 * once the bp is gone.
	 */
	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
146fbc6e32aSMatthew Dillon 
/*
 * Wait for any physical IO to complete.
 *
 * Uses the tsleep_interlock()/tsleep() pairing: the interlock is armed
 * and 'waiting' is set *before* each check of 'running' so that a
 * wakeup issued by hammer_io_complete() between the check and the sleep
 * is not lost.  Runs inside a critical section to close the same race
 * against interrupt-time completion.
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			/* re-arm before re-testing to avoid lost wakeup */
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
169055f5ff8SMatthew Dillon 
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * Reads HAMMER_BUFSIZE bytes at io->offset from the device vnode.  On a
 * successful fresh read the buffer is wired to our bioops and linked
 * onto b_dep so the kernel calls us back before reusing it, and
 * released is cleared since we now hold an active lock on the bp.  If a
 * bp is already associated this is a no-op returning 0.
 *
 * Returns 0 on success or the error from bread().
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		/* a fresh association must start in a quiescent state */
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
19766325755SMatthew Dillon 
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against
 * background I/O by forcing the buffer to not be in a released state
 * before calling it (regetblk()+BUF_KERNPROC() re-acquires ownership).
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * Always returns 0.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		/* getblk() — no disk read, contents are garbage until cleared */
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);	/* dirty, but no modify_refs bump */
	vfs_bio_clrbuf(bp);
	return(0);
}
23466325755SMatthew Dillon 
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past
	 * (B_LOCKED set when we refused a prior deallocation attempt).
	 *
	 * Kernel-initiated flushes are only allowed for pure-data and
	 * undo buffers.  Meta-data and volume buffers can only be
	 * flushed explicitly by HAMMER.
	 */
	if (io->modified) {
		if (io->flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the provisio
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if (io->flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel
		 * while still dirty.  Volume and meta-data IO types must
		 * be explicitly flushed by HAMMER so they are retained
		 * (passively associated) here.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);	/* delayed write */
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}
327055f5ff8SMatthew Dillon 
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exists other then ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 * Consume the flush request regardless.
	 */
	if (io->modified == 0) {
		io->flush = 0;
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);	/* no modification in progress */

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 *
	 * Dequeue from the mod list and back out the dirty accounting
	 * (only volume/meta buffers are counted — see hammer_io_modify()).
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
		--hammer_count_dirtybufs;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;
	io->flush = 0;
	bp = io->bp;

	/*
	 * Acquire ownership (released variable set for clarity)
	 */
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		io->released = 0;
	}

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->released = 1;
	io->running = 1;
	++io->hmp->io_running_count;	/* decremented in hammer_io_complete() */
	bawrite(bp);
}
393055f5ff8SMatthew Dillon 
394055f5ff8SMatthew Dillon /************************************************************************
395055f5ff8SMatthew Dillon  *				BUFFER DIRTYING				*
396055f5ff8SMatthew Dillon  ************************************************************************
397055f5ff8SMatthew Dillon  *
398055f5ff8SMatthew Dillon  * These routines deal with dependancies created when IO buffers get
399055f5ff8SMatthew Dillon  * modified.  The caller must call hammer_modify_*() on a referenced
400055f5ff8SMatthew Dillon  * HAMMER structure prior to modifying its on-disk data.
401055f5ff8SMatthew Dillon  *
402055f5ff8SMatthew Dillon  * Any intent to modify an IO buffer acquires the related bp and imposes
403055f5ff8SMatthew Dillon  * various write ordering dependancies.
404055f5ff8SMatthew Dillon  */
405055f5ff8SMatthew Dillon 
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 *
 * 'count' is added to modify_refs and is paired with
 * hammer_io_modify_done(); a count of 0 dirties the io without taking a
 * modify reference (used by hammer_io_new()).
 *
 * On the first modification the io is queued on the mount's per-type
 * modified-io list and, for volume/meta-data buffers, the locked-dirty
 * accounting is bumped.  If the kernel currently owns the bp
 * (released != 0) ownership is re-acquired so the data can be written.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do: already dirty and we own the bp.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		/* reclaim ownership of the bp from the kernel */
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
4570b075555SMatthew Dillon 
45810a5d1baSMatthew Dillon static __inline
45910a5d1baSMatthew Dillon void
46010a5d1baSMatthew Dillon hammer_io_modify_done(hammer_io_t io)
46110a5d1baSMatthew Dillon {
46210a5d1baSMatthew Dillon 	KKASSERT(io->modify_refs > 0);
46310a5d1baSMatthew Dillon 	--io->modify_refs;
46410a5d1baSMatthew Dillon }
46510a5d1baSMatthew Dillon 
/*
 * Caller intends to modify 'len' bytes at 'base' within the volume
 * header's ondisk image.  Marks the volume's io modified (taking one
 * modify ref, released via hammer_modify_volume_done()) and, for a
 * non-zero len, generates an undo record covering the range so the
 * modification can be rolled back.  A len of 0 dirties the volume
 * without generating undo.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	hammer_io_modify(&volume->io, 1);

	if (len) {
		/* byte offset of base within the volume's ondisk image */
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		/* range must fall inside a single HAMMER buffer */
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}
480055f5ff8SMatthew Dillon 
/*
 * Caller intends to modify 'len' bytes at 'base' within the buffer's
 * ondisk image.  Marks the buffer's io modified (taking one modify ref,
 * released via hammer_modify_buffer_done()) and, for a non-zero len,
 * generates an undo record at the buffer's zone-2 offset plus the
 * relative offset of base.  A len of 0 dirties the buffer without
 * generating undo.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	hammer_io_modify(&buffer->io, 1);
	if (len) {
		/* byte offset of base within the buffer's ondisk image */
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		/* range must fall inside a single HAMMER buffer */
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
49946fe7ae1SMatthew Dillon 
/*
 * Release the modify ref taken by hammer_modify_volume().
 */
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}
50510a5d1baSMatthew Dillon 
/*
 * Release the modify ref taken by hammer_modify_buffer().
 */
void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
51110a5d1baSMatthew Dillon 
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * NOTE(review): the entire body is compiled out with #if 0 and the
 * function is currently a no-op.  The disabled code predates the
 * mod_list/mod_entry dirty-list bookkeeping (see the XXX marker inside)
 * and would need to dequeue the io and fix the dirty accounting before
 * it could be re-enabled.
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			kprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}
54366325755SMatthew Dillon 
544055f5ff8SMatthew Dillon /************************************************************************
545055f5ff8SMatthew Dillon  *				HAMMER_BIOOPS				*
546055f5ff8SMatthew Dillon  ************************************************************************
547055f5ff8SMatthew Dillon  *
548055f5ff8SMatthew Dillon  */
549055f5ff8SMatthew Dillon 
/*
 * Pre-IO initiation kernel callback (bioops).  Intentionally a no-op;
 * the original comment notes it existed for cluster builds only.
 */
static void
hammer_io_start(struct buf *bp)
{
}
557055f5ff8SMatthew Dillon 
/*
 * Post-IO completion kernel callback (bioops), invoked when I/O on a
 * HAMMER-associated buffer finishes.
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependancy if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	/* our hammer_io is linked first on the buffer's b_dep list */
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Clear the running state and maintain the mount-wide count of
	 * in-flight I/Os, waking anyone waiting for it to drain to zero.
	 */
	if (iou->io.running) {
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	/*
	 * Wake up anyone sleeping in hammer_io_wait().
	 */
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If no lock references remain and someone at some point wanted
	 * us to flush (B_LOCKED set by a refused deallocation), try to
	 * dispose of the IO now that the I/O has completed.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
59966325755SMatthew Dillon 
60066325755SMatthew Dillon /*
60166325755SMatthew Dillon  * Callback from kernel when it wishes to deallocate a passively
60210a5d1baSMatthew Dillon  * associated structure.  This mostly occurs with clean buffers
60310a5d1baSMatthew Dillon  * but it may be possible for a holding structure to be marked dirty
60410a5d1baSMatthew Dillon  * while its buffer is passively associated.
60566325755SMatthew Dillon  *
60666325755SMatthew Dillon  * If we cannot disassociate we set B_LOCKED to prevent the buffer
60766325755SMatthew Dillon  * from getting reused.
60846fe7ae1SMatthew Dillon  *
60946fe7ae1SMatthew Dillon  * WARNING: Because this can be called directly by getnewbuf we cannot
61046fe7ae1SMatthew Dillon  * recurse into the tree.  If a bp cannot be immediately disassociated
61146fe7ae1SMatthew Dillon  * our only recourse is to set B_LOCKED.
61266325755SMatthew Dillon  */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	/* never called on a locked or in-flight buffer */
	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.  Set B_LOCKED so the
		 * kernel does not reuse the buffer out from under us.
		 */
		bp->b_flags |= B_LOCKED;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list (volumes are excluded
		 * by the type check below).
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
		}
	}
}
63966325755SMatthew Dillon 
/*
 * bioops fsync hook.  Nothing to do; HAMMER handles its own flushing.
 */
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}
64566325755SMatthew Dillon 
64666325755SMatthew Dillon /*
64766325755SMatthew Dillon  * NOTE: will not be called unless we tell the kernel about the
64866325755SMatthew Dillon  * bioops.  Unused... we use the mount's VFS_SYNC instead.
64966325755SMatthew Dillon  */
static int
hammer_io_sync(struct mount *mp)
{
	/* unused stub -- the mount's VFS_SYNC performs the real work */
	return(0);
}
65566325755SMatthew Dillon 
/*
 * bioops hook for moving dependencies between buffers.  Unused by
 * HAMMER; intentionally a no-op.
 */
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
66066325755SMatthew Dillon 
66166325755SMatthew Dillon /*
66266325755SMatthew Dillon  * I/O pre-check for reading and writing.  HAMMER only uses this for
66366325755SMatthew Dillon  * B_CACHE buffers so checkread just shouldn't happen, but if it does
66466325755SMatthew Dillon  * allow it.
66566325755SMatthew Dillon  *
666fbc6e32aSMatthew Dillon  * Writing is a different case.  We don't want the kernel to try to write
667fbc6e32aSMatthew Dillon  * out a buffer that HAMMER may be modifying passively or which has a
66810a5d1baSMatthew Dillon  * dependancy.  In addition, kernel-demanded writes can only proceed for
66910a5d1baSMatthew Dillon  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
67010a5d1baSMatthew Dillon  * buffer types can only be explicitly written by the flusher.
671fbc6e32aSMatthew Dillon  *
67210a5d1baSMatthew Dillon  * checkwrite will only be called for bdwrite()n buffers.  If we return
67310a5d1baSMatthew Dillon  * success the kernel is guaranteed to initiate the buffer write.
67466325755SMatthew Dillon  */
static int
hammer_io_checkread(struct buf *bp)
{
	/* reads shouldn't normally get here (B_CACHE buffers); allow them */
	return(0);
}
68066325755SMatthew Dillon 
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * Volume and meta-data buffers may only be written explicitly by
	 * the flusher, never by a kernel-demanded write.
	 */
	KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME &&
		 io->type != HAMMER_STRUCTURE_META_BUFFER);

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Remove the io from its dirty list; the volume and meta lists
	 * additionally carry locked-dirty accounting that must be undone.
	 */
	if (io->modify_refs == 0 && io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &io->hmp->volu_list ||
		    io->mod_list == &io->hmp->meta_list) {
			--io->hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		io->modified = 0;
	}

	/*
	 * The kernel is going to start the IO, set io->running and
	 * account for the in-flight I/O on the mount.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	++io->hmp->io_running_count;
	return(0);
}
71366325755SMatthew Dillon 
7148cd0a023SMatthew Dillon /*
7158cd0a023SMatthew Dillon  * Return non-zero if the caller should flush the structure associated
7168cd0a023SMatthew Dillon  * with this io sub-structure.
7178cd0a023SMatthew Dillon  */
7188cd0a023SMatthew Dillon int
7198cd0a023SMatthew Dillon hammer_io_checkflush(struct hammer_io *io)
7208cd0a023SMatthew Dillon {
721055f5ff8SMatthew Dillon 	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
7228cd0a023SMatthew Dillon 		return(1);
723055f5ff8SMatthew Dillon 	}
7248cd0a023SMatthew Dillon 	return(0);
7258cd0a023SMatthew Dillon }
72666325755SMatthew Dillon 
72766325755SMatthew Dillon /*
72866325755SMatthew Dillon  * Return non-zero if we wish to delay the kernel's attempt to flush
72966325755SMatthew Dillon  * this buffer to disk.
73066325755SMatthew Dillon  */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	/* never delay the kernel's flush of a buffer */
	return(0);
}
73666325755SMatthew Dillon 
/*
 * bioops vector registered with the kernel buffer cache so HAMMER is
 * notified of the life cycle of buffers carrying b_dep associations.
 */
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
74866325755SMatthew Dillon 
749