xref: /dflybsd-src/sys/vfs/hammer/hammer_flusher.c (revision c9b9e29d7630384cf4c416763d41b09bb927bc40)
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.12 2008/05/04 09:06:45 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */
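
/*
 * Rough flow of one flush cycle (a summary of the code below, not
 * additional mechanism): the frontend queues dirty inodes on flush_list
 * in numbered flush groups; the flusher thread drains one group per
 * iteration, then finalizes by flushing UNDO and data buffers, waiting
 * for that I/O, flushing the root volume header, waiting again, and
 * only then flushing meta-data buffers (which crash recovery can undo).
 */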

#include "hammer.h"

static void hammer_flusher_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans);

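/*
 * flusher_signal values at or above this threshold cause the flusher
 * to cycle immediately instead of taking the 10hz polling sleep in
 * hammer_flusher_thread().
 */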
#define HAMMER_FLUSHER_IMMEDIATE	16

void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher_td) {
		seq = hmp->flusher_next;
		if (hmp->flusher_signal == 0) {
			hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
			wakeup(&hmp->flusher_signal);
		}
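		/*
		 * Signed sequence-space comparison so the wait is still
		 * correct if the flusher sequence numbers wrap.
		 */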
		while ((int)(seq - hmp->flusher_done) > 0)
			tsleep(&hmp->flusher_done, 0, "hmrfls", 0);
	}
}

void
hammer_flusher_async(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
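		/*
		 * Count the request.  Only the 0->1 transition needs a
		 * wakeup; the flusher consumes the count itself.
		 */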
		if (hmp->flusher_signal++ == 0)
			wakeup(&hmp->flusher_signal);
	}
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hmp->flusher_signal = 0;
	hmp->flusher_act = 0;
	hmp->flusher_done = 0;
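	/*
	 * Sequencing starts at 1 so a sync queued before the first
	 * flush completes (flusher_done == 0) actually waits.
	 */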
	hmp->flusher_next = 1;
	lwkt_create(hammer_flusher_thread, hmp, &hmp->flusher_td, NULL,
		    0, -1, "hammer");
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
		hmp->flusher_exiting = 1;
		while (hmp->flusher_td) {
			hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
			wakeup(&hmp->flusher_signal);
			tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
		}
	}
}

static void
hammer_flusher_thread(void *arg)
{
	hammer_mount_t hmp = arg;

	for (;;) {
		/*
		 * Stall while another thread is holding the flusher off
		 * via flusher_lock, then start the next flush group.
		 */
		while (hmp->flusher_lock)
			tsleep(&hmp->flusher_lock, 0, "hmrhld", 0);
		hmp->flusher_act = hmp->flusher_next;
		++hmp->flusher_next;
		kprintf("F");
		hammer_flusher_clean_loose_ios(hmp);
		hammer_flusher_flush(hmp);
		hammer_flusher_clean_loose_ios(hmp);
		hmp->flusher_done = hmp->flusher_act;

		wakeup(&hmp->flusher_done);

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher_exiting && TAILQ_EMPTY(&hmp->flush_list))
			break;
		kprintf("E");

		/*
		 * This is a hack until we can dispose of frontend buffer
		 * cache buffers on the frontend.  Poll at 10hz while
		 * signals trickle in, consuming one queued signal per
		 * tick, so bursts of requests coalesce into periodic
		 * flushes.
		 */
		if (hmp->flusher_signal &&
		    hmp->flusher_signal < HAMMER_FLUSHER_IMMEDIATE) {
			--hmp->flusher_signal;
			tsleep(&hmp->flusher_signal, 0, "hmrqwk", hz / 10);
		} else {
			while (hmp->flusher_signal == 0 &&
			       TAILQ_EMPTY(&hmp->flush_list)) {
				tsleep(&hmp->flusher_signal, 0, "hmrwwa", 0);
			}
			hmp->flusher_signal = 0;
		}
	}
	hmp->flusher_td = NULL;
	wakeup(&hmp->flusher_exiting);
	lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends: buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * I/O completes on a buffer with no references left.
	 */
	while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		KKASSERT(io->mod_list == &hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		hammer_ref(&io->lock);
		buffer = (void *)io;
		hammer_rel_buffer(buffer, 0);
	}
}

/*
 * Flush the current flush group: sync every inode queued to it, then
 * finalize the flush.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	struct hammer_transaction trans;
	hammer_blockmap_t rootmap;
	hammer_inode_t ip;

	hammer_start_transaction_fls(&trans, hmp);
	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
		/*
		 * Stop when we hit a different flush group.
		 */
		if (ip->flush_group != hmp->flusher_act)
			break;

		/*
		 * Remove the inode from the flush list, inheriting its
		 * reference, then sync and clean it up.
		 */
		TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
		ip->error = hammer_sync_inode(ip);
		hammer_flush_inode_done(ip);

		/*
		 * XXX this breaks atomicity
		 */
		if (hammer_must_finalize_undo(hmp)) {
			Debugger("Too many undos!!");
			hammer_flusher_finalize(&trans);
		}
	}
	hammer_flusher_finalize(&trans);
	hammer_done_transaction(&trans);
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
	if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
		kprintf("*");
		return(1);
	} else {
		return(0);
	}
}

/*
 * To finalize the flush we finish flushing all undo and data buffers
 * still present, then we update the volume header and flush it,
 * then we flush out the meta-data (that can now be undone).
 *
 * Note that as long as the undo fifo's start and end points do not
 * match, we always must at least update the volume header.
 *
 * The sync_lock is used by other threads to issue modifying operations
 * to HAMMER media without crossing a synchronization boundary or messing
 * up the media synchronization operation.  Specifically, this covers
 * the pruning and reblocking ioctls and allows the frontend strategy
 * code to allocate media data space.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_volume_t root_volume = trans->rootvol;
	hammer_blockmap_t rootmap;
	const int bmsize = sizeof(root_volume->ondisk->vol0_blockmap);
	hammer_io_t io;
	int count;

	hammer_lock_ex(&hmp->sync_lock);
	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/*
	 * Sync the blockmap to the root volume ondisk buffer and generate
	 * the appropriate undo record.  We have to generate the UNDO even
	 * though we flush the volume header along with the UNDO fifo update
	 * because the meta-data (including the volume header) is flushed
	 * after the fifo update, not before, and may have to be undone.
	 *
	 * No UNDOs can be created after this point until we finish the
	 * flush.
	 */
	if (root_volume->io.modified &&
	    bcmp(hmp->blockmap, root_volume->ondisk->vol0_blockmap, bmsize)) {
		hammer_modify_volume(trans, root_volume,
			    &root_volume->ondisk->vol0_blockmap,
			    bmsize);
		bcopy(hmp->blockmap, root_volume->ondisk->vol0_blockmap,
		      bmsize);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * Flush the undo bufs, clear the undo cache.
	 */
	hammer_clear_undo_history(hmp);

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
		++count;
	}
	if (count)
		kprintf("X%d", count);

	/*
	 * Flush data bufs
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
		++count;
	}
	if (count)
		kprintf("Y%d", count);

	/*
	 * Wait for I/O to complete.  The undo and data buffers must be
	 * on-media before we update the volume header below.
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
	crit_exit();

	/*
	 * Update the root volume's next_tid field.  This field is updated
	 * without any related undo.
	 */
	if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		root_volume->ondisk->vol0_next_tid = hmp->next_tid;
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * Update the UNDO FIFO's first_offset.  Same deal.
	 */
	if (rootmap->first_offset != hmp->flusher_undo_start) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		rootmap->first_offset = hmp->flusher_undo_start;
		root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX].first_offset = rootmap->first_offset;
		hammer_modify_volume_done(root_volume);
	}
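
	/*
	 * Record where the next flush cycle's UNDO range will begin.  The
	 * next finalize propagates this value into first_offset above.
	 */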
	trans->hmp->flusher_undo_start = rootmap->next_offset;

	/*
	 * Flush the root volume header.
	 *
	 * If a crash occurs while the root volume header is being written
	 * we just have to hope that the undo range has been updated.  It
	 * should be done in one I/O but XXX this won't be perfect.
	 */
	if (root_volume->io.modified)
		hammer_io_flush(&root_volume->io);

	/*
	 * Wait for I/O to complete
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
	crit_exit();

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
		++count;
	}
	hammer_unlock(&hmp->sync_lock);
	if (count)
		kprintf("Z%d", count);
}