/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.17 2008/05/18 01:48:50 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
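 *
 * Inodes queued to hmp->flush_list are stamped with a flush group
 * sequence number.  Each pass of the flusher thread flushes one group
 * and then finalizes the UNDO FIFO and meta-data.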
 */

#include "hammer.h"

static void hammer_flusher_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans);

#define HAMMER_FLUSHER_IMMEDIATE	16

/*
 * Signal the flusher and wait for the flush group we queued to complete.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher_td) {
		seq = hmp->flusher_next;
		if (hmp->flusher_signal == 0) {
			hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
			wakeup(&hmp->flusher_signal);
		}
		while ((int)(seq - hmp->flusher_done) > 0)
			tsleep(&hmp->flusher_done, 0, "hmrfls", 0);
	}
}

/*
 * Signal the flusher but do not wait for completion.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
		if (hmp->flusher_signal++ == 0)
			wakeup(&hmp->flusher_signal);
	}
}

/*
 * Create the flusher thread for a mount.
 */
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hmp->flusher_signal = 0;
	hmp->flusher_act = 0;
	hmp->flusher_done = 0;
	hmp->flusher_next = 1;
	lwkt_create(hammer_flusher_thread, hmp, &hmp->flusher_td, NULL,
		    0, -1, "hammer");
}

/*
 * Ask the flusher thread to exit and wait for it to terminate.
 */
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
		hmp->flusher_exiting = 1;
		while (hmp->flusher_td) {
			hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
			wakeup(&hmp->flusher_signal);
			tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
		}
	}
}

/*
 * The flusher thread's main loop: advance to the next flush group,
 * flush it, then wait for more work.
 */
static void
hammer_flusher_thread(void *arg)
{
	hammer_mount_t hmp = arg;

	for (;;) {
		while (hmp->flusher_lock)
			tsleep(&hmp->flusher_lock, 0, "hmrhld", 0);
		hmp->flusher_act = hmp->flusher_next;
		++hmp->flusher_next;
		hkprintf("F");
		hammer_flusher_clean_loose_ios(hmp);
		hammer_flusher_flush(hmp);
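		/*
		 * A second cleaning pass catches loose buffers generated
		 * by the flush itself.
		 */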
		hammer_flusher_clean_loose_ios(hmp);
		hmp->flusher_done = hmp->flusher_act;

		wakeup(&hmp->flusher_done);

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher_exiting && TAILQ_EMPTY(&hmp->flush_list))
			break;
		hkprintf("E");

		/*
		 * This is a hack until we can dispose of frontend buffer
		 * cache buffers on the frontend.
		 */
		if (hmp->flusher_signal &&
		    hmp->flusher_signal < HAMMER_FLUSHER_IMMEDIATE) {
			--hmp->flusher_signal;
			tsleep(&hmp->flusher_signal, 0, "hmrqwk", hz / 10);
		} else {
			while (hmp->flusher_signal == 0 &&
			       TAILQ_EMPTY(&hmp->flush_list)) {
				tsleep(&hmp->flusher_signal, 0, "hmrwwa", 0);
			}
			hmp->flusher_signal = 0;
		}
	}
	hmp->flusher_td = NULL;
	wakeup(&hmp->flusher_exiting);
	lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		KKASSERT(io->mod_list == &hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		hammer_ref(&io->lock);
		buffer = (void *)io;
		hammer_rel_buffer(buffer, 0);
	}
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	struct hammer_transaction trans;
	hammer_blockmap_t rootmap;
	hammer_inode_t ip;

	hammer_start_transaction_fls(&trans, hmp);
	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/*
	 * Flush all pending inodes
	 */
	while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
		/*
		 * Stop when we hit a different flush group
		 */
		if (ip->flush_group != hmp->flusher_act)
			break;

		/*
		 * Remove the inode from the flush list and inherit
		 * its reference, sync, and clean-up.
		 */
		TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
		ip->error = hammer_sync_inode(ip);
		hammer_flush_inode_done(ip);

		/*
		 * XXX this breaks atomicity
		 */
		if (hammer_must_finalize_undo(hmp)) {
			Debugger("Too many undos!!");
			hammer_flusher_finalize(&trans);
		}
	}
	hammer_flusher_finalize(&trans);
	hmp->flusher_tid = trans.tid;
	hammer_done_transaction(&trans);
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
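 *
 * The threshold is half of the total UNDO space, as computed from
 * hammer_undo_space() and hammer_undo_max() below.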
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
	if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
		hkprintf("*");
		return(1);
	} else {
		return(0);
	}
}

/*
 * To finalize the flush we finish flushing all undo and data buffers
 * still present, then we update the volume header and flush it,
 * then we flush out the meta-data (that can now be undone).
 *
 * Note that as long as the undo fifo's start and end points do not
 * match, we must always at least update the volume header.
 *
 * The sync_lock is used by other threads to issue modifying operations
 * to HAMMER media without crossing a synchronization boundary or messing
 * up the media synchronization operation.  Specifically, the pruning
 * and reblocking ioctls, and allowing the frontend strategy code to
 * allocate media data space.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_volume_t root_volume = trans->rootvol;
	hammer_blockmap_t rootmap;
	const int bmsize = sizeof(root_volume->ondisk->vol0_blockmap);
	hammer_io_t io;
	int count;
	int i;

	hammer_sync_lock_ex(trans);
	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/*
	 * Sync the blockmap to the root volume ondisk buffer and generate
	 * the appropriate undo record.  We have to generate the UNDO even
	 * though we flush the volume header along with the UNDO fifo update
	 * because the meta-data (including the volume header) is flushed
	 * after the fifo update, not before, and may have to be undone.
	 *
	 * No UNDOs can be created after this point until we finish the
	 * flush.
	 */
	if (root_volume->io.modified &&
	    bcmp(hmp->blockmap, root_volume->ondisk->vol0_blockmap, bmsize)) {
		hammer_modify_volume(trans, root_volume,
				     &root_volume->ondisk->vol0_blockmap,
				     bmsize);
		for (i = 0; i < HAMMER_MAX_ZONES; ++i)
			hammer_crc_set_blockmap(&hmp->blockmap[i]);
		bcopy(hmp->blockmap, root_volume->ondisk->vol0_blockmap,
		      bmsize);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * Flush the undo bufs, clear the undo cache.
	 */
	hammer_clear_undo_history(hmp);

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	if (count)
		hkprintf("X%d", count);

	/*
	 * Flush data bufs
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	if (count)
		hkprintf("Y%d", count);

	/*
	 * Wait for I/O to complete
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
	crit_exit();

	/*
	 * Update the root volume's next_tid field.  This field is updated
	 * without any related undo.
	 */
	if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		root_volume->ondisk->vol0_next_tid = hmp->next_tid;
		hammer_modify_volume_done(root_volume);
	}

	if (hammer_debug_recover_faults > 0) {
		if (--hammer_debug_recover_faults == 0) {
			Debugger("hammer_debug_recover_faults");
		}
	}

	/*
	 * Update the UNDO FIFO's first_offset.  Same deal.
	 */
	if (rootmap->first_offset != hmp->flusher_undo_start) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		rootmap->first_offset = hmp->flusher_undo_start;
		root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX].first_offset = rootmap->first_offset;
		hammer_crc_set_blockmap(&root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX]);
		hammer_modify_volume_done(root_volume);
	}
	hmp->flusher_undo_start = rootmap->next_offset;

	/*
	 * Flush the root volume header.
	 *
	 * If a crash occurs while the root volume header is being written
	 * we just have to hope that the undo range has been updated.  It
	 * should be done in one I/O but XXX this won't be perfect.
	 */
	if (root_volume->io.modified) {
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_io_flush(&root_volume->io);
	}

	/*
	 * Wait for I/O to complete
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
	crit_exit();

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	hammer_sync_unlock(trans);
	if (count)
		hkprintf("Z%d", count);
}