/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/buf.h>
*/

#include "hammer2.h"

#define HAMMER2_FLUSH_DEPTH_LIMIT	60	/* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	int		error;			/* cumulative error */
	int		flags;
#ifdef HAMMER2_SCAN_DEBUG
	long		scan_count;
	long		scan_mod_count;
	long		scan_upd_count;
	long		scan_onf_count;
	long		scan_del_count;
	long		scan_btype[7];
#endif
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static int hammer2_flush_core(hammer2_flush_info_t *info,
		hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction.  Interlocks against just the
 *			  COPYQ portion of an ISFLUSH transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction.  Interlocks against other flush
 *			  transactions.
 *
 *			  When COPYQ is also specified, waits for the count
 *			  to drop to 1.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction.  No interlock.
 *
 * TRANS_SIDEQ		- Run the sideq (only tested in trans_done())
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Interlock against other flush transactions.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting a strategy transaction from the buffer
			 * cache, or a VM getpages/putpages through the
			 * buffer cache.  We must allow such transactions in
			 * all situations to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
		} else {
			/*
			 * Normal transaction.  We do not interlock against
			 * BUFCACHE or ISFLUSH.
			 *
			 * Note that vnode locks may be held going into
			 * this call.
			 *
			 * NOTE: Remember that non-modifying operations
			 *	 such as read, stat, readdir, etc, do
			 *	 not use transactions.
			 */
			nflags = (oflags | flags) + 1;
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
			/* retry */
		} else {
			cpu_pause();
			/* retry */
		}
		/* retry */
	}

#if 0
	/*
	 * When entering a FLUSH transaction with COPYQ set, wait for the
	 * transaction count to drop to 1 (our flush transaction only)
	 * before proceeding.
	 *
	 * This waits for all non-flush transactions to complete and blocks
	 * new non-flush transactions from starting until COPYQ is cleared.
	 * (the flush will then proceed after clearing COPYQ).  This should
	 * be a very short stall on modifying operations.
	 */
	while ((flags & HAMMER2_TRANS_ISFLUSH) &&
	       (flags & HAMMER2_TRANS_COPYQ)) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		if ((oflags & HAMMER2_TRANS_MASK) == 1)
			break;
		nflags = oflags | HAMMER2_TRANS_WAITING;
		tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans2", hz);
		}
	}
#endif
}
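
/*
 * Illustrative usage sketch only (kept under #if 0, not compiled): roughly
 * how a modifying frontend operation is expected to bracket its work with
 * the transaction API above.  The function and XOP helper names are
 * placeholders, not calls that exist in this driver.
 */
#if 0
static int
example_frontend_modify(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;
	int error;

	/*
	 * Normal (non-flush) transaction; may briefly interlock against
	 * the COPYQ portion of a concurrent flush transaction.
	 */
	hammer2_trans_init(pmp, 0);

	/* each backend XOP run in sequence gets its own CLC modify_tid */
	mtid = hammer2_trans_sub(pmp);
	error = example_backend_xop(pmp, mtid);		/* placeholder */

	/*
	 * Finish the transaction.  TRANS_SIDEQ lets trans_done() consider
	 * running the sideq of dirty inodes accumulated by frontend ops.
	 */
	hammer2_trans_done(pmp, HAMMER2_TRANS_SIDEQ);
	return (error);
}
#endif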

/*
 * Start a sub-transaction; there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}
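
/*
 * Illustrative sketch only (kept under #if 0, not compiled): per the
 * comment above, a transaction that runs several XOPs back-to-back pulls
 * a fresh mtid for each one.  The XOP helper names are placeholders.
 */
#if 0
static void
example_two_xops(hammer2_pfs_t *pmp, hammer2_inode_t *ip)
{
	hammer2_tid_t mtid;

	mtid = hammer2_trans_sub(pmp);
	example_xop_first(ip, mtid);		/* placeholder XOP #1 */

	mtid = hammer2_trans_sub(pmp);
	example_xop_second(ip, mtid);		/* placeholder XOP #2 */
}
#endif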

void
hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags)
{
	atomic_set_int(&pmp->trans.flags, flags);
}

/*
 * Typically used to clear trans flags asynchronously.  If TRANS_WAITING
 * is in the mask, and was previously set, this function will wake up
 * any waiters.
 */
void
hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		nflags = oflags & ~flags;
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags ^ nflags) & HAMMER2_TRANS_WAITING)
				wakeup(&pmp->trans.sync_wait);
			break;
		}
		cpu_pause();
		/* retry */
	}
}
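
/*
 * Illustrative sketch only (kept under #if 0, not compiled): one way the
 * flush side might use setflags/clearflags.  Clearing a flag while
 * TRANS_WAITING is set wakes any interlocked waiters on trans.sync_wait.
 * The exact flag combination here is an example, not a prescription.
 */
#if 0
	/* flush transaction that also interlocks modifying frontend ops */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_COPYQ);

	/* ... stage dirty inodes for the flush ... */

	/* drop the COPYQ interlock; blocked frontend transactions resume */
	hammer2_trans_clearflags(pmp, HAMMER2_TRANS_COPYQ);
#endif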

void
hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

#if 0
	/*
	 * Modifying ops on the front-end can cause dirty inodes to
	 * build up in the sideq.  We don't flush these on inactive/reclaim
	 * due to potential deadlocks, so we have to deal with them from
	 * inside other nominal modifying front-end transactions.
	 */
	if ((flags & HAMMER2_TRANS_SIDEQ) &&
	    pmp->sideq_count > hammer2_limit_dirty_inodes / 2 &&
	    pmp->sideq_count > (pmp->inum_count >> 3) &&
	    pmp->mp) {
		speedup_syncer(pmp->mp);
	}
#endif

	/*
	 * Clean-up the transaction.  Wakeup any waiters when finishing
	 * a flush transaction or transitioning the non-flush transaction
	 * count from 2->1 while a flush transaction is pending.
	 */
	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);

		nflags = (oflags - 1) & ~flags;
		if (flags & HAMMER2_TRANS_ISFLUSH) {
			nflags &= ~HAMMER2_TRANS_WAITING;
		}
		if ((oflags & (HAMMER2_TRANS_ISFLUSH|HAMMER2_TRANS_MASK)) ==
		    (HAMMER2_TRANS_ISFLUSH|2)) {
			nflags &= ~HAMMER2_TRANS_WAITING;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags ^ nflags) & HAMMER2_TRANS_WAITING)
				wakeup(&pmp->trans.sync_wait);
			break;
		}
		cpu_pause();
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}
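
/*
 * Illustrative sketch only (kept under #if 0, not compiled): a create
 * path would typically reserve the inode number up front and hand it to
 * whatever builds the new inode.  The helper name is a placeholder.
 */
#if 0
	hammer2_tid_t inum;

	inum = hammer2_trans_newinum(pmp);
	example_inode_create(dip, inum);	/* placeholder */
#endif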

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0);
#endif
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_xop_inode_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *	HAMMER2_FLUSH_TOP	Indicates that this is the top of the flush.
 *				Is cleared for the recursion.
 *
 *	HAMMER2_FLUSH_ALL	Recurse everything
 *
 *	HAMMER2_FLUSH_INODE_STOP
 *				Stop at PFS inode or normal inode boundary
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_flush_info_t info;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL).  If not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	loops = 0;

	for (;;) {
		/*
		 * [re]flush chain as the deep recursion may have generated
		 * additional modifications.
		 */
		if (info.parent != chain->parent) {
			if (hammer2_debug & 0x0040) {
				kprintf("LOST CHILD4 %p->%p "
					"(actual parent %p)\n",
					info.parent, chain, chain->parent);
			}
			hammer2_chain_drop(info.parent);
			info.parent = chain->parent;
			hammer2_chain_ref(info.parent);
		}
		if (hammer2_flush_core(&info, chain, flags) == 0)
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
#ifdef HAMMER2_SCAN_DEBUG
	if (info.scan_count >= 10)
		kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
			"bt(%ld,%ld,%ld,%ld,%ld,%ld)\n",
			info.scan_count,
			info.scan_mod_count,
			info.scan_upd_count,
			info.scan_onf_count,
			info.scan_del_count,
			info.scan_btype[1],
			info.scan_btype[2],
			info.scan_btype[3],
			info.scan_btype[4],
			info.scan_btype[5],
			info.scan_btype[6]);
#endif
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
	return (info.error);
}
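
/*
 * Illustrative sketch only (kept under #if 0, not compiled): a backend
 * sync path flushing a single inode chain.  The chain must be locked on
 * call (and stays locked); FLUSH_TOP marks the top of this flush and
 * FLUSH_INODE_STOP keeps the recursion from crossing into other inodes.
 * The surrounding ref/lock sequence is shown generically.
 */
#if 0
	int error;

	hammer2_chain_ref(chain);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_INODE_STOP);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
	if (error)
		kprintf("hammer2: inode flush error %08x\n", error);
#endif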

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * If non-zero is returned, the chain's parent changed during the flush and
 * the caller must retry the operation.
 *
 * (1) Determine if this node is a candidate for the flush; return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 *			WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static int
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int save_error;
	int retry;

	retry = 0;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0)
		return 0;

	hmp = chain->hmp;

	/*
	 * NOTE: parent can be NULL, usually due to destroy races.
	 */
	parent = info->parent;
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 *
	 * We must be careful on cold stops, which often occur on inode
	 * boundaries due to the way hammer2_vfs_sync() sequences the flush.
	 * Be sure to issue an appropriate chain_setflush().
	 */
	if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
	    (flags & HAMMER2_FLUSH_ALL) == 0 &&
	    (flags & HAMMER2_FLUSH_TOP) == 0 &&
	    chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots that have been mounted.
		 *
		 * (If the PFS has not been mounted there may not be
		 *  anything monitoring its chains and it's up to us
		 *  to flush it).
		 *
		 * The typical sequence is to flush dirty PFS's starting at
		 * their root downward, then flush the device root (vchain).
		 * It is this second flush that typically leaves out the
		 * ALL flag.
		 *
		 * However we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
		goto done;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		   (flags & HAMMER2_FLUSH_INODE_STOP) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * When FLUSH_INODE_STOP is specified we are being asked not
		 * to include any inode changes for inodes we encounter,
		 * with the exception of the inode that the flush began with.
		 * So: INODE, INODE_STOP, and TOP==0 basically.
		 *
		 * Dirty inodes are flushed based on the hammer2_inode
		 * in-memory structure; issuing a chain_setflush() here
		 * will only cause unnecessary traversals of the topology.
		 */
		goto done;
#if 0
		/*
		 * If FLUSH_INODE_STOP is specified and both ALL and TOP
		 * are clear, we must not flush the chain.  The chain should
		 * have already been flushed and any further ONFLUSH/UPDATE
		 * setting will be related to the next flush.
		 *
		 * This feature allows us to flush inodes independently of
		 * each other and meta-data above the inodes separately.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			if (parent)
				hammer2_chain_setflush(parent);
		}
#endif
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		panic("hammer2: flush depth limit");
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * Pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING! The recursion will unlock/relock info->parent
		 *	    (which is 'chain'), potentially allowing it
		 *	    to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;

		/*
		 * We may have to do this twice to catch any indirect
		 * block maintenance that occurs.
		 */
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
				NULL, hammer2_flush_recurse, info);
		}
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * We continue to try to update the chain on lower-level errors, but
	 * the flush code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 * occurred?
	 */

	/*
	 * Both parent and chain must be locked in order to flush chain,
	 * in order to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_ref_hold(chain);
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(chain);

	/*
	 * Can't process if we can't access their content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		if (hammer2_debug & 0x0040) {
			kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
		}
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		retry = 1;
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp, -1);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * These do not error per se since their data does
			 * not need to be re-read from media on lock.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data; the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed, try to avoid unnecessary I/O
		 * that might not have yet occurred.  Remove the data range
		 * from dedup candidacy and attempt to invalidate the
		 * potentially dirty portion of the I/O buffer.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_dedup_delete(hmp,
						chain->bref.type,
						chain->bref.data_off,
						chain->bytes);
#if 0
			hammer2_io_t *dio;
			if (chain->dio) {
				hammer2_io_inval(chain->dio,
						 chain->bref.data_off,
						 chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  1)) != NULL) {
				hammer2_io_inval(dio,
						 chain->bref.data_off,
						 chain->bytes);
				hammer2_io_putblk(&dio);
			}
#endif
		}
	}
9512d60b848STomohiro Kusumi
9522d60b848STomohiro Kusumi /*
9532d60b848STomohiro Kusumi * If UPDATE is set the parent block table may need to be updated.
9542d60b848STomohiro Kusumi * This can fail if the hammer2_chain_modify() fails.
9552d60b848STomohiro Kusumi *
9562d60b848STomohiro Kusumi * NOTE: UPDATE may be set on vchain or fchain in which case
9572d60b848STomohiro Kusumi * parent could be NULL, or on an inode that has not yet
9582d60b848STomohiro Kusumi * been inserted into the radix tree. It's easiest to allow
9592d60b848STomohiro Kusumi * the case and test for NULL. parent can also wind up being
9602d60b848STomohiro Kusumi * NULL due to a deletion so we need to handle the case anyway.
9612d60b848STomohiro Kusumi *
9622d60b848STomohiro Kusumi * NOTE: UPDATE can be set when chains are renamed into or out of
9632d60b848STomohiro Kusumi * an indirect block, without the chain itself being flagged
9642d60b848STomohiro Kusumi * MODIFIED.
9652d60b848STomohiro Kusumi *
9662d60b848STomohiro Kusumi * If no parent exists we can just clear the UPDATE bit. If the
9672d60b848STomohiro Kusumi * chain gets reattached later on the bit will simply get set
9682d60b848STomohiro Kusumi * again.
9692d60b848STomohiro Kusumi */
9702d60b848STomohiro Kusumi if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
9712d60b848STomohiro Kusumi atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
9722d60b848STomohiro Kusumi
9732d60b848STomohiro Kusumi /*
9742d60b848STomohiro Kusumi * When flushing an inode outside of a FLUSH_FSSYNC we must NOT
9752d60b848STomohiro Kusumi * update the parent block table to point at the flushed inode.
9762d60b848STomohiro Kusumi * The block table should only ever be updated by the filesystem
9772d60b848STomohiro Kusumi * sync code. If we do, inode<->inode dependencies (such as
9782d60b848STomohiro Kusumi * directory entries vs inode nlink count) can wind up not being
9792d60b848STomohiro Kusumi * flushed together and result in a broken topology if a crash/reboot
9802d60b848STomohiro Kusumi * occurs at the wrong time.
9812d60b848STomohiro Kusumi */
9822d60b848STomohiro Kusumi if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
9832d60b848STomohiro Kusumi (flags & HAMMER2_FLUSH_INODE_STOP) &&
9842d60b848STomohiro Kusumi (flags & HAMMER2_FLUSH_FSSYNC) == 0 &&
9852d60b848STomohiro Kusumi (flags & HAMMER2_FLUSH_ALL) == 0 &&
9862d60b848STomohiro Kusumi chain->pmp && chain->pmp->mp) {
9872d60b848STomohiro Kusumi #ifdef HAMMER2_DEBUG_SYNC
9882d60b848STomohiro Kusumi kprintf("inum %ld do not update parent, non-fssync\n",
9892d60b848STomohiro Kusumi (long)chain->bref.key);
9902d60b848STomohiro Kusumi #endif
9912d60b848STomohiro Kusumi goto skipupdate;
9922d60b848STomohiro Kusumi }
9932d60b848STomohiro Kusumi #ifdef HAMMER2_DEBUG_SYNC
9942d60b848STomohiro Kusumi if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
9952d60b848STomohiro Kusumi kprintf("inum %ld update parent\n", (long)chain->bref.key);
9962d60b848STomohiro Kusumi #endif
9972d60b848STomohiro Kusumi
9982d60b848STomohiro Kusumi /*
9992d60b848STomohiro Kusumi * The chain may need its blockrefs updated in the parent, normal
10002d60b848STomohiro Kusumi * path.
10012d60b848STomohiro Kusumi */
10022d60b848STomohiro Kusumi if (chain->flags & HAMMER2_CHAIN_UPDATE) {
10032d60b848STomohiro Kusumi hammer2_blockref_t *base;
10042d60b848STomohiro Kusumi int count;
10052d60b848STomohiro Kusumi
10062d60b848STomohiro Kusumi /*
10072d60b848STomohiro Kusumi * Clear UPDATE flag, mark parent modified, update its
10082d60b848STomohiro Kusumi * modify_tid if necessary, and adjust the parent blockmap.
10092d60b848STomohiro Kusumi */
10102d60b848STomohiro Kusumi atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
10112d60b848STomohiro Kusumi
10122d60b848STomohiro Kusumi /*
10132d60b848STomohiro Kusumi * (optional code)
10142d60b848STomohiro Kusumi *
10152d60b848STomohiro Kusumi * Avoid actually modifying and updating the parent if it
10162d60b848STomohiro Kusumi * was flagged for destruction. This can greatly reduce
10172d60b848STomohiro Kusumi * disk I/O in large tree removals because the
10182d60b848STomohiro Kusumi * hammer2_io_setinval() call in the upward recursion
10192d60b848STomohiro Kusumi * (see MODIFIED code above) can only handle a few cases.
10202d60b848STomohiro Kusumi */
10212d60b848STomohiro Kusumi if (parent->flags & HAMMER2_CHAIN_DESTROY) {
10222d60b848STomohiro Kusumi if (parent->bref.modify_tid < chain->bref.modify_tid) {
10232d60b848STomohiro Kusumi parent->bref.modify_tid =
10242d60b848STomohiro Kusumi chain->bref.modify_tid;
10252d60b848STomohiro Kusumi }
10262d60b848STomohiro Kusumi atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BLKMAPPED |
10272d60b848STomohiro Kusumi HAMMER2_CHAIN_BLKMAPUPD);
10282d60b848STomohiro Kusumi goto skipupdate;
10292d60b848STomohiro Kusumi }
10302d60b848STomohiro Kusumi
10312d60b848STomohiro Kusumi /*
10322d60b848STomohiro Kusumi * The flusher is responsible for deleting empty indirect
10332d60b848STomohiro Kusumi * blocks at this point. If we don't do this, no major harm
10342d60b848STomohiro Kusumi * will be done but the empty indirect blocks will stay in
10352d60b848STomohiro Kusumi * the topology and make it messy and inefficient.
10362d60b848STomohiro Kusumi *
10372d60b848STomohiro Kusumi * The flusher is also responsible for collapsing the
10382d60b848STomohiro Kusumi * content of an indirect block into its parent whenever
10392d60b848STomohiro Kusumi * possible (with some hysteresis). Not doing this will also
10402d60b848STomohiro Kusumi * not harm the topology, but would make it messy and
10412d60b848STomohiro Kusumi * inefficient.
10422d60b848STomohiro Kusumi */
10432d60b848STomohiro Kusumi if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
10442d60b848STomohiro Kusumi if (hammer2_chain_indirect_maintenance(parent, chain))
10452d60b848STomohiro Kusumi goto skipupdate;
10462d60b848STomohiro Kusumi }
10472d60b848STomohiro Kusumi
10482d60b848STomohiro Kusumi /*
10492d60b848STomohiro Kusumi * We are updating the parent's blockmap; the parent must
10502d60b848STomohiro Kusumi * be marked modified. If this fails we re-set the UPDATE
10512d60b848STomohiro Kusumi * flag in the child.
10522d60b848STomohiro Kusumi *
10532d60b848STomohiro Kusumi * NOTE! A modification error can be ENOSPC. We still want
10542d60b848STomohiro Kusumi * to flush modified chains recursively, not break out,
10552d60b848STomohiro Kusumi * so we just skip the update in this situation and
10562d60b848STomohiro Kusumi * continue. That is, we still need to try to clean
10572d60b848STomohiro Kusumi * out dirty chains and buffers.
10582d60b848STomohiro Kusumi *
10592d60b848STomohiro Kusumi * This may not help bulkfree though. XXX
10602d60b848STomohiro Kusumi */
10612d60b848STomohiro Kusumi save_error = hammer2_chain_modify(parent, 0, 0, 0);
10622d60b848STomohiro Kusumi if (save_error) {
10632d60b848STomohiro Kusumi info->error |= save_error;
10642d60b848STomohiro Kusumi kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
10652d60b848STomohiro Kusumi parent->bref.data_off, parent->bref.type,
10662d60b848STomohiro Kusumi save_error);
10672d60b848STomohiro Kusumi atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
10682d60b848STomohiro Kusumi goto skipupdate;
10692d60b848STomohiro Kusumi }
10702d60b848STomohiro Kusumi if (parent->bref.modify_tid < chain->bref.modify_tid)
10712d60b848STomohiro Kusumi parent->bref.modify_tid = chain->bref.modify_tid;
10722d60b848STomohiro Kusumi
10732d60b848STomohiro Kusumi /*
10742d60b848STomohiro Kusumi * Calculate blockmap pointer
10752d60b848STomohiro Kusumi */
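/*
 * base will point at the first blockref of the parent's embedded
 * block array and count is the number of entries in that array.  A
 * NULL base means the array is not directly accessible (parent data
 * not resident, or a DIRECTDATA inode) and the blocktable update
 * below is skipped.
 */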
10762d60b848STomohiro Kusumi switch(parent->bref.type) {
10772d60b848STomohiro Kusumi case HAMMER2_BREF_TYPE_INODE:
10782d60b848STomohiro Kusumi /*
10792d60b848STomohiro Kusumi * Access the inode's block array. However, there is
10802d60b848STomohiro Kusumi * no block array if the inode is flagged DIRECTDATA.
10812d60b848STomohiro Kusumi */
10822d60b848STomohiro Kusumi if (parent->data &&
10832d60b848STomohiro Kusumi (parent->data->ipdata.meta.op_flags &
10842d60b848STomohiro Kusumi HAMMER2_OPFLAG_DIRECTDATA) == 0) {
10852d60b848STomohiro Kusumi base = &parent->data->
10862d60b848STomohiro Kusumi ipdata.u.blockset.blockref[0];
10872d60b848STomohiro Kusumi } else {
10882d60b848STomohiro Kusumi base = NULL;
10892d60b848STomohiro Kusumi }
10902d60b848STomohiro Kusumi count = HAMMER2_SET_COUNT;
10912d60b848STomohiro Kusumi break;
10922d60b848STomohiro Kusumi case HAMMER2_BREF_TYPE_INDIRECT:
10932d60b848STomohiro Kusumi case HAMMER2_BREF_TYPE_FREEMAP_NODE:
10942d60b848STomohiro Kusumi if (parent->data)
10952d60b848STomohiro Kusumi base = &parent->data->npdata[0];
10962d60b848STomohiro Kusumi else
10972d60b848STomohiro Kusumi base = NULL;
10982d60b848STomohiro Kusumi count = parent->bytes / sizeof(hammer2_blockref_t);
10992d60b848STomohiro Kusumi break;
11002d60b848STomohiro Kusumi case HAMMER2_BREF_TYPE_VOLUME:
11012d60b848STomohiro Kusumi base = &chain->hmp->voldata.sroot_blockset.blockref[0];
11022d60b848STomohiro Kusumi count = HAMMER2_SET_COUNT;
11032d60b848STomohiro Kusumi break;
11042d60b848STomohiro Kusumi case HAMMER2_BREF_TYPE_FREEMAP:
11052d60b848STomohiro Kusumi base = &parent->data->npdata[0];
11062d60b848STomohiro Kusumi count = HAMMER2_SET_COUNT;
11072d60b848STomohiro Kusumi break;
11082d60b848STomohiro Kusumi default:
11092d60b848STomohiro Kusumi base = NULL;
11102d60b848STomohiro Kusumi count = 0;
11112d60b848STomohiro Kusumi panic("hammer2_flush_core: "
11122d60b848STomohiro Kusumi "unrecognized blockref type: %d",
11132d60b848STomohiro Kusumi parent->bref.type);
11142d60b848STomohiro Kusumi break;
11152d60b848STomohiro Kusumi }
11162d60b848STomohiro Kusumi
11172d60b848STomohiro Kusumi /*
11182d60b848STomohiro Kusumi * Blocktable updates
11192d60b848STomohiro Kusumi */
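/*
 * The update is two-phase.  If the chain is already present in the
 * parent's blocktable (BLKMAPPED) and flagged for replacement
 * (BLKMAPUPD), the stale blockref is deleted first; base_delete
 * clears both flags.  If the chain is then not (or no longer)
 * present, its current bref is inserted and base_insert re-sets
 * BLKMAPPED.  Both steps run under the parent's core spinlock.
 */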
11202d60b848STomohiro Kusumi if (base && (chain->flags & HAMMER2_CHAIN_BLKMAPUPD)) {
11212d60b848STomohiro Kusumi if (chain->flags & HAMMER2_CHAIN_BLKMAPPED) {
11222d60b848STomohiro Kusumi hammer2_spin_ex(&parent->core.spin);
11232d60b848STomohiro Kusumi hammer2_base_delete(parent, base, count, chain,
11242d60b848STomohiro Kusumi NULL);
11252d60b848STomohiro Kusumi hammer2_spin_unex(&parent->core.spin);
11262d60b848STomohiro Kusumi /* base_delete clears both bits */
11272d60b848STomohiro Kusumi } else {
11282d60b848STomohiro Kusumi atomic_clear_int(&chain->flags,
11292d60b848STomohiro Kusumi HAMMER2_CHAIN_BLKMAPUPD);
11302d60b848STomohiro Kusumi }
11312d60b848STomohiro Kusumi }
11322d60b848STomohiro Kusumi if (base && (chain->flags & HAMMER2_CHAIN_BLKMAPPED) == 0) {
11332d60b848STomohiro Kusumi hammer2_spin_ex(&parent->core.spin);
11342d60b848STomohiro Kusumi hammer2_base_insert(parent, base, count,
11352d60b848STomohiro Kusumi chain, &chain->bref);
11362d60b848STomohiro Kusumi hammer2_spin_unex(&parent->core.spin);
11372d60b848STomohiro Kusumi /* base_insert sets BLKMAPPED */
11382d60b848STomohiro Kusumi }
11392d60b848STomohiro Kusumi }
11402d60b848STomohiro Kusumi skipupdate:
11412d60b848STomohiro Kusumi if (parent)
11422d60b848STomohiro Kusumi hammer2_chain_unlock(parent);
11432d60b848STomohiro Kusumi
11442d60b848STomohiro Kusumi /*
11452d60b848STomohiro Kusumi * Final cleanup after flush
11462d60b848STomohiro Kusumi */
11472d60b848STomohiro Kusumi done:
11482d60b848STomohiro Kusumi KKASSERT(chain->refs > 0);
11492d60b848STomohiro Kusumi
11502d60b848STomohiro Kusumi return retry;
11512d60b848STomohiro Kusumi }
11522d60b848STomohiro Kusumi
11532d60b848STomohiro Kusumi /*
11542d60b848STomohiro Kusumi * Flush recursion helper, called from flush_core, calls flush_core.
11552d60b848STomohiro Kusumi *
11562d60b848STomohiro Kusumi * Flushes the children of the caller's chain (info->parent), restricted
11572d60b848STomohiro Kusumi * by sync_tid.
11582d60b848STomohiro Kusumi *
11592d60b848STomohiro Kusumi * This function may set info->error as a side effect.
11602d60b848STomohiro Kusumi *
11612d60b848STomohiro Kusumi * WARNING! If we do not call hammer2_flush_core() we must update
11622d60b848STomohiro Kusumi * bref.mirror_tid ourselves to indicate that the flush has
11632d60b848STomohiro Kusumi * processed the child.
11642d60b848STomohiro Kusumi *
11652d60b848STomohiro Kusumi * WARNING! parent->core spinlock is held on entry and return.
11662d60b848STomohiro Kusumi */
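/*
 * Illustrative sketch only: the expectation is that hammer2_flush_core()
 * drives this callback while scanning the parent's chain core under
 * core.spin, roughly as below.  The RB_SCAN form and field names are
 * assumptions about the scan mechanism, not a copy of the call site.
 *
 *	hammer2_spin_ex(&parent->core.spin);
 *	RB_SCAN(hammer2_chain_tree, &parent->core.rbtree, NULL,
 *		hammer2_flush_recurse, info);
 *	hammer2_spin_unex(&parent->core.spin);
 */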
11672d60b848STomohiro Kusumi static int
11682d60b848STomohiro Kusumi hammer2_flush_recurse(hammer2_chain_t *child, void *data)
11692d60b848STomohiro Kusumi {
11702d60b848STomohiro Kusumi hammer2_flush_info_t *info = data;
11712d60b848STomohiro Kusumi hammer2_chain_t *parent = info->parent;
11722d60b848STomohiro Kusumi
11732d60b848STomohiro Kusumi #ifdef HAMMER2_SCAN_DEBUG
11742d60b848STomohiro Kusumi ++info->scan_count;
11752d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_MODIFIED)
11762d60b848STomohiro Kusumi ++info->scan_mod_count;
11772d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_UPDATE)
11782d60b848STomohiro Kusumi ++info->scan_upd_count;
11792d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_ONFLUSH)
11802d60b848STomohiro Kusumi ++info->scan_onf_count;
11812d60b848STomohiro Kusumi #endif
11822d60b848STomohiro Kusumi
11832d60b848STomohiro Kusumi /*
11842d60b848STomohiro Kusumi * (child can never be fchain or vchain so a special check isn't
11852d60b848STomohiro Kusumi * needed).
11862d60b848STomohiro Kusumi *
11872d60b848STomohiro Kusumi * We must ref the child before unlocking the spinlock.
11882d60b848STomohiro Kusumi *
11892d60b848STomohiro Kusumi * The caller has added a ref to the parent so we can temporarily
11902d60b848STomohiro Kusumi * unlock it in order to lock the child. However, if it no longer
11912d60b848STomohiro Kusumi * winds up being the child of the parent we must skip this child.
11922d60b848STomohiro Kusumi *
11932d60b848STomohiro Kusumi * NOTE! chain locking errors are fatal. They are never out-of-space
11942d60b848STomohiro Kusumi * errors.
11952d60b848STomohiro Kusumi */
11962d60b848STomohiro Kusumi hammer2_chain_ref(child);
11972d60b848STomohiro Kusumi hammer2_spin_unex(&parent->core.spin);
11982d60b848STomohiro Kusumi
11992d60b848STomohiro Kusumi hammer2_chain_ref_hold(parent);
12002d60b848STomohiro Kusumi hammer2_chain_unlock(parent);
12012d60b848STomohiro Kusumi hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
12022d60b848STomohiro Kusumi if (child->parent != parent) {
12032d60b848STomohiro Kusumi kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
12042d60b848STomohiro Kusumi parent, child, child->parent);
12052d60b848STomohiro Kusumi goto done;
12062d60b848STomohiro Kusumi }
12072d60b848STomohiro Kusumi if (child->error) {
12082d60b848STomohiro Kusumi kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
12092d60b848STomohiro Kusumi parent, child);
12102d60b848STomohiro Kusumi info->error |= child->error;
12112d60b848STomohiro Kusumi goto done;
12122d60b848STomohiro Kusumi }
12132d60b848STomohiro Kusumi
12142d60b848STomohiro Kusumi /*
12152d60b848STomohiro Kusumi * Must propagate the DESTROY flag downwards, otherwise the
12162d60b848STomohiro Kusumi * parent could end up never being removed because it will
12172d60b848STomohiro Kusumi * be requeued to the flusher if it survives this run due to
12182d60b848STomohiro Kusumi * the flag.
12192d60b848STomohiro Kusumi */
12202d60b848STomohiro Kusumi if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
12212d60b848STomohiro Kusumi atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
12222d60b848STomohiro Kusumi #ifdef HAMMER2_SCAN_DEBUG
12232d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_DESTROY)
12242d60b848STomohiro Kusumi ++info->scan_del_count;
12252d60b848STomohiro Kusumi #endif
12262d60b848STomohiro Kusumi /*
12272d60b848STomohiro Kusumi * Special handling of the root inode. Because the root inode
12282d60b848STomohiro Kusumi * contains an index of all the inodes in the PFS in addition to
12292d60b848STomohiro Kusumi * its normal directory entries, any flush that is not part of a
12302d60b848STomohiro Kusumi * filesystem sync must only flush the directory entries, and not
12312d60b848STomohiro Kusumi * anything else.
12322d60b848STomohiro Kusumi *
12332d60b848STomohiro Kusumi * The child might be an indirect block, but H2 guarantees that
12342d60b848STomohiro Kusumi * the key-range will fully partition the inode index from the
12352d60b848STomohiro Kusumi * directory entries so the case just works naturally.
12362d60b848STomohiro Kusumi */
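/*
 * (Keys with HAMMER2_DIRHASH_VISIBLE set are the normal directory
 * entries; keys without it belong to the inode index.  The code
 * below defers the latter by simply re-flagging the parent so the
 * next filesystem sync picks them up.)
 */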
12372d60b848STomohiro Kusumi if ((parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
12382d60b848STomohiro Kusumi (child->flags & HAMMER2_CHAIN_DESTROY) == 0 &&
12392d60b848STomohiro Kusumi parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
12402d60b848STomohiro Kusumi (info->flags & HAMMER2_FLUSH_FSSYNC) == 0) {
12412d60b848STomohiro Kusumi if ((child->bref.key & HAMMER2_DIRHASH_VISIBLE) == 0) {
12422d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
12432d60b848STomohiro Kusumi hammer2_chain_setflush(parent);
12442d60b848STomohiro Kusumi }
12452d60b848STomohiro Kusumi goto done;
12462d60b848STomohiro Kusumi }
12472d60b848STomohiro Kusumi }
12482d60b848STomohiro Kusumi
12492d60b848STomohiro Kusumi /*
12502d60b848STomohiro Kusumi * Recurse and collect deferral data. We're in the media flush,
12512d60b848STomohiro Kusumi * so this can cross PFS boundaries.
12522d60b848STomohiro Kusumi */
12532d60b848STomohiro Kusumi if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
12542d60b848STomohiro Kusumi #ifdef HAMMER2_SCAN_DEBUG
12552d60b848STomohiro Kusumi if (child->bref.type < 7)
12562d60b848STomohiro Kusumi ++info->scan_btype[child->bref.type];
12572d60b848STomohiro Kusumi #endif
12582d60b848STomohiro Kusumi ++info->depth;
12592d60b848STomohiro Kusumi hammer2_flush_core(info, child, info->flags);
12602d60b848STomohiro Kusumi --info->depth;
12612d60b848STomohiro Kusumi }
12622d60b848STomohiro Kusumi
12632d60b848STomohiro Kusumi done:
12642d60b848STomohiro Kusumi /*
12652d60b848STomohiro Kusumi * Relock to continue the loop.
12662d60b848STomohiro Kusumi */
12672d60b848STomohiro Kusumi hammer2_chain_unlock(child);
12682d60b848STomohiro Kusumi hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
12692d60b848STomohiro Kusumi hammer2_chain_drop_unhold(parent);
12702d60b848STomohiro Kusumi if (parent->error) {
12712d60b848STomohiro Kusumi kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
12722d60b848STomohiro Kusumi parent, child);
12732d60b848STomohiro Kusumi info->error |= parent->error;
12742d60b848STomohiro Kusumi }
12752d60b848STomohiro Kusumi hammer2_chain_drop(child);
12762d60b848STomohiro Kusumi KKASSERT(info->parent == parent);
12772d60b848STomohiro Kusumi hammer2_spin_ex(&parent->core.spin);
12782d60b848STomohiro Kusumi
12792d60b848STomohiro Kusumi return (0);
12802d60b848STomohiro Kusumi }
12812d60b848STomohiro Kusumi
12822d60b848STomohiro Kusumi /*
12832d60b848STomohiro Kusumi * flush helper (backend threaded)
12842d60b848STomohiro Kusumi *
12852d60b848STomohiro Kusumi * Flushes chain topology for the specified inode.
12862d60b848STomohiro Kusumi *
12872d60b848STomohiro Kusumi * HAMMER2_XOP_INODE_STOP The flush recursion stops at inode boundaries.
12882d60b848STomohiro Kusumi * Inodes belonging to the same flush are flushed
12892d60b848STomohiro Kusumi * separately.
12902d60b848STomohiro Kusumi *
12912d60b848STomohiro Kusumi * chain->parent can be NULL, usually due to destroy races or detached inodes.
12922d60b848STomohiro Kusumi *
12932d60b848STomohiro Kusumi * Primarily called from vfs_sync().
12942d60b848STomohiro Kusumi */
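/*
 * Illustrative sketch only: frontends are expected to reach this backend
 * through the XOP framework rather than by calling it directly, roughly
 * as below.  The helper names and the flush descriptor are assumptions
 * based on the usual XOP dispatch pattern and may differ in detail.
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
 *				    HAMMER2_XOP_INODE_STOP);
 *	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
 *	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */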
12952d60b848STomohiro Kusumi void
12962d60b848STomohiro Kusumi hammer2_xop_inode_flush(hammer2_xop_t *arg, void *scratch __unused, int clindex)
12972d60b848STomohiro Kusumi {
12982d60b848STomohiro Kusumi hammer2_xop_flush_t *xop = &arg->xop_flush;
12992d60b848STomohiro Kusumi hammer2_chain_t *chain;
13002d60b848STomohiro Kusumi hammer2_inode_t *ip;
13012d60b848STomohiro Kusumi hammer2_dev_t *hmp;
13022d60b848STomohiro Kusumi hammer2_pfs_t *pmp;
13032d60b848STomohiro Kusumi hammer2_devvp_t *e;
13046bcbb706STomohiro Kusumi struct m_vnode *devvp;
13052d60b848STomohiro Kusumi int flush_error = 0;
13062d60b848STomohiro Kusumi int fsync_error = 0;
13072d60b848STomohiro Kusumi int total_error = 0;
13082d60b848STomohiro Kusumi int j;
13092d60b848STomohiro Kusumi int xflags;
13102d60b848STomohiro Kusumi int ispfsroot = 0;
13112d60b848STomohiro Kusumi
13122d60b848STomohiro Kusumi xflags = HAMMER2_FLUSH_TOP;
13132d60b848STomohiro Kusumi if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
13142d60b848STomohiro Kusumi xflags |= HAMMER2_FLUSH_INODE_STOP;
13152d60b848STomohiro Kusumi if (xop->head.flags & HAMMER2_XOP_FSSYNC)
13162d60b848STomohiro Kusumi xflags |= HAMMER2_FLUSH_FSSYNC;
13172d60b848STomohiro Kusumi
13182d60b848STomohiro Kusumi /*
13192d60b848STomohiro Kusumi * Flush core chains
13202d60b848STomohiro Kusumi */
13212d60b848STomohiro Kusumi ip = xop->head.ip1;
13222d60b848STomohiro Kusumi pmp = ip->pmp;
13232d60b848STomohiro Kusumi chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
13242d60b848STomohiro Kusumi if (chain) {
13252d60b848STomohiro Kusumi hmp = chain->hmp;
13262d60b848STomohiro Kusumi if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
13272d60b848STomohiro Kusumi /*
13282d60b848STomohiro Kusumi * Due to flush partitioning the chain topology
13292d60b848STomohiro Kusumi * above the inode's chain may no longer be flagged.
13302d60b848STomohiro Kusumi * When asked to flush an inode, remark the topology
13312d60b848STomohiro Kusumi * leading to that inode.
13322d60b848STomohiro Kusumi */
13332d60b848STomohiro Kusumi if (chain->parent)
13342d60b848STomohiro Kusumi hammer2_chain_setflush(chain->parent);
13352d60b848STomohiro Kusumi hammer2_flush(chain, xflags);
13362d60b848STomohiro Kusumi
13372d60b848STomohiro Kusumi /* XXX cluster */
13382d60b848STomohiro Kusumi if (ip == pmp->iroot && pmp != hmp->spmp) {
1339*6b47f3eaSTomohiro Kusumi hammer2_spin_ex(&pmp->blockset_spin);
13402d60b848STomohiro Kusumi pmp->pfs_iroot_blocksets[clindex] =
13412d60b848STomohiro Kusumi chain->data->ipdata.u.blockset;
1342*6b47f3eaSTomohiro Kusumi hammer2_spin_unex(&pmp->blockset_spin);
13432d60b848STomohiro Kusumi }
13442d60b848STomohiro Kusumi
13452d60b848STomohiro Kusumi #if 0
13462d60b848STomohiro Kusumi /*
13472d60b848STomohiro Kusumi * Propagate upwards but only cross an inode boundary
13482d60b848STomohiro Kusumi * for inodes associated with the current filesystem
13492d60b848STomohiro Kusumi * sync.
13502d60b848STomohiro Kusumi */
13512d60b848STomohiro Kusumi if ((xop->head.flags & HAMMER2_XOP_PARENTONFLUSH) ||
13522d60b848STomohiro Kusumi chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
13532d60b848STomohiro Kusumi parent = chain->parent;
13542d60b848STomohiro Kusumi if (parent)
13552d60b848STomohiro Kusumi hammer2_chain_setflush(parent);
13562d60b848STomohiro Kusumi }
13572d60b848STomohiro Kusumi #endif
13582d60b848STomohiro Kusumi }
13592d60b848STomohiro Kusumi if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
13602d60b848STomohiro Kusumi ispfsroot = 1;
13612d60b848STomohiro Kusumi hammer2_chain_unlock(chain);
13622d60b848STomohiro Kusumi hammer2_chain_drop(chain);
13632d60b848STomohiro Kusumi chain = NULL;
13642d60b848STomohiro Kusumi } else {
13652d60b848STomohiro Kusumi hmp = NULL;
13662d60b848STomohiro Kusumi }
13672d60b848STomohiro Kusumi
13682d60b848STomohiro Kusumi /*
13692d60b848STomohiro Kusumi * Only flush the volume header if asked to, plus the inode must also
13702d60b848STomohiro Kusumi * be the PFS root.
13712d60b848STomohiro Kusumi */
13722d60b848STomohiro Kusumi if ((xop->head.flags & HAMMER2_XOP_VOLHDR) == 0)
13732d60b848STomohiro Kusumi goto skip;
13742d60b848STomohiro Kusumi if (ispfsroot == 0)
13752d60b848STomohiro Kusumi goto skip;
13762d60b848STomohiro Kusumi
13772d60b848STomohiro Kusumi /*
13782d60b848STomohiro Kusumi * Flush volume roots. Avoid replication, we only want to
13792d60b848STomohiro Kusumi * flush each hammer2_dev (hmp) once.
13802d60b848STomohiro Kusumi */
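/*
 * Only the lowest cluster index whose chain maps to this hmp performs
 * the volume-root flush; if a lower index already covers the same
 * device the work is skipped here.
 */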
13812d60b848STomohiro Kusumi for (j = clindex - 1; j >= 0; --j) {
13822d60b848STomohiro Kusumi if ((chain = ip->cluster.array[j].chain) != NULL) {
13832d60b848STomohiro Kusumi if (chain->hmp == hmp) {
13842d60b848STomohiro Kusumi chain = NULL; /* safety */
13852d60b848STomohiro Kusumi goto skip;
13862d60b848STomohiro Kusumi }
13872d60b848STomohiro Kusumi }
13882d60b848STomohiro Kusumi }
13892d60b848STomohiro Kusumi chain = NULL; /* safety */
13902d60b848STomohiro Kusumi
13912d60b848STomohiro Kusumi /*
13922d60b848STomohiro Kusumi * spmp transaction. The super-root is never directly mounted so
13932d60b848STomohiro Kusumi * there shouldn't be any vnodes, let alone any dirty vnodes
13942d60b848STomohiro Kusumi * associated with it, so we shouldn't have to mess around with any
13952d60b848STomohiro Kusumi * vnode flushes here.
13962d60b848STomohiro Kusumi */
13972d60b848STomohiro Kusumi hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
13982d60b848STomohiro Kusumi
13992d60b848STomohiro Kusumi /*
14002d60b848STomohiro Kusumi * We must flush the superroot down to the PFS iroot. Remember
14012d60b848STomohiro Kusumi * that hammer2_chain_setflush() stops at inode boundaries, so
14022d60b848STomohiro Kusumi * the pmp->iroot has been flushed and flagged down to the superroot,
14032d60b848STomohiro Kusumi * but the volume root (vchain) probably has not yet been flagged.
14042d60b848STomohiro Kusumi */
14052d60b848STomohiro Kusumi if (hmp->spmp->iroot) {
14062d60b848STomohiro Kusumi chain = hmp->spmp->iroot->cluster.array[0].chain;
14072d60b848STomohiro Kusumi if (chain) {
14082d60b848STomohiro Kusumi hammer2_chain_ref(chain);
14092d60b848STomohiro Kusumi hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
14102d60b848STomohiro Kusumi flush_error |=
14112d60b848STomohiro Kusumi hammer2_flush(chain,
14122d60b848STomohiro Kusumi HAMMER2_FLUSH_TOP |
14132d60b848STomohiro Kusumi HAMMER2_FLUSH_INODE_STOP |
14142d60b848STomohiro Kusumi HAMMER2_FLUSH_FSSYNC);
14152d60b848STomohiro Kusumi hammer2_chain_unlock(chain);
14162d60b848STomohiro Kusumi hammer2_chain_drop(chain);
14172d60b848STomohiro Kusumi }
14182d60b848STomohiro Kusumi }
14192d60b848STomohiro Kusumi
14202d60b848STomohiro Kusumi /*
14212d60b848STomohiro Kusumi * Media mounts have two 'roots', vchain for the topology
14222d60b848STomohiro Kusumi * and fchain for the free block table. Flush both.
14232d60b848STomohiro Kusumi *
14242d60b848STomohiro Kusumi * Note that the topology and free block table are handled
14252d60b848STomohiro Kusumi * independently, so the free block table can wind up being
14262d60b848STomohiro Kusumi * ahead of the topology. We depend on the bulk free scan
14272d60b848STomohiro Kusumi * code to deal with any loose ends.
14282d60b848STomohiro Kusumi *
14292d60b848STomohiro Kusumi * vchain and fchain do not error on-lock since their data does
14302d60b848STomohiro Kusumi * not have to be re-read from media.
14312d60b848STomohiro Kusumi */
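/*
 * The freemap root (fchain) is handled first; flushing it dirties the
 * volume data, which is why hammer2_voldata_modify() is called up
 * front, and the subsequent vchain flush then carries that change.
 */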
14322d60b848STomohiro Kusumi hammer2_chain_ref(&hmp->vchain);
14332d60b848STomohiro Kusumi hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
14342d60b848STomohiro Kusumi hammer2_chain_ref(&hmp->fchain);
14352d60b848STomohiro Kusumi hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
14362d60b848STomohiro Kusumi if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
14372d60b848STomohiro Kusumi /*
14382d60b848STomohiro Kusumi * This will also modify vchain as a side effect,
14392d60b848STomohiro Kusumi * mark vchain as modified now.
14402d60b848STomohiro Kusumi */
14412d60b848STomohiro Kusumi hammer2_voldata_modify(hmp);
14422d60b848STomohiro Kusumi chain = &hmp->fchain;
14432d60b848STomohiro Kusumi flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
14442d60b848STomohiro Kusumi KKASSERT(chain == &hmp->fchain);
14452d60b848STomohiro Kusumi }
14462d60b848STomohiro Kusumi hammer2_chain_unlock(&hmp->fchain);
14472d60b848STomohiro Kusumi hammer2_chain_unlock(&hmp->vchain);
14482d60b848STomohiro Kusumi hammer2_chain_drop(&hmp->fchain);
14492d60b848STomohiro Kusumi /* vchain dropped down below */
14502d60b848STomohiro Kusumi
14512d60b848STomohiro Kusumi hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
14522d60b848STomohiro Kusumi if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
14532d60b848STomohiro Kusumi chain = &hmp->vchain;
14542d60b848STomohiro Kusumi flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
14552d60b848STomohiro Kusumi KKASSERT(chain == &hmp->vchain);
14562d60b848STomohiro Kusumi }
14572d60b848STomohiro Kusumi hammer2_chain_unlock(&hmp->vchain);
14582d60b848STomohiro Kusumi hammer2_chain_drop(&hmp->vchain);
14592d60b848STomohiro Kusumi
14602d60b848STomohiro Kusumi /*
14612d60b848STomohiro Kusumi * We can't safely flush the volume header until we have
14622d60b848STomohiro Kusumi * flushed any device buffers which have built up.
14632d60b848STomohiro Kusumi *
14642d60b848STomohiro Kusumi * XXX this isn't being incremental
14652d60b848STomohiro Kusumi */
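/*
 * Each underlying device vnode is fsynced with MNT_WAIT so that all
 * dirty device buffers reach stable storage before the volume header
 * is rewritten below.
 */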
14662d60b848STomohiro Kusumi TAILQ_FOREACH(e, &hmp->devvpl, entry) {
14672d60b848STomohiro Kusumi devvp = e->devvp;
14682d60b848STomohiro Kusumi KKASSERT(devvp);
14692d60b848STomohiro Kusumi vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
14702d60b848STomohiro Kusumi fsync_error = VOP_FSYNC(devvp, MNT_WAIT, 0);
14712d60b848STomohiro Kusumi vn_unlock(devvp);
14722d60b848STomohiro Kusumi if (fsync_error || flush_error) {
14732d60b848STomohiro Kusumi kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
14742d60b848STomohiro Kusumi fsync_error, flush_error, e->path);
14752d60b848STomohiro Kusumi }
14762d60b848STomohiro Kusumi }
14772d60b848STomohiro Kusumi
14782d60b848STomohiro Kusumi /*
14792d60b848STomohiro Kusumi * The flush code sets CHAIN_VOLUMESYNC to indicate that the
14802d60b848STomohiro Kusumi * volume header needs synchronization via hmp->volsync.
14812d60b848STomohiro Kusumi *
14822d60b848STomohiro Kusumi * XXX synchronize the flag & data with only this flush XXX
14832d60b848STomohiro Kusumi */
14842d60b848STomohiro Kusumi if (fsync_error == 0 && flush_error == 0 &&
14852d60b848STomohiro Kusumi (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
14864e2eefe9STomohiro Kusumi struct m_buf *bp;
14872d60b848STomohiro Kusumi int vol_error = 0;
14882d60b848STomohiro Kusumi
14892d60b848STomohiro Kusumi /*
14902d60b848STomohiro Kusumi * Synchronize the disk before flushing the volume
14912d60b848STomohiro Kusumi * header.
14922d60b848STomohiro Kusumi */
14932d60b848STomohiro Kusumi /*
14942d60b848STomohiro Kusumi bp = getpbuf(NULL);
14952d60b848STomohiro Kusumi bp->b_bio1.bio_offset = 0;
14962d60b848STomohiro Kusumi bp->b_bufsize = 0;
14972d60b848STomohiro Kusumi bp->b_bcount = 0;
14982d60b848STomohiro Kusumi bp->b_cmd = BUF_CMD_FLUSH;
14992d60b848STomohiro Kusumi bp->b_bio1.bio_done = biodone_sync;
15002d60b848STomohiro Kusumi bp->b_bio1.bio_flags |= BIO_SYNC;
15012d60b848STomohiro Kusumi vn_strategy(hmp->devvp, &bp->b_bio1);
15022d60b848STomohiro Kusumi fsync_error = biowait(&bp->b_bio1, "h2vol");
15032d60b848STomohiro Kusumi relpbuf(bp, NULL);
15042d60b848STomohiro Kusumi */
15052d60b848STomohiro Kusumi
15062d60b848STomohiro Kusumi /*
15072d60b848STomohiro Kusumi * Then we can safely flush the version of the
15082d60b848STomohiro Kusumi * volume header synchronized by the flush code.
15092d60b848STomohiro Kusumi */
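/*
 * The backup volume headers are written round-robin: volhdrno
 * advances by one each time and wraps at HAMMER2_NUM_VOLHDRS, and any
 * copy whose zone would extend past the end of the volume falls back
 * to copy 0.  For example, with the usual constants (4 copies, 2GiB
 * zones, assumed here) the copies live at offsets 0, 2GiB, 4GiB and
 * 6GiB, so a 5GiB volume rotates among only the first three.
 */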
15102d60b848STomohiro Kusumi j = hmp->volhdrno + 1;
15112d60b848STomohiro Kusumi if (j < 0)
15122d60b848STomohiro Kusumi j = 0;
15132d60b848STomohiro Kusumi if (j >= HAMMER2_NUM_VOLHDRS)
15142d60b848STomohiro Kusumi j = 0;
15152d60b848STomohiro Kusumi if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
15162d60b848STomohiro Kusumi hmp->volsync.volu_size) {
15172d60b848STomohiro Kusumi j = 0;
15182d60b848STomohiro Kusumi }
15192d60b848STomohiro Kusumi if (hammer2_debug & 0x8000) {
15202d60b848STomohiro Kusumi /* debug only, avoid syslogd loop */
15212d60b848STomohiro Kusumi kprintf("sync volhdr %d %jd\n",
15222d60b848STomohiro Kusumi j, (intmax_t)hmp->volsync.volu_size);
15232d60b848STomohiro Kusumi }
15242d60b848STomohiro Kusumi bp = getblkx(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
1525*6b47f3eaSTomohiro Kusumi HAMMER2_VOLUME_BYTES, GETBLK_KVABIO, 0);
15262d60b848STomohiro Kusumi atomic_clear_int(&hmp->vchain.flags,
15272d60b848STomohiro Kusumi HAMMER2_CHAIN_VOLUMESYNC);
15282d60b848STomohiro Kusumi bkvasync(bp);
1529*6b47f3eaSTomohiro Kusumi bcopy(&hmp->volsync, bp->b_data, HAMMER2_VOLUME_BYTES);
15302d60b848STomohiro Kusumi vol_error = bwrite(bp);
15312d60b848STomohiro Kusumi hmp->volhdrno = j;
15322d60b848STomohiro Kusumi if (vol_error)
15332d60b848STomohiro Kusumi fsync_error = vol_error;
15342d60b848STomohiro Kusumi }
15352d60b848STomohiro Kusumi if (flush_error)
15362d60b848STomohiro Kusumi total_error = flush_error;
15372d60b848STomohiro Kusumi if (fsync_error)
15382d60b848STomohiro Kusumi total_error = hammer2_errno_to_error(fsync_error);
15392d60b848STomohiro Kusumi
15402d60b848STomohiro Kusumi /* spmp trans */
15412d60b848STomohiro Kusumi hammer2_trans_done(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
15422d60b848STomohiro Kusumi skip:
15432d60b848STomohiro Kusumi hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
15442d60b848STomohiro Kusumi }