/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *      - ZIL header
 *      - ZIL blocks
 *      - ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below shows the
 * ZIL structure:
 */

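/*
 * Illustrative sketch (added; reconstructed from the code below, where
 * each log block ends in a zil_trailer_t whose zit_next_blk points at
 * the next block in the chain):
 *
 *  zil_header_t           ZIL block              ZIL block
 *  +-----------+        +--------------+       +--------------+
 *  |  zh_log --+------->| log records  |  +--->| log records  |
 *  +-----------+        | ...          |  |    | ...          |
 *                       | trailer:     |  |    | trailer:     |
 *                       | zit_next_blk-+--+    | zit_next_blk-+--> ...
 *                       +--------------+       +--------------+
 */
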
/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;    /* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static boolean_t zil_empty(zilog_t *zilog);

static int
zil_dva_compare(const void *x1, const void *x2)
{
    const dva_t *dva1 = x1;
    const dva_t *dva2 = x2;

    if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
        return (-1);
    if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
        return (1);

    if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
        return (-1);
    if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
        return (1);

    return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
    avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
        offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
    zil_dva_node_t *zn;
    void *cookie = NULL;

    while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
        kmem_free(zn, sizeof (zil_dva_node_t));

    avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
    zil_dva_node_t *zn;
    avl_index_t where;

    if (avl_find(t, dva, &where) != NULL)
        return (EEXIST);

    zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
    zn->zn_dva = *dva;
    avl_insert(t, zn, where);

    return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
    return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
    zio_cksum_t *zc = &bp->blk_cksum;

    zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
    zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

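/*
 * Descriptive note (added): the words seeded above make each log chain
 * self-identifying. The two GUID words are random, so blocks left over
 * from an older chain cannot masquerade as part of a new one, and
 * ZIL_ZC_SEQ starts at 1 and is incremented for each successive block.
 * zil_read_log_block() below verifies that a block's trailer names a
 * next block whose embedded checksum equals the current block's
 * checksum with ZIL_ZC_SEQ advanced by exactly one; any mismatch is
 * treated as the end of the chain.
 */
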
/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
    blkptr_t blk = *bp;
    zbookmark_t zb;
    uint32_t aflags = ARC_WAIT;
    int error;

    zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

    *abufpp = NULL;

    /*
     * We shouldn't be doing any scrubbing while we're doing log
     * replay, so it's OK not to lock.
     */
    error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
        arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
        ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

    if (error == 0) {
        char *data = (*abufpp)->b_data;
        uint64_t blksz = BP_GET_LSIZE(bp);
        zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
        zio_cksum_t cksum = bp->blk_cksum;

        /*
         * Validate the checksummed log block.
         *
         * Sequence numbers should be... sequential. The checksum
         * verifier for the next block should be bp's checksum plus 1.
         *
         * Also check the log chain linkage and size used.
         */
        cksum.zc_word[ZIL_ZC_SEQ]++;

        if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
            sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
            (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
            error = ECKSUM;
        }

        if (error) {
            VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
            *abufpp = NULL;
        }
    }

    dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

    return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
    const zil_header_t *zh = zilog->zl_header;
    uint64_t claim_seq = zh->zh_claim_seq;
    uint64_t seq = 0;
    uint64_t max_seq = 0;
    blkptr_t blk = zh->zh_log;
    arc_buf_t *abuf;
    char *lrbuf, *lrp;
    zil_trailer_t *ztp;
    int reclen, error;

    if (BP_IS_HOLE(&blk))
        return (max_seq);

    /*
     * Starting at the block pointed to by zh_log we read the log chain.
     * For each block in the chain we strongly check that block to
     * ensure its validity. We stop when an invalid block is found.
     * For each block pointer in the chain we call parse_blk_func().
     * For each record in each valid block we call parse_lr_func().
     * If the log has been claimed, stop if we encounter a sequence
     * number greater than the highest claimed sequence number.
     */
    zil_dva_tree_init(&zilog->zl_dva_tree);
    for (;;) {
        seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

        if (claim_seq != 0 && seq > claim_seq)
            break;

        ASSERT(max_seq < seq);
        max_seq = seq;

        error = zil_read_log_block(zilog, &blk, &abuf);

        if (parse_blk_func != NULL)
            parse_blk_func(zilog, &blk, arg, txg);

        if (error)
            break;

        lrbuf = abuf->b_data;
        ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
        blk = ztp->zit_next_blk;

        if (parse_lr_func == NULL) {
            VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
            continue;
        }

        for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
            lr_t *lr = (lr_t *)lrp;
            reclen = lr->lrc_reclen;
            ASSERT3U(reclen, >=, sizeof (lr_t));
            parse_lr_func(zilog, lr, arg, txg);
        }
        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    }
    zil_dva_tree_fini(&zilog->zl_dva_tree);

    return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
    spa_t *spa = zilog->zl_spa;
    int err;

    /*
     * Claim log block if not already committed and not already claimed.
     */
    if (bp->blk_birth >= first_txg &&
        zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
        err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
            ZIO_FLAG_MUSTSUCCEED));
        ASSERT(err == 0);
    }
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
    if (lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
    }
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
    zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
    /*
     * If we previously claimed it, we need to free it.
     */
    if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        blkptr_t *bp = &lr->lr_blkptr;
        if (bp->blk_birth >= claim_txg &&
            !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
            (void) arc_free(NULL, zilog->zl_spa,
                dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
        }
    }
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    uint64_t txg = 0;
    dmu_tx_t *tx = NULL;
    blkptr_t blk;
    int error = 0;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    ASSERT(zh->zh_claim_txg == 0);
    ASSERT(zh->zh_replay_seq == 0);

    blk = zh->zh_log;

    /*
     * If we don't already have an initial log block or we have one
     * but it's the wrong endianness then allocate one.
     */
    if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
        tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        if (!BP_IS_HOLE(&blk)) {
            zio_free_blk(zilog->zl_spa, &blk, txg);
            BP_ZERO(&blk);
        }

        error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
            NULL, txg, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

        if (error == 0)
            zil_init_log_chain(zilog, &blk);
    }

    /*
     * Allocate a log write buffer (lwb) for the first log block.
     */
    if (error == 0) {
        lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
        lwb->lwb_zilog = zilog;
        lwb->lwb_blk = blk;
        lwb->lwb_nused = 0;
        lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
        lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
        lwb->lwb_max_txg = txg;
        lwb->lwb_zio = NULL;

        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_lwb_list, lwb);
        mutex_exit(&zilog->zl_lock);
    }

    /*
     * If we just allocated the first log block, commit our transaction
     * and wait for zil_sync() to stuff the block pointer into zh_log.
     * (zh is part of the MOS, so we cannot modify it in open context.)
     */
    if (tx != NULL) {
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    dmu_tx_t *tx;
    uint64_t txg;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    if (BP_IS_HOLE(&zh->zh_log))
        return;

    tx = dmu_tx_create(zilog->zl_os);
    (void) dmu_tx_assign(tx, TXG_WAIT);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    mutex_enter(&zilog->zl_lock);

    ASSERT3U(zilog->zl_destroy_txg, <, txg);
    zilog->zl_destroy_txg = txg;

    if (!list_is_empty(&zilog->zl_lwb_list)) {
        ASSERT(zh->zh_claim_txg == 0);
        zilog->zl_keep_first = B_FALSE;
        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
            list_remove(&zilog->zl_lwb_list, lwb);
            if (lwb->lwb_buf != NULL)
                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
            zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
            kmem_cache_free(zil_lwb_cache, lwb);
        }
    } else {
        zilog->zl_keep_first = keep_first;
        if (zh->zh_flags & ZIL_REPLAY_NEEDED) {
            ASSERT(!keep_first);
            (void) zil_parse(zilog, zil_free_log_block,
                zil_free_log_record, tx, zh->zh_claim_txg);
        } else {
            /*
             * Would like to assert zil_empty() but that
             * would force us to read the log chain which
             * requires us to do I/O to the log. This is
             * overkill since we really just want to destroy
             * the chain anyway.
             */
            if (!keep_first) {
                blkptr_t bp = zh->zh_log;
                zio_free_blk(zilog->zl_spa, &bp, txg);
            }
        }
    }
    mutex_exit(&zilog->zl_lock);

    dmu_tx_commit(tx);
}

/*
 * Return true if the initial log block is not valid.
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    arc_buf_t *abuf = NULL;

    if (BP_IS_HOLE(&zh->zh_log))
        return (B_TRUE);

    if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
        return (B_TRUE);

    VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    return (B_FALSE);
}

int
zil_claim(char *osname, void *txarg)
{
    dmu_tx_t *tx = txarg;
    uint64_t first_txg = dmu_tx_get_txg(tx);
    zilog_t *zilog;
    zil_header_t *zh;
    objset_t *os;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error) {
        cmn_err(CE_WARN, "can't open objset for %s", osname);
        return (0);
    }

    zilog = dmu_objset_zil(os);
    zh = zil_header_in_syncing_context(zilog);

    if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
        if (!BP_IS_HOLE(&zh->zh_log))
            zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
        BP_ZERO(&zh->zh_log);
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    /*
     * Record here whether the zil has any records to replay.
     * If the header block pointer is null or the block points
     * to the stubby then we know there are no valid log records.
     * We use the header to store this state as the zilog gets
     * freed later in dmu_objset_close().
     * The flags (and the rest of the header fields) are cleared in
     * zil_sync() as a result of a zil_destroy(), after replaying the log.
     *
     * Note, the intent log can be empty but still need the
     * stubby to be claimed.
     */
    if (!zil_empty(zilog)) {
        zh->zh_flags |= ZIL_REPLAY_NEEDED;
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    /*
     * Claim all log blocks if we haven't already done so, and remember
     * the highest claimed sequence number. This ensures that if we can
     * read only part of the log now (e.g. due to a missing device),
     * but we can read the entire log later, we will not try to replay
     * or destroy beyond the last block we successfully claimed.
     */
    ASSERT3U(zh->zh_claim_txg, <=, first_txg);
    if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
        zh->zh_claim_txg = first_txg;
        zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
            zil_claim_log_record, tx, first_txg);
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
    dmu_objset_rele(os, FTAG);
    return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are OK as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
    zilog_t *zilog;
    zil_header_t *zh;
    blkptr_t blk;
    arc_buf_t *abuf;
    objset_t *os;
    char *lrbuf;
    zil_trailer_t *ztp;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error) {
        cmn_err(CE_WARN, "can't open objset for %s", osname);
        return (0);
    }

    zilog = dmu_objset_zil(os);
    zh = zil_header_in_syncing_context(zilog);
    blk = zh->zh_log;
    if (BP_IS_HOLE(&blk)) {
        dmu_objset_rele(os, FTAG);
        return (0); /* no chain */
    }

    for (;;) {
        error = zil_read_log_block(zilog, &blk, &abuf);
        if (error)
            break;
        lrbuf = abuf->b_data;
        ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
        blk = ztp->zit_next_blk;
        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    }
    dmu_objset_rele(os, FTAG);
    if (error == ECKSUM)
        return (0); /* normal end of chain */
    return (error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
    uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
    uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

    if (v1 < v2)
        return (-1);
    if (v1 > v2)
        return (1);

    return (0);
}

void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
    avl_tree_t *t = &zilog->zl_vdev_tree;
    avl_index_t where;
    zil_vdev_node_t *zv, zvsearch;
    int ndvas = BP_GET_NDVAS(bp);
    int i;

    if (zfs_nocacheflush)
        return;

    ASSERT(zilog->zl_writer);

    /*
     * Even though we're zl_writer, we still need a lock because the
     * zl_get_data() callbacks may have dmu_sync() done callbacks
     * that will run concurrently.
     */
    mutex_enter(&zilog->zl_vdev_lock);
    for (i = 0; i < ndvas; i++) {
        zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
        if (avl_find(t, &zvsearch, &where) == NULL) {
            zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
            zv->zv_vdev = zvsearch.zv_vdev;
            avl_insert(t, zv, where);
        }
    }
    mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
    spa_t *spa = zilog->zl_spa;
    avl_tree_t *t = &zilog->zl_vdev_tree;
    void *cookie = NULL;
    zil_vdev_node_t *zv;
    zio_t *zio;

    ASSERT(zilog->zl_writer);

    /*
     * We don't need zl_vdev_lock here because we're the zl_writer,
     * and all zl_get_data() callbacks are done.
     */
    if (avl_numnodes(t) == 0)
        return;

    spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

    zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

    while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
        vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
        if (vd != NULL)
            zio_flush(zio, vd);
        kmem_free(zv, sizeof (*zv));
    }

    /*
     * Wait for all the flushes to complete. Not all devices actually
     * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
     */
    (void) zio_wait(zio);

    spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
    lwb_t *lwb = zio->io_private;
    zilog_t *zilog = lwb->lwb_zilog;

    ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
    ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
    ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
    ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
    ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
    ASSERT(!BP_IS_GANG(zio->io_bp));
    ASSERT(!BP_IS_HOLE(zio->io_bp));
    ASSERT(zio->io_bp->blk_fill == 0);

    /*
     * Ensure the lwb buffer pointer is cleared before releasing
     * the txg. If we have had an allocation failure and
     * the txg is waiting to sync then we want zil_sync()
     * to remove the lwb so that it's not picked up as the next new
     * one in zil_commit_writer(). zil_sync() will only remove
     * the lwb if lwb_buf is null.
     */
    zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
    mutex_enter(&zilog->zl_lock);
    lwb->lwb_buf = NULL;
    if (zio->io_error)
        zilog->zl_log_error = B_TRUE;

    /*
     * Now that we've written this log block, we have a stable pointer
     * to the next block in the chain, so it's OK to let the txg in
     * which we allocated the next block sync. We still have the
     * zl_lock to ensure zil_sync doesn't kmem free the lwb.
     */
    txg_rele_to_sync(&lwb->lwb_txgh);
    mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the I/O for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
    zbookmark_t zb;

    zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

    if (zilog->zl_root_zio == NULL) {
        zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
            ZIO_FLAG_CANFAIL);
    }
    if (lwb->lwb_zio == NULL) {
        lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
            0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
            zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
    }
}

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
    (((zilog)->zl_cur_used < zil_slog_limit) || \
    ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))

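/*
 * Worked example (added for illustration): with the default
 * zil_slog_limit of 1 MB, a commit with zl_cur_used of 512K goes to
 * the slog regardless of backlog, and a commit with zl_cur_used of
 * 4 MB still goes to the slog if zl_itx_list_sz is under 2 MB; only
 * when both the current commit and the queued backlog exceed their
 * limits does allocation fall back to the main pool.
 */
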
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
    lwb_t *nlwb;
    zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
    spa_t *spa = zilog->zl_spa;
    blkptr_t *bp = &ztp->zit_next_blk;
    uint64_t txg;
    uint64_t zil_blksz;
    int error;

    ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

    /*
     * Allocate the next block and save its address in this block
     * before writing it in order to establish the log chain.
     * Note that if the allocation of nlwb synced before we wrote
     * the block that points at it (lwb), we'd leak it if we crashed.
     * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
     */
    txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
    txg_rele_to_quiesce(&lwb->lwb_txgh);

    /*
     * Pick a ZIL blocksize. We request a size that is the
     * maximum of the previous used size, the current used size and
     * the amount waiting in the queue.
     */
    zil_blksz = MAX(zilog->zl_prev_used,
        zilog->zl_cur_used + sizeof (*ztp));
    zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
    zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
    if (zil_blksz > ZIL_MAX_BLKSZ)
        zil_blksz = ZIL_MAX_BLKSZ;

    BP_ZERO(bp);
    /* pass the old blkptr in order to spread log blocks across devs */
    error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg,
        USE_SLOG(zilog));
    if (error) {
        dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

        /*
         * We dirty the dataset to ensure that zil_sync() will
         * be called to remove this lwb from our zl_lwb_list.
         * Failing to do so may leave an lwb with a NULL lwb_buf
         * hanging around on the zl_lwb_list.
         */
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        dmu_tx_commit(tx);

        /*
         * We've just experienced an allocation failure, so we
         * terminate the current lwb and send it on its way.
         */
        ztp->zit_pad = 0;
        ztp->zit_nused = lwb->lwb_nused;
        ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
        zio_nowait(lwb->lwb_zio);

        /*
         * By returning NULL the caller will call txg_wait_synced().
         */
        return (NULL);
    }

    ASSERT3U(bp->blk_birth, ==, txg);
    ztp->zit_pad = 0;
    ztp->zit_nused = lwb->lwb_nused;
    ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

    /*
     * Allocate a new log write buffer (lwb).
     */
    nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

    nlwb->lwb_zilog = zilog;
    nlwb->lwb_blk = *bp;
    nlwb->lwb_nused = 0;
    nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
    nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
    nlwb->lwb_max_txg = txg;
    nlwb->lwb_zio = NULL;

    /*
     * Put new lwb at the end of the log chain
     */
    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_lwb_list, nlwb);
    mutex_exit(&zilog->zl_lock);

    /* Record the block for later vdev flushing */
    zil_add_block(zilog, &lwb->lwb_blk);

    /*
     * Kick off the write for the old log block.
     */
    dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
    ASSERT(lwb->lwb_zio);
    zio_nowait(lwb->lwb_zio);

    return (nlwb);
}

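/*
 * Descriptive note (added): zil_lwb_commit() below handles the three
 * itx write states for TX_WRITE records. WR_COPIED means the write
 * data was copied into the log record when the itx was created, so
 * the record is written as-is. WR_NEED_COPY means only the record
 * header was created and the data must be fetched now, via
 * zl_get_data(), directly into this log block. WR_INDIRECT means the
 * data is written in place by dmu_sync() and only its block pointer
 * is logged, again filled in by zl_get_data().
 */
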
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
    lr_t *lrc = &itx->itx_lr; /* common log record */
    lr_write_t *lr = (lr_write_t *)lrc;
    uint64_t txg = lrc->lrc_txg;
    uint64_t reclen = lrc->lrc_reclen;
    uint64_t dlen;

    if (lwb == NULL)
        return (NULL);
    ASSERT(lwb->lwb_buf != NULL);

    if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
        dlen = P2ROUNDUP_TYPED(
            lr->lr_length, sizeof (uint64_t), uint64_t);
    else
        dlen = 0;

    zilog->zl_cur_used += (reclen + dlen);

    zil_lwb_write_init(zilog, lwb);

    /*
     * If this record won't fit in the current log block, start a new one.
     */
    if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
        lwb = zil_lwb_write_start(zilog, lwb);
        if (lwb == NULL)
            return (NULL);
        zil_lwb_write_init(zilog, lwb);
        ASSERT(lwb->lwb_nused == 0);
        if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
            txg_wait_synced(zilog->zl_dmu_pool, txg);
            return (lwb);
        }
    }

    /*
     * Update lrc_seq to be the log record sequence number (see zil.h),
     * then copy the record to the log buffer.
     */
    lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
    bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

    /*
     * If it's a write, fetch the data or get its blkptr as appropriate.
     */
    if (lrc->lrc_txtype == TX_WRITE) {
        if (txg > spa_freeze_txg(zilog->zl_spa))
            txg_wait_synced(zilog->zl_dmu_pool, txg);
        if (itx->itx_wr_state != WR_COPIED) {
            char *dbuf;
            int error;

            /* alignment is guaranteed */
            lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
            if (dlen) {
                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
                lr->lr_common.lrc_reclen += dlen;
            } else {
                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                dbuf = NULL;
            }
            error = zilog->zl_get_data(
                itx->itx_private, lr, dbuf, lwb->lwb_zio);
            if (error == EIO) {
                txg_wait_synced(zilog->zl_dmu_pool, txg);
                return (lwb);
            }
            if (error) {
                ASSERT(error == ENOENT || error == EEXIST ||
                    error == EALREADY);
                return (lwb);
            }
        }
    }

    lwb->lwb_nused += reclen + dlen;
    lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
    ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
    ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

    return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
    itx_t *itx;

    lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

    itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
    itx->itx_lr.lrc_txtype = txtype;
    itx->itx_lr.lrc_reclen = lrsize;
    itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
    itx->itx_lr.lrc_seq = 0; /* defensive */

    return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
    uint64_t seq;

    ASSERT(itx->itx_lr.lrc_seq == 0);

    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_itx_list, itx);
    zilog->zl_itx_list_sz += itx->itx_sod;
    itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
    itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
    mutex_exit(&zilog->zl_lock);

    return (seq);
}

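/*
 * Usage sketch (added; a minimal outline of how ZPL callers such as
 * zfs_log.c drive this interface -- the caller-side locals shown here
 * are illustrative only):
 *
 *      itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *      ... fill in the type-specific fields after itx->itx_lr ...
 *      seq = zil_itx_assign(zilog, itx, tx);   (inside the dmu tx)
 *      ...
 *      zil_commit(zilog, seq, foid);           (e.g. from an fsync)
 *
 * zil_itx_assign() stamps the itx with its txg and sequence number
 * and queues it; zil_commit() later forces everything up to that
 * sequence number for that file onto stable storage.
 */
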
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
    uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
    uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
    list_t clean_list;
    itx_t *itx;

    list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

    mutex_enter(&zilog->zl_lock);
    /* wait for a log writer to finish walking list */
    while (zilog->zl_writer) {
        cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
    }

    /*
     * Move the sync'd log transactions to a separate list so we can call
     * kmem_free without holding the zl_lock.
     *
     * There is no need to set zl_writer as we don't drop zl_lock here
     */
    while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
        itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
        list_remove(&zilog->zl_itx_list, itx);
        zilog->zl_itx_list_sz -= itx->itx_sod;
        list_insert_tail(&clean_list, itx);
    }
    cv_broadcast(&zilog->zl_cv_writer);
    mutex_exit(&zilog->zl_lock);

    /* destroy sync'd log transactions */
    while ((itx = list_head(&clean_list)) != NULL) {
        list_remove(&clean_list, itx);
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
    }
    list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
    itx_t *itx;

    mutex_enter(&zilog->zl_lock);
    itx = list_head(&zilog->zl_itx_list);
    if ((itx != NULL) &&
        (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
        (void) taskq_dispatch(zilog->zl_clean_taskq,
            (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
    }
    mutex_exit(&zilog->zl_lock);
}

static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
    uint64_t txg;
    uint64_t commit_seq = 0;
    itx_t *itx, *itx_next = (itx_t *)-1;
    lwb_t *lwb;
    spa_t *spa;

    zilog->zl_writer = B_TRUE;
    ASSERT(zilog->zl_root_zio == NULL);
    spa = zilog->zl_spa;

    if (zilog->zl_suspend) {
        lwb = NULL;
    } else {
        lwb = list_tail(&zilog->zl_lwb_list);
        if (lwb == NULL) {
            /*
             * Return if there's nothing to flush before we
             * dirty the fs by calling zil_create()
             */
            if (list_is_empty(&zilog->zl_itx_list)) {
                zilog->zl_writer = B_FALSE;
                return;
            }
            mutex_exit(&zilog->zl_lock);
            zil_create(zilog);
            mutex_enter(&zilog->zl_lock);
            lwb = list_tail(&zilog->zl_lwb_list);
        }
    }

    /* Loop through in-memory log transactions filling log blocks. */
    DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
    for (;;) {
        /*
         * Find the next itx to push:
         * Push all transactions related to specified foid and all
         * other transactions except TX_WRITE, TX_TRUNCATE,
         * TX_SETATTR and TX_ACL for all other files.
         */
        if (itx_next != (itx_t *)-1)
            itx = itx_next;
        else
            itx = list_head(&zilog->zl_itx_list);
        for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
            if (foid == 0) /* push all foids? */
                break;
            if (itx->itx_sync) /* push all O_[D]SYNC */
                break;
            switch (itx->itx_lr.lrc_txtype) {
            case TX_SETATTR:
            case TX_WRITE:
            case TX_TRUNCATE:
            case TX_ACL:
                /* lr_foid is same offset for these records */
                if (((lr_write_t *)&itx->itx_lr)->lr_foid
                    != foid) {
                    continue; /* skip this record */
                }
            }
            break;
        }
        if (itx == NULL)
            break;

        if ((itx->itx_lr.lrc_seq > seq) &&
            ((lwb == NULL) || (lwb->lwb_nused == 0) ||
            (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
            break;
        }

        /*
         * Save the next pointer. Even though we soon drop
         * zl_lock, all threads that may change the list
         * (another writer or zil_itx_clean) can't do so until
         * they have zl_writer.
         */
        itx_next = list_next(&zilog->zl_itx_list, itx);
        list_remove(&zilog->zl_itx_list, itx);
        zilog->zl_itx_list_sz -= itx->itx_sod;
        mutex_exit(&zilog->zl_lock);
        txg = itx->itx_lr.lrc_txg;
        ASSERT(txg);

        if (txg > spa_last_synced_txg(spa) ||
            txg > spa_freeze_txg(spa))
            lwb = zil_lwb_commit(zilog, itx, lwb);
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
        mutex_enter(&zilog->zl_lock);
    }
    DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
    /* determine commit sequence number */
    itx = list_head(&zilog->zl_itx_list);
    if (itx)
        commit_seq = itx->itx_lr.lrc_seq;
    else
        commit_seq = zilog->zl_itx_seq;
    mutex_exit(&zilog->zl_lock);

    /* write the last block out */
    if (lwb != NULL && lwb->lwb_zio != NULL)
        lwb = zil_lwb_write_start(zilog, lwb);

    zilog->zl_prev_used = zilog->zl_cur_used;
    zilog->zl_cur_used = 0;

    /*
     * Wait if necessary for the log blocks to be on stable storage.
     */
    if (zilog->zl_root_zio) {
        DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
        (void) zio_wait(zilog->zl_root_zio);
        zilog->zl_root_zio = NULL;
        DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
        zil_flush_vdevs(zilog);
    }

    if (zilog->zl_log_error || lwb == NULL) {
        zilog->zl_log_error = 0;
        txg_wait_synced(zilog->zl_dmu_pool, 0);
    }

    mutex_enter(&zilog->zl_lock);
    zilog->zl_writer = B_FALSE;

    ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
    zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
    if (zilog == NULL || seq == 0)
        return;

    mutex_enter(&zilog->zl_lock);

    seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */

    while (zilog->zl_writer) {
        cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
        if (seq < zilog->zl_commit_seq) {
            mutex_exit(&zilog->zl_lock);
            return;
        }
    }
    zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
    /* wake up others waiting on the commit */
    cv_broadcast(&zilog->zl_cv_writer);
    mutex_exit(&zilog->zl_lock);
}

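/*
 * Descriptive note (added): only one thread runs zil_commit_writer()
 * at a time; zl_writer serializes them. A caller that arrives while a
 * writer is active waits on zl_cv_writer, and if, on waking, the
 * sequence number it needs is already below zl_commit_seq, it returns
 * without starting another write. Concurrent fsyncs of the same log
 * are therefore batched into a single writer pass.
 */
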
12601807Sbonwick */ 12611807Sbonwick zil_init_log_chain(zilog, &blk); 12621807Sbonwick zh->zh_log = blk; 12631807Sbonwick } 1264789Sahrens } 1265789Sahrens 12669701SGeorge.Wilson@Sun.COM while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 12672638Sperrin zh->zh_log = lwb->lwb_blk; 1268789Sahrens if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 1269789Sahrens break; 1270789Sahrens list_remove(&zilog->zl_lwb_list, lwb); 1271789Sahrens zio_free_blk(spa, &lwb->lwb_blk, txg); 1272789Sahrens kmem_cache_free(zil_lwb_cache, lwb); 12733668Sgw25295 12743668Sgw25295 /* 12753668Sgw25295 * If we don't have anything left in the lwb list then 12763668Sgw25295 * we've had an allocation failure and we need to zero 12773668Sgw25295 * out the zil_header blkptr so that we don't end 12783668Sgw25295 * up freeing the same block twice. 12793668Sgw25295 */ 12803668Sgw25295 if (list_head(&zilog->zl_lwb_list) == NULL) 12813668Sgw25295 BP_ZERO(&zh->zh_log); 1282789Sahrens } 1283789Sahrens mutex_exit(&zilog->zl_lock); 1284789Sahrens } 1285789Sahrens 1286789Sahrens void 1287789Sahrens zil_init(void) 1288789Sahrens { 1289789Sahrens zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 12902856Snd150628 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); 1291789Sahrens } 1292789Sahrens 1293789Sahrens void 1294789Sahrens zil_fini(void) 1295789Sahrens { 1296789Sahrens kmem_cache_destroy(zil_lwb_cache); 1297789Sahrens } 1298789Sahrens 129910310SNeil.Perrin@Sun.COM void 130010310SNeil.Perrin@Sun.COM zil_set_logbias(zilog_t *zilog, uint64_t logbias) 130110310SNeil.Perrin@Sun.COM { 130210310SNeil.Perrin@Sun.COM zilog->zl_logbias = logbias; 130310310SNeil.Perrin@Sun.COM } 130410310SNeil.Perrin@Sun.COM 1305789Sahrens zilog_t * 1306789Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys) 1307789Sahrens { 1308789Sahrens zilog_t *zilog; 1309789Sahrens 1310789Sahrens zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 1311789Sahrens 1312789Sahrens zilog->zl_header = zh_phys; 1313789Sahrens zilog->zl_os = os; 1314789Sahrens zilog->zl_spa = dmu_objset_spa(os); 1315789Sahrens zilog->zl_dmu_pool = dmu_objset_pool(os); 13161807Sbonwick zilog->zl_destroy_txg = TXG_INITIAL - 1; 131710310SNeil.Perrin@Sun.COM zilog->zl_logbias = dmu_objset_logbias(os); 1318789Sahrens 13192856Snd150628 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 13202856Snd150628 1321789Sahrens list_create(&zilog->zl_itx_list, sizeof (itx_t), 1322789Sahrens offsetof(itx_t, itx_node)); 1323789Sahrens 1324789Sahrens list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 1325789Sahrens offsetof(lwb_t, lwb_node)); 1326789Sahrens 13275688Sbonwick mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 13285688Sbonwick 13295688Sbonwick avl_create(&zilog->zl_vdev_tree, zil_vdev_compare, 13305688Sbonwick sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 1331789Sahrens 13325913Sperrin cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL); 13335913Sperrin cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 13345913Sperrin 1335789Sahrens return (zilog); 1336789Sahrens } 1337789Sahrens 1338789Sahrens void 1339789Sahrens zil_free(zilog_t *zilog) 1340789Sahrens { 1341789Sahrens lwb_t *lwb; 1342789Sahrens 1343789Sahrens zilog->zl_stop_sync = 1; 1344789Sahrens 1345789Sahrens while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 1346789Sahrens list_remove(&zilog->zl_lwb_list, lwb); 1347789Sahrens if (lwb->lwb_buf != NULL) 1348789Sahrens zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1349789Sahrens kmem_cache_free(zil_lwb_cache, lwb); 1350789Sahrens } 
1351789Sahrens list_destroy(&zilog->zl_lwb_list); 1352789Sahrens 13535688Sbonwick avl_destroy(&zilog->zl_vdev_tree); 13545688Sbonwick mutex_destroy(&zilog->zl_vdev_lock); 1355789Sahrens 1356789Sahrens ASSERT(list_head(&zilog->zl_itx_list) == NULL); 1357789Sahrens list_destroy(&zilog->zl_itx_list); 13582856Snd150628 mutex_destroy(&zilog->zl_lock); 1359789Sahrens 13605913Sperrin cv_destroy(&zilog->zl_cv_writer); 13615913Sperrin cv_destroy(&zilog->zl_cv_suspend); 13625913Sperrin 1363789Sahrens kmem_free(zilog, sizeof (zilog_t)); 1364789Sahrens } 1365789Sahrens 1366789Sahrens /* 1367789Sahrens * Open an intent log. 1368789Sahrens */ 1369789Sahrens zilog_t * 1370789Sahrens zil_open(objset_t *os, zil_get_data_t *get_data) 1371789Sahrens { 1372789Sahrens zilog_t *zilog = dmu_objset_zil(os); 1373789Sahrens 1374789Sahrens zilog->zl_get_data = get_data; 1375789Sahrens zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri, 1376789Sahrens 2, 2, TASKQ_PREPOPULATE); 1377789Sahrens 1378789Sahrens return (zilog); 1379789Sahrens } 1380789Sahrens 1381789Sahrens /* 1382789Sahrens * Close an intent log. 1383789Sahrens */ 1384789Sahrens void 1385789Sahrens zil_close(zilog_t *zilog) 1386789Sahrens { 13871807Sbonwick /* 13881807Sbonwick * If the log isn't already committed, mark the objset dirty 13891807Sbonwick * (so zil_sync() will be called) and wait for that txg to sync. 13901807Sbonwick */ 13911807Sbonwick if (!zil_is_committed(zilog)) { 13921807Sbonwick uint64_t txg; 13931807Sbonwick dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 13941807Sbonwick (void) dmu_tx_assign(tx, TXG_WAIT); 13951807Sbonwick dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 13961807Sbonwick txg = dmu_tx_get_txg(tx); 13971807Sbonwick dmu_tx_commit(tx); 13981807Sbonwick txg_wait_synced(zilog->zl_dmu_pool, txg); 13991807Sbonwick } 14001807Sbonwick 1401789Sahrens taskq_destroy(zilog->zl_clean_taskq); 1402789Sahrens zilog->zl_clean_taskq = NULL; 1403789Sahrens zilog->zl_get_data = NULL; 1404789Sahrens 1405789Sahrens zil_itx_clean(zilog); 1406789Sahrens ASSERT(list_head(&zilog->zl_itx_list) == NULL); 1407789Sahrens } 1408789Sahrens 1409789Sahrens /* 1410789Sahrens * Suspend an intent log. While in suspended mode, we still honor 1411789Sahrens * synchronous semantics, but we rely on txg_wait_synced() to do it. 1412789Sahrens * We suspend the log briefly when taking a snapshot so that the snapshot 1413789Sahrens * contains all the data it's supposed to, and has an empty intent log. 1414789Sahrens */ 1415789Sahrens int 1416789Sahrens zil_suspend(zilog_t *zilog) 1417789Sahrens { 14181807Sbonwick const zil_header_t *zh = zilog->zl_header; 1419789Sahrens 1420789Sahrens mutex_enter(&zilog->zl_lock); 14218989SNeil.Perrin@Sun.COM if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 1422789Sahrens mutex_exit(&zilog->zl_lock); 1423789Sahrens return (EBUSY); 1424789Sahrens } 14251807Sbonwick if (zilog->zl_suspend++ != 0) { 14261807Sbonwick /* 14271807Sbonwick * Someone else already began a suspend. 14281807Sbonwick * Just wait for them to finish. 14291807Sbonwick */ 14301807Sbonwick while (zilog->zl_suspending) 14311807Sbonwick cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 14321807Sbonwick mutex_exit(&zilog->zl_lock); 14331807Sbonwick return (0); 14341807Sbonwick } 14351807Sbonwick zilog->zl_suspending = B_TRUE; 1436789Sahrens mutex_exit(&zilog->zl_lock); 1437789Sahrens 14382638Sperrin zil_commit(zilog, UINT64_MAX, 0); 1439789Sahrens 14402638Sperrin /* 14412638Sperrin * Wait for any in-flight log writes to complete. 
/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}
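/*
 * Illustrative pairing (a sketch; the snapshot call is elided and the
 * error handling simplified).  Snapshot creation is expected to bracket
 * itself with suspend/resume so the snapshot carries an empty intent
 * log:
 *
 *	if (zil_suspend(zilog) != 0)
 *		return (EBUSY);		(unreplayed log; cannot suspend)
 *	... take the snapshot ...
 *	zil_resume(zilog);
 */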
/*
 * Read in the data for the dmu_sync()ed block, and change the log
 * record to write this whole block.
 */
void
zil_get_replay_data(zilog_t *zilog, lr_write_t *lr)
{
	blkptr_t *wbp = &lr->lr_blkptr;
	char *wbuf = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t blksz;

	if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
		blksz = BP_GET_LSIZE(&lr->lr_blkptr);
		/*
		 * If the blksz is zero then we must be replaying a log
		 * from a version prior to setting the block size of null
		 * blocks.  So we just zero out the actual write size
		 * requested.
		 */
		if (blksz == 0) {
			bzero(wbuf, lr->lr_length);
			return;
		}
		bzero(wbuf, blksz);
	} else {
		/*
		 * A subsequent write may have overwritten this block, in
		 * which case wbp may have been freed and reallocated, and
		 * our read of wbp may fail with a checksum error.  We can
		 * safely ignore this because the later write will provide
		 * the correct data.
		 */
		zbookmark_t zb;

		zb.zb_objset = dmu_objset_id(zilog->zl_os);
		zb.zb_object = lr->lr_foid;
		zb.zb_level = 0;
		zb.zb_blkid = -1;	/* unknown */

		blksz = BP_GET_LSIZE(&lr->lr_blkptr);
		(void) zio_wait(zio_read(NULL, zilog->zl_spa, wbp, wbuf, blksz,
		    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
	}
	lr->lr_offset -= lr->lr_offset % blksz;
	lr->lr_length = blksz;
}
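/*
 * Worked example of the adjustment above (illustrative numbers): if the
 * original record covered 512 bytes at offset 37376 and the dmu_sync()ed
 * block has an lsize of 4096, then lr_offset becomes
 * 37376 - (37376 % 4096) = 36864 and lr_length becomes 4096; that is,
 * the record is widened to rewrite the entire block just read (or
 * zeroed).
 */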
typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error;

	if (!zilog->zl_replay)	/* giving up */
		return;

	if (lr->lrc_txg < claim_txg)	/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so.  At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	for (pass = 1; pass <= 2; pass++) {
		zilog->zl_replaying_seq = lr->lrc_seq;
		/* Only byteswap (if needed) on the 1st pass. */
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
		    zr->zr_byteswap && pass == 1);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST.  So if we receive any error we try syncing out
		 * any removes and then retry the transaction.
		 */
		if (pass == 1)
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}

bad:
	ASSERT(error);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_replay = B_FALSE;
	kmem_free(name, MAXNAMELEN);
}

/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}
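/*
 * Illustrative call (a sketch): a consumer passes a table of replay
 * vectors indexed by transaction type; the ZPL, for instance, supplies
 * its zfs_replay_vector[] at mount time, roughly:
 *
 *	zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector);
 *
 * Table slots for record types a consumer never logs are expected to
 * hold a stub that returns an error, which ends up in the "bad" path
 * of zil_replay_log_record() above.
 */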
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = lbolt;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}

/*
 * Report whether all transactions are committed.
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * More than one log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed.
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}

/* ARGSUSED */
int
zil_vdev_offline(char *osname, void *arg)
{
	objset_t *os;
	zilog_t *zilog;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error)
		return (error);

	zilog = dmu_objset_zil(os);
	if (zil_suspend(zilog) != 0)
		error = EEXIST;
	else
		zil_resume(zilog);
	dmu_objset_rele(os, FTAG);
	return (error);
}
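/*
 * Illustrative caller (a sketch; error handling elided).  To check that
 * every intent log in a pool can be suspended, e.g. before offlining a
 * log device, a caller can walk the pool's datasets:
 *
 *	error = dmu_objset_find(spa_name(spa), zil_vdev_offline, NULL,
 *	    DS_FIND_CHILDREN);
 *
 * A nonzero result means some dataset still has an unreplayed or busy
 * log, so the device cannot be safely offlined.
 */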