1789Sahrens /* 2789Sahrens * CDDL HEADER START 3789Sahrens * 4789Sahrens * The contents of this file are subject to the terms of the 51472Sperrin * Common Development and Distribution License (the "License"). 61472Sperrin * You may not use this file except in compliance with the License. 7789Sahrens * 8789Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9789Sahrens * or http://www.opensolaris.org/os/licensing. 10789Sahrens * See the License for the specific language governing permissions 11789Sahrens * and limitations under the License. 12789Sahrens * 13789Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14789Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15789Sahrens * If applicable, add the following below this CDDL HEADER, with the 16789Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17789Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18789Sahrens * 19789Sahrens * CDDL HEADER END 20789Sahrens */ 21789Sahrens /* 228746SMatthew.Ahrens@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23789Sahrens * Use is subject to license terms. 24789Sahrens */ 25789Sahrens 26789Sahrens #include <sys/zfs_context.h> 27789Sahrens #include <sys/spa.h> 289701SGeorge.Wilson@Sun.COM #include <sys/spa_impl.h> 29789Sahrens #include <sys/dmu.h> 30789Sahrens #include <sys/zap.h> 31789Sahrens #include <sys/arc.h> 32789Sahrens #include <sys/stat.h> 33789Sahrens #include <sys/resource.h> 34789Sahrens #include <sys/zil.h> 35789Sahrens #include <sys/zil_impl.h> 36789Sahrens #include <sys/dsl_dataset.h> 37789Sahrens #include <sys/vdev.h> 383668Sgw25295 #include <sys/dmu_tx.h> 39789Sahrens 40789Sahrens /* 41789Sahrens * The zfs intent log (ZIL) saves transaction records of system calls 42789Sahrens * that change the file system in memory with enough information 43789Sahrens * to be able to replay them. These are stored in memory until 44789Sahrens * either the DMU transaction group (txg) commits them to the stable pool 45789Sahrens * and they can be discarded, or they are flushed to the stable log 46789Sahrens * (also in the pool) due to a fsync, O_DSYNC or other synchronous 47789Sahrens * requirement. In the event of a panic or power fail then those log 48789Sahrens * records (transactions) are replayed. 49789Sahrens * 50789Sahrens * There is one ZIL per file system. Its on-disk (pool) format consists 51789Sahrens * of 3 parts: 52789Sahrens * 53789Sahrens * - ZIL header 54789Sahrens * - ZIL blocks 55789Sahrens * - ZIL records 56789Sahrens * 57789Sahrens * A log record holds a system call transaction. Log blocks can 58789Sahrens * hold many log records and the blocks are chained together. 59789Sahrens * Each ZIL block contains a block pointer (blkptr_t) to the next 60789Sahrens * ZIL block in the chain. The ZIL header points to the first 61789Sahrens * block in the chain. Note there is not a fixed place in the pool 62789Sahrens * to hold blocks. They are dynamically allocated and freed as 63789Sahrens * needed from the blocks available. Figure X shows the ZIL structure: 64789Sahrens */ 65789Sahrens 66789Sahrens /* 672986Sek110237 * This global ZIL switch affects all pools 68789Sahrens */ 69789Sahrens int zil_disable = 0; /* disable intent logging */ 702986Sek110237 712986Sek110237 /* 722986Sek110237 * Tunable parameter for debugging or performance analysis. 
Setting 732986Sek110237 * zfs_nocacheflush will cause corruption on power loss if a volatile 742986Sek110237 * out-of-order write cache is enabled. 752986Sek110237 */ 762986Sek110237 boolean_t zfs_nocacheflush = B_FALSE; 77789Sahrens 78789Sahrens static kmem_cache_t *zil_lwb_cache; 79789Sahrens 80789Sahrens static int 81789Sahrens zil_dva_compare(const void *x1, const void *x2) 82789Sahrens { 83789Sahrens const dva_t *dva1 = x1; 84789Sahrens const dva_t *dva2 = x2; 85789Sahrens 86789Sahrens if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 87789Sahrens return (-1); 88789Sahrens if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 89789Sahrens return (1); 90789Sahrens 91789Sahrens if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 92789Sahrens return (-1); 93789Sahrens if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 94789Sahrens return (1); 95789Sahrens 96789Sahrens return (0); 97789Sahrens } 98789Sahrens 99789Sahrens static void 100789Sahrens zil_dva_tree_init(avl_tree_t *t) 101789Sahrens { 102789Sahrens avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t), 103789Sahrens offsetof(zil_dva_node_t, zn_node)); 104789Sahrens } 105789Sahrens 106789Sahrens static void 107789Sahrens zil_dva_tree_fini(avl_tree_t *t) 108789Sahrens { 109789Sahrens zil_dva_node_t *zn; 110789Sahrens void *cookie = NULL; 111789Sahrens 112789Sahrens while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 113789Sahrens kmem_free(zn, sizeof (zil_dva_node_t)); 114789Sahrens 115789Sahrens avl_destroy(t); 116789Sahrens } 117789Sahrens 118789Sahrens static int 119789Sahrens zil_dva_tree_add(avl_tree_t *t, dva_t *dva) 120789Sahrens { 121789Sahrens zil_dva_node_t *zn; 122789Sahrens avl_index_t where; 123789Sahrens 124789Sahrens if (avl_find(t, dva, &where) != NULL) 125789Sahrens return (EEXIST); 126789Sahrens 127789Sahrens zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP); 128789Sahrens zn->zn_dva = *dva; 129789Sahrens avl_insert(t, zn, where); 130789Sahrens 131789Sahrens return (0); 132789Sahrens } 133789Sahrens 1341807Sbonwick static zil_header_t * 1351807Sbonwick zil_header_in_syncing_context(zilog_t *zilog) 1361807Sbonwick { 1371807Sbonwick return ((zil_header_t *)zilog->zl_header); 1381807Sbonwick } 1391807Sbonwick 1401807Sbonwick static void 1411807Sbonwick zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 1421807Sbonwick { 1431807Sbonwick zio_cksum_t *zc = &bp->blk_cksum; 1441807Sbonwick 1451807Sbonwick zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 1461807Sbonwick zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 1471807Sbonwick zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 1481807Sbonwick zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 1491807Sbonwick } 1501807Sbonwick 151789Sahrens /* 152789Sahrens * Read a log block, make sure it's valid, and byteswap it if necessary. 153789Sahrens */ 154789Sahrens static int 1551807Sbonwick zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp) 156789Sahrens { 1571807Sbonwick blkptr_t blk = *bp; 1581544Seschrock zbookmark_t zb; 1592391Smaybee uint32_t aflags = ARC_WAIT; 160789Sahrens int error; 161789Sahrens 1621807Sbonwick zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET]; 1631544Seschrock zb.zb_object = 0; 1641544Seschrock zb.zb_level = -1; 1651807Sbonwick zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ]; 1661807Sbonwick 1671807Sbonwick *abufpp = NULL; 1681807Sbonwick 1697046Sahrens /* 1707046Sahrens * We shouldn't be doing any scrubbing while we're doing log 1717046Sahrens * replay, it's OK to not lock. 
1727046Sahrens */ 1737046Sahrens error = arc_read_nolock(NULL, zilog->zl_spa, &blk, 1741807Sbonwick arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | 1752391Smaybee ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb); 1761807Sbonwick 1771807Sbonwick if (error == 0) { 1781807Sbonwick char *data = (*abufpp)->b_data; 1791807Sbonwick uint64_t blksz = BP_GET_LSIZE(bp); 1801807Sbonwick zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1; 1811807Sbonwick zio_cksum_t cksum = bp->blk_cksum; 1821544Seschrock 1831807Sbonwick /* 1847522SNeil.Perrin@Sun.COM * Validate the checksummed log block. 1857522SNeil.Perrin@Sun.COM * 1861807Sbonwick * Sequence numbers should be... sequential. The checksum 1871807Sbonwick * verifier for the next block should be bp's checksum plus 1. 1887522SNeil.Perrin@Sun.COM * 1897522SNeil.Perrin@Sun.COM * Also check the log chain linkage and size used. 1901807Sbonwick */ 1911807Sbonwick cksum.zc_word[ZIL_ZC_SEQ]++; 1921807Sbonwick 1937522SNeil.Perrin@Sun.COM if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, 1947522SNeil.Perrin@Sun.COM sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) || 1957522SNeil.Perrin@Sun.COM (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) { 1967522SNeil.Perrin@Sun.COM error = ECKSUM; 1977522SNeil.Perrin@Sun.COM } 1981807Sbonwick 1991807Sbonwick if (error) { 2001807Sbonwick VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1); 2011807Sbonwick *abufpp = NULL; 2021807Sbonwick } 203789Sahrens } 204789Sahrens 2051807Sbonwick dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid); 206789Sahrens 2071807Sbonwick return (error); 208789Sahrens } 209789Sahrens 210789Sahrens /* 211789Sahrens * Parse the intent log, and call parse_func for each valid record within. 2121807Sbonwick * Return the highest sequence number. 213789Sahrens */ 2141807Sbonwick uint64_t 215789Sahrens zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 216789Sahrens zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 217789Sahrens { 2181807Sbonwick const zil_header_t *zh = zilog->zl_header; 2191807Sbonwick uint64_t claim_seq = zh->zh_claim_seq; 2201807Sbonwick uint64_t seq = 0; 2211807Sbonwick uint64_t max_seq = 0; 2221807Sbonwick blkptr_t blk = zh->zh_log; 2231807Sbonwick arc_buf_t *abuf; 224789Sahrens char *lrbuf, *lrp; 225789Sahrens zil_trailer_t *ztp; 226789Sahrens int reclen, error; 227789Sahrens 228789Sahrens if (BP_IS_HOLE(&blk)) 2291807Sbonwick return (max_seq); 230789Sahrens 231789Sahrens /* 232789Sahrens * Starting at the block pointed to by zh_log we read the log chain. 233789Sahrens * For each block in the chain we strongly check that block to 234789Sahrens * ensure its validity. We stop when an invalid block is found. 235789Sahrens * For each block pointer in the chain we call parse_blk_func(). 236789Sahrens * For each record in each valid block we call parse_lr_func(). 2371807Sbonwick * If the log has been claimed, stop if we encounter a sequence 2381807Sbonwick * number greater than the highest claimed sequence number. 
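 *
 * As an illustration only (this is not a call site in this file), a
 * caller that just wanted to count blocks and records could drive
 * zil_parse() with two trivial callbacks of the right types:
 *
 *	static void
 *	count_blk(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t txg)
 *	{
 *		(*(uint64_t *)arg)++;
 *	}
 *
 *	static void
 *	count_lr(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t txg)
 *	{
 *		(*(uint64_t *)arg)++;
 *	}
 *
 *	uint64_t count = 0;
 *	(void) zil_parse(zilog, count_blk, count_lr, &count, 0);
 *
 * The real callers below pass zil_claim_log_block()/zil_claim_log_record()
 * and zil_free_log_block()/zil_free_log_record() instead.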
239789Sahrens */ 240789Sahrens zil_dva_tree_init(&zilog->zl_dva_tree); 241789Sahrens for (;;) { 2421807Sbonwick seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 2431807Sbonwick 2441807Sbonwick if (claim_seq != 0 && seq > claim_seq) 2451807Sbonwick break; 2461807Sbonwick 2471807Sbonwick ASSERT(max_seq < seq); 2481807Sbonwick max_seq = seq; 2491807Sbonwick 2501807Sbonwick error = zil_read_log_block(zilog, &blk, &abuf); 251789Sahrens 252789Sahrens if (parse_blk_func != NULL) 253789Sahrens parse_blk_func(zilog, &blk, arg, txg); 254789Sahrens 255789Sahrens if (error) 256789Sahrens break; 257789Sahrens 2581807Sbonwick lrbuf = abuf->b_data; 259789Sahrens ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1; 260789Sahrens blk = ztp->zit_next_blk; 261789Sahrens 2621807Sbonwick if (parse_lr_func == NULL) { 2631807Sbonwick VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); 264789Sahrens continue; 2651807Sbonwick } 266789Sahrens 267789Sahrens for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) { 268789Sahrens lr_t *lr = (lr_t *)lrp; 269789Sahrens reclen = lr->lrc_reclen; 270789Sahrens ASSERT3U(reclen, >=, sizeof (lr_t)); 271789Sahrens parse_lr_func(zilog, lr, arg, txg); 272789Sahrens } 2731807Sbonwick VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); 274789Sahrens } 275789Sahrens zil_dva_tree_fini(&zilog->zl_dva_tree); 2761807Sbonwick 2771807Sbonwick return (max_seq); 278789Sahrens } 279789Sahrens 280789Sahrens /* ARGSUSED */ 281789Sahrens static void 282789Sahrens zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 283789Sahrens { 284789Sahrens spa_t *spa = zilog->zl_spa; 285789Sahrens int err; 286789Sahrens 287789Sahrens /* 288789Sahrens * Claim log block if not already committed and not already claimed. 289789Sahrens */ 290789Sahrens if (bp->blk_birth >= first_txg && 291789Sahrens zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) { 2927754SJeff.Bonwick@Sun.COM err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL, 2937754SJeff.Bonwick@Sun.COM ZIO_FLAG_MUSTSUCCEED)); 294789Sahrens ASSERT(err == 0); 295789Sahrens } 296789Sahrens } 297789Sahrens 298789Sahrens static void 299789Sahrens zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 300789Sahrens { 301789Sahrens if (lrc->lrc_txtype == TX_WRITE) { 302789Sahrens lr_write_t *lr = (lr_write_t *)lrc; 303789Sahrens zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg); 304789Sahrens } 305789Sahrens } 306789Sahrens 307789Sahrens /* ARGSUSED */ 308789Sahrens static void 309789Sahrens zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 310789Sahrens { 311789Sahrens zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx)); 312789Sahrens } 313789Sahrens 314789Sahrens static void 315789Sahrens zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 316789Sahrens { 317789Sahrens /* 318789Sahrens * If we previously claimed it, we need to free it. 319789Sahrens */ 320789Sahrens if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) { 321789Sahrens lr_write_t *lr = (lr_write_t *)lrc; 322789Sahrens blkptr_t *bp = &lr->lr_blkptr; 323789Sahrens if (bp->blk_birth >= claim_txg && 324789Sahrens !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) { 325789Sahrens (void) arc_free(NULL, zilog->zl_spa, 326789Sahrens dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT); 327789Sahrens } 328789Sahrens } 329789Sahrens } 330789Sahrens 331789Sahrens /* 332789Sahrens * Create an on-disk intent log. 
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block or we have one
	 * but it's the wrong endianness then allocate one.
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_blk(zilog->zl_spa, &blk, txg);
			BP_ZERO(&blk);
		}

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg, zilog->zl_logbias != ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
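 *
 * For reference, both settings of keep_first are used: zil_suspend()
 * below commits the log and then calls zil_destroy(zilog, B_FALSE) to
 * tear it down completely, while the mount-time replay path is expected
 * to destroy an empty log with keep_first == B_TRUE so the stubby first
 * block survives for the next synchronous write.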
415789Sahrens */ 416789Sahrens void 4171807Sbonwick zil_destroy(zilog_t *zilog, boolean_t keep_first) 418789Sahrens { 4191807Sbonwick const zil_header_t *zh = zilog->zl_header; 4201807Sbonwick lwb_t *lwb; 421789Sahrens dmu_tx_t *tx; 422789Sahrens uint64_t txg; 423789Sahrens 4241807Sbonwick /* 4251807Sbonwick * Wait for any previous destroy to complete. 4261807Sbonwick */ 4271807Sbonwick txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 428789Sahrens 4291807Sbonwick if (BP_IS_HOLE(&zh->zh_log)) 430789Sahrens return; 431789Sahrens 432789Sahrens tx = dmu_tx_create(zilog->zl_os); 433789Sahrens (void) dmu_tx_assign(tx, TXG_WAIT); 434789Sahrens dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 435789Sahrens txg = dmu_tx_get_txg(tx); 436789Sahrens 4371807Sbonwick mutex_enter(&zilog->zl_lock); 4381807Sbonwick 4395223Sperrin /* 4405223Sperrin * It is possible for the ZIL to get the previously mounted zilog 4415223Sperrin * structure of the same dataset if quickly remounted and the dbuf 4425223Sperrin * eviction has not completed. In this case we can see a non 4435223Sperrin * empty lwb list and keep_first will be set. We fix this by 4445223Sperrin * clearing the keep_first. This will be slower but it's very rare. 4455223Sperrin */ 4465223Sperrin if (!list_is_empty(&zilog->zl_lwb_list) && keep_first) 4475223Sperrin keep_first = B_FALSE; 4485223Sperrin 4491807Sbonwick ASSERT3U(zilog->zl_destroy_txg, <, txg); 450789Sahrens zilog->zl_destroy_txg = txg; 4511807Sbonwick zilog->zl_keep_first = keep_first; 4521807Sbonwick 4531807Sbonwick if (!list_is_empty(&zilog->zl_lwb_list)) { 4541807Sbonwick ASSERT(zh->zh_claim_txg == 0); 4551807Sbonwick ASSERT(!keep_first); 4561807Sbonwick while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 4571807Sbonwick list_remove(&zilog->zl_lwb_list, lwb); 4581807Sbonwick if (lwb->lwb_buf != NULL) 4591807Sbonwick zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 4601807Sbonwick zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg); 4611807Sbonwick kmem_cache_free(zil_lwb_cache, lwb); 4621807Sbonwick } 4631807Sbonwick } else { 4641807Sbonwick if (!keep_first) { 4651807Sbonwick (void) zil_parse(zilog, zil_free_log_block, 4661807Sbonwick zil_free_log_record, tx, zh->zh_claim_txg); 4671807Sbonwick } 4681807Sbonwick } 4692638Sperrin mutex_exit(&zilog->zl_lock); 470789Sahrens 471789Sahrens dmu_tx_commit(tx); 472789Sahrens } 473789Sahrens 4748989SNeil.Perrin@Sun.COM /* 4758989SNeil.Perrin@Sun.COM * return true if the initial log block is not valid 4768989SNeil.Perrin@Sun.COM */ 4778989SNeil.Perrin@Sun.COM static boolean_t 4788989SNeil.Perrin@Sun.COM zil_empty(zilog_t *zilog) 4798989SNeil.Perrin@Sun.COM { 4808989SNeil.Perrin@Sun.COM const zil_header_t *zh = zilog->zl_header; 4818989SNeil.Perrin@Sun.COM arc_buf_t *abuf = NULL; 4828989SNeil.Perrin@Sun.COM 4838989SNeil.Perrin@Sun.COM if (BP_IS_HOLE(&zh->zh_log)) 4848989SNeil.Perrin@Sun.COM return (B_TRUE); 4858989SNeil.Perrin@Sun.COM 4868989SNeil.Perrin@Sun.COM if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0) 4878989SNeil.Perrin@Sun.COM return (B_TRUE); 4888989SNeil.Perrin@Sun.COM 4898989SNeil.Perrin@Sun.COM VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); 4908989SNeil.Perrin@Sun.COM return (B_FALSE); 4918989SNeil.Perrin@Sun.COM } 4928989SNeil.Perrin@Sun.COM 4932199Sahrens int 494789Sahrens zil_claim(char *osname, void *txarg) 495789Sahrens { 496789Sahrens dmu_tx_t *tx = txarg; 497789Sahrens uint64_t first_txg = dmu_tx_get_txg(tx); 498789Sahrens zilog_t *zilog; 499789Sahrens zil_header_t *zh; 500789Sahrens objset_t *os; 501789Sahrens 
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog)) {
		zh->zh_flags |= ZIL_REPLAY_NEEDED;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
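 *
 * Like zil_claim() above, this is shaped as a per-dataset callback; the
 * pool-import code walks every dataset and applies it, along the lines of
 * (illustrative sketch only; the real call site lives in spa.c):
 *
 *	if (dmu_objset_find(spa_name(spa), zil_check_log_chain,
 *	    NULL, DS_FIND_CHILDREN) != 0)
 *		(treat one or more log devices as missing)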
5607294Sperrin */ 5617294Sperrin /* ARGSUSED */ 5627294Sperrin int 5637294Sperrin zil_check_log_chain(char *osname, void *txarg) 5647294Sperrin { 5657294Sperrin zilog_t *zilog; 5667294Sperrin zil_header_t *zh; 5677294Sperrin blkptr_t blk; 5687294Sperrin arc_buf_t *abuf; 5697294Sperrin objset_t *os; 5707294Sperrin char *lrbuf; 5717294Sperrin zil_trailer_t *ztp; 5727294Sperrin int error; 5737294Sperrin 57410298SMatthew.Ahrens@Sun.COM error = dmu_objset_hold(osname, FTAG, &os); 5757294Sperrin if (error) { 5767294Sperrin cmn_err(CE_WARN, "can't open objset for %s", osname); 5777294Sperrin return (0); 5787294Sperrin } 5797294Sperrin 5807294Sperrin zilog = dmu_objset_zil(os); 5817294Sperrin zh = zil_header_in_syncing_context(zilog); 5827294Sperrin blk = zh->zh_log; 5837294Sperrin if (BP_IS_HOLE(&blk)) { 58410298SMatthew.Ahrens@Sun.COM dmu_objset_rele(os, FTAG); 5857294Sperrin return (0); /* no chain */ 5867294Sperrin } 5877294Sperrin 5887294Sperrin for (;;) { 5897294Sperrin error = zil_read_log_block(zilog, &blk, &abuf); 5907294Sperrin if (error) 5917294Sperrin break; 5927294Sperrin lrbuf = abuf->b_data; 5937294Sperrin ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1; 5947294Sperrin blk = ztp->zit_next_blk; 5957294Sperrin VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); 5967294Sperrin } 59710298SMatthew.Ahrens@Sun.COM dmu_objset_rele(os, FTAG); 5987294Sperrin if (error == ECKSUM) 5997294Sperrin return (0); /* normal end of chain */ 6007294Sperrin return (error); 6017294Sperrin } 6027294Sperrin 6035688Sbonwick static int 6045688Sbonwick zil_vdev_compare(const void *x1, const void *x2) 605789Sahrens { 6065875Sperrin uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 6075875Sperrin uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 6085688Sbonwick 6095688Sbonwick if (v1 < v2) 6105688Sbonwick return (-1); 6115688Sbonwick if (v1 > v2) 6125688Sbonwick return (1); 6135688Sbonwick 6145688Sbonwick return (0); 6155688Sbonwick } 6165688Sbonwick 6175688Sbonwick void 6185688Sbonwick zil_add_block(zilog_t *zilog, blkptr_t *bp) 6195688Sbonwick { 6205688Sbonwick avl_tree_t *t = &zilog->zl_vdev_tree; 6215688Sbonwick avl_index_t where; 6225688Sbonwick zil_vdev_node_t *zv, zvsearch; 6235688Sbonwick int ndvas = BP_GET_NDVAS(bp); 6245688Sbonwick int i; 625789Sahrens 6262986Sek110237 if (zfs_nocacheflush) 627789Sahrens return; 628789Sahrens 6295688Sbonwick ASSERT(zilog->zl_writer); 6305688Sbonwick 6315688Sbonwick /* 6325688Sbonwick * Even though we're zl_writer, we still need a lock because the 6335688Sbonwick * zl_get_data() callbacks may have dmu_sync() done callbacks 6345688Sbonwick * that will run concurrently. 
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg.  If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer().  zil_sync() will only remove
	 * the lwb if lwb_buf is null.
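	 *
	 * For context, this done callback runs once per lwb child zio.
	 * The zio tree that zil_lwb_write_init() builds below is, roughly:
	 *
	 *	zl_root_zio (zio_root(), waited on in zil_commit_writer())
	 *	    lwb_zio #1 (zio_rewrite() -> zil_lwb_write_done())
	 *	    lwb_zio #2 (zio_rewrite() -> zil_lwb_write_done())
	 *	    ...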
711789Sahrens */ 712789Sahrens zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 713789Sahrens mutex_enter(&zilog->zl_lock); 714789Sahrens lwb->lwb_buf = NULL; 7154527Sperrin if (zio->io_error) 716789Sahrens zilog->zl_log_error = B_TRUE; 7179493SNeil.Perrin@Sun.COM 7189493SNeil.Perrin@Sun.COM /* 7199493SNeil.Perrin@Sun.COM * Now that we've written this log block, we have a stable pointer 7209493SNeil.Perrin@Sun.COM * to the next block in the chain, so it's OK to let the txg in 7219904SNeil.Perrin@Sun.COM * which we allocated the next block sync. We still have the 7229904SNeil.Perrin@Sun.COM * zl_lock to ensure zil_sync doesn't kmem free the lwb. 7239493SNeil.Perrin@Sun.COM */ 7249493SNeil.Perrin@Sun.COM txg_rele_to_sync(&lwb->lwb_txgh); 7259904SNeil.Perrin@Sun.COM mutex_exit(&zilog->zl_lock); 726789Sahrens } 727789Sahrens 728789Sahrens /* 7292237Smaybee * Initialize the io for a log block. 7302237Smaybee */ 7312237Smaybee static void 7322237Smaybee zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb) 7332237Smaybee { 7342237Smaybee zbookmark_t zb; 7352237Smaybee 7362237Smaybee zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET]; 7372237Smaybee zb.zb_object = 0; 7382237Smaybee zb.zb_level = -1; 7392237Smaybee zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 7402237Smaybee 7412638Sperrin if (zilog->zl_root_zio == NULL) { 7422638Sperrin zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL, 7432638Sperrin ZIO_FLAG_CANFAIL); 7442638Sperrin } 7453063Sperrin if (lwb->lwb_zio == NULL) { 7463063Sperrin lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa, 7479701SGeorge.Wilson@Sun.COM 0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, 7489701SGeorge.Wilson@Sun.COM zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE, 7499701SGeorge.Wilson@Sun.COM ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb); 7503063Sperrin } 7512237Smaybee } 7522237Smaybee 7532237Smaybee /* 754789Sahrens * Start a log block write and advance to the next log block. 755789Sahrens * Calls are serialized. 756789Sahrens */ 757789Sahrens static lwb_t * 758789Sahrens zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb) 759789Sahrens { 760789Sahrens lwb_t *nlwb; 761789Sahrens zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1; 7621807Sbonwick spa_t *spa = zilog->zl_spa; 7631807Sbonwick blkptr_t *bp = &ztp->zit_next_blk; 764789Sahrens uint64_t txg; 765789Sahrens uint64_t zil_blksz; 766789Sahrens int error; 767789Sahrens 768789Sahrens ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb)); 769789Sahrens 770789Sahrens /* 771789Sahrens * Allocate the next block and save its address in this block 772789Sahrens * before writing it in order to establish the log chain. 773789Sahrens * Note that if the allocation of nlwb synced before we wrote 774789Sahrens * the block that points at it (lwb), we'd leak it if we crashed. 775789Sahrens * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done(). 776789Sahrens */ 777789Sahrens txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh); 778789Sahrens txg_rele_to_quiesce(&lwb->lwb_txgh); 779789Sahrens 780789Sahrens /* 7811141Sperrin * Pick a ZIL blocksize. We request a size that is the 7821141Sperrin * maximum of the previous used size, the current used size and 7831141Sperrin * the amount waiting in the queue. 
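	 *
	 * A worked example of the calculation below, with hypothetical
	 * numbers: if zl_prev_used is 20K, zl_cur_used is 36K and
	 * zl_itx_list_sz is 50K, we take MAX(20K, 36K + trailer), then
	 * MAX of that and 50K + trailer, round up to a multiple of
	 * ZIL_MIN_BLKSZ, and finally clamp the result to ZIL_MAX_BLKSZ;
	 * the next block would end up a little over 50K.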
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg,
	    zilog->zl_logbias != ZFS_LOGBIAS_LATENCY);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced()
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
832789Sahrens */ 833789Sahrens nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 834789Sahrens 835789Sahrens nlwb->lwb_zilog = zilog; 8361807Sbonwick nlwb->lwb_blk = *bp; 837789Sahrens nlwb->lwb_nused = 0; 838789Sahrens nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk); 839789Sahrens nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz); 840789Sahrens nlwb->lwb_max_txg = txg; 8412237Smaybee nlwb->lwb_zio = NULL; 842789Sahrens 843789Sahrens /* 8443063Sperrin * Put new lwb at the end of the log chain 845789Sahrens */ 846789Sahrens mutex_enter(&zilog->zl_lock); 847789Sahrens list_insert_tail(&zilog->zl_lwb_list, nlwb); 8483063Sperrin mutex_exit(&zilog->zl_lock); 8493063Sperrin 8505688Sbonwick /* Record the block for later vdev flushing */ 8515688Sbonwick zil_add_block(zilog, &lwb->lwb_blk); 852789Sahrens 853789Sahrens /* 8542237Smaybee * kick off the write for the old log block 855789Sahrens */ 8562237Smaybee dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg); 8573063Sperrin ASSERT(lwb->lwb_zio); 8582237Smaybee zio_nowait(lwb->lwb_zio); 859789Sahrens 860789Sahrens return (nlwb); 861789Sahrens } 862789Sahrens 863789Sahrens static lwb_t * 864789Sahrens zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 865789Sahrens { 866789Sahrens lr_t *lrc = &itx->itx_lr; /* common log record */ 8672237Smaybee lr_write_t *lr = (lr_write_t *)lrc; 868789Sahrens uint64_t txg = lrc->lrc_txg; 869789Sahrens uint64_t reclen = lrc->lrc_reclen; 8702237Smaybee uint64_t dlen; 871789Sahrens 872789Sahrens if (lwb == NULL) 873789Sahrens return (NULL); 874789Sahrens ASSERT(lwb->lwb_buf != NULL); 875789Sahrens 8762237Smaybee if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) 8772237Smaybee dlen = P2ROUNDUP_TYPED( 8782237Smaybee lr->lr_length, sizeof (uint64_t), uint64_t); 8792237Smaybee else 8802237Smaybee dlen = 0; 8811669Sperrin 8821669Sperrin zilog->zl_cur_used += (reclen + dlen); 8831669Sperrin 8843063Sperrin zil_lwb_write_init(zilog, lwb); 8853063Sperrin 8861669Sperrin /* 8871669Sperrin * If this record won't fit in the current log block, start a new one. 8881669Sperrin */ 8891669Sperrin if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) { 8901669Sperrin lwb = zil_lwb_write_start(zilog, lwb); 8912237Smaybee if (lwb == NULL) 8921669Sperrin return (NULL); 8933063Sperrin zil_lwb_write_init(zilog, lwb); 8941669Sperrin ASSERT(lwb->lwb_nused == 0); 8951669Sperrin if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) { 8961669Sperrin txg_wait_synced(zilog->zl_dmu_pool, txg); 897789Sahrens return (lwb); 898789Sahrens } 899789Sahrens } 900789Sahrens 9012638Sperrin /* 9022638Sperrin * Update the lrc_seq, to be log record sequence number. See zil.h 9032638Sperrin * Then copy the record to the log buffer. 9042638Sperrin */ 9052638Sperrin lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */ 906789Sahrens bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen); 9072237Smaybee 9082237Smaybee /* 9092237Smaybee * If it's a write, fetch the data or get its blkptr as appropriate. 
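	 *
	 * The itx_wr_state values handled here are:
	 *	WR_COPIED    - the write data was copied into the itx when
	 *		       it was created, so nothing more is needed;
	 *	WR_NEED_COPY - the data is copied into this log block right
	 *		       after the record (dlen was reserved for it);
	 *	WR_INDIRECT  - only the block pointer is logged, and the
	 *		       zl_get_data callback (via dmu_sync) fills in
	 *		       lr_blkptr.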
9102237Smaybee */ 9112237Smaybee if (lrc->lrc_txtype == TX_WRITE) { 9122237Smaybee if (txg > spa_freeze_txg(zilog->zl_spa)) 9132237Smaybee txg_wait_synced(zilog->zl_dmu_pool, txg); 9142237Smaybee if (itx->itx_wr_state != WR_COPIED) { 9152237Smaybee char *dbuf; 9162237Smaybee int error; 9172237Smaybee 9182237Smaybee /* alignment is guaranteed */ 9192237Smaybee lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused); 9202237Smaybee if (dlen) { 9212237Smaybee ASSERT(itx->itx_wr_state == WR_NEED_COPY); 9222237Smaybee dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen; 9232237Smaybee lr->lr_common.lrc_reclen += dlen; 9242237Smaybee } else { 9252237Smaybee ASSERT(itx->itx_wr_state == WR_INDIRECT); 9262237Smaybee dbuf = NULL; 9272237Smaybee } 9282237Smaybee error = zilog->zl_get_data( 9292237Smaybee itx->itx_private, lr, dbuf, lwb->lwb_zio); 93010209SMark.Musante@Sun.COM if (error == EIO) { 93110209SMark.Musante@Sun.COM txg_wait_synced(zilog->zl_dmu_pool, txg); 93210209SMark.Musante@Sun.COM return (lwb); 93310209SMark.Musante@Sun.COM } 9342237Smaybee if (error) { 9352237Smaybee ASSERT(error == ENOENT || error == EEXIST || 9362237Smaybee error == EALREADY); 9372237Smaybee return (lwb); 9382237Smaybee } 9392237Smaybee } 9401669Sperrin } 9412237Smaybee 9422237Smaybee lwb->lwb_nused += reclen + dlen; 943789Sahrens lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 944789Sahrens ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb)); 945789Sahrens ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0); 946789Sahrens 947789Sahrens return (lwb); 948789Sahrens } 949789Sahrens 950789Sahrens itx_t * 9515331Samw zil_itx_create(uint64_t txtype, size_t lrsize) 952789Sahrens { 953789Sahrens itx_t *itx; 954789Sahrens 9551842Sperrin lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 956789Sahrens 957789Sahrens itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 958789Sahrens itx->itx_lr.lrc_txtype = txtype; 959789Sahrens itx->itx_lr.lrc_reclen = lrsize; 9606101Sperrin itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */ 961789Sahrens itx->itx_lr.lrc_seq = 0; /* defensive */ 962789Sahrens 963789Sahrens return (itx); 964789Sahrens } 965789Sahrens 966789Sahrens uint64_t 967789Sahrens zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 968789Sahrens { 969789Sahrens uint64_t seq; 970789Sahrens 971789Sahrens ASSERT(itx->itx_lr.lrc_seq == 0); 972789Sahrens 973789Sahrens mutex_enter(&zilog->zl_lock); 974789Sahrens list_insert_tail(&zilog->zl_itx_list, itx); 9756101Sperrin zilog->zl_itx_list_sz += itx->itx_sod; 976789Sahrens itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 977789Sahrens itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq; 978789Sahrens mutex_exit(&zilog->zl_lock); 979789Sahrens 980789Sahrens return (seq); 981789Sahrens } 982789Sahrens 983789Sahrens /* 984789Sahrens * Free up all in-memory intent log transactions that have now been synced. 
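 *
 * The transactions freed here were queued via zil_itx_create() and
 * zil_itx_assign() above; a minimal, hypothetical producer looks like:
 *
 *	itx_t *itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
 *	lr_setattr_t *lr = (lr_setattr_t *)&itx->itx_lr;
 *	lr->lr_foid = foid;			/- fill in the record -/
 *	itx->itx_sync = B_TRUE;			/- O_DSYNC-style request -/
 *	seq = zil_itx_assign(zilog, itx, tx);	/- under an assigned tx -/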
985789Sahrens */ 986789Sahrens static void 987789Sahrens zil_itx_clean(zilog_t *zilog) 988789Sahrens { 989789Sahrens uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa); 990789Sahrens uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa); 9913778Sjohansen list_t clean_list; 992789Sahrens itx_t *itx; 993789Sahrens 9943778Sjohansen list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 9953778Sjohansen 996789Sahrens mutex_enter(&zilog->zl_lock); 9972638Sperrin /* wait for a log writer to finish walking list */ 9982638Sperrin while (zilog->zl_writer) { 9992638Sperrin cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock); 10002638Sperrin } 10013778Sjohansen 10023778Sjohansen /* 10033778Sjohansen * Move the sync'd log transactions to a separate list so we can call 10043778Sjohansen * kmem_free without holding the zl_lock. 10053778Sjohansen * 10063778Sjohansen * There is no need to set zl_writer as we don't drop zl_lock here 10073778Sjohansen */ 1008789Sahrens while ((itx = list_head(&zilog->zl_itx_list)) != NULL && 1009789Sahrens itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) { 1010789Sahrens list_remove(&zilog->zl_itx_list, itx); 10116101Sperrin zilog->zl_itx_list_sz -= itx->itx_sod; 10123778Sjohansen list_insert_tail(&clean_list, itx); 10133778Sjohansen } 10143778Sjohansen cv_broadcast(&zilog->zl_cv_writer); 10153778Sjohansen mutex_exit(&zilog->zl_lock); 10163778Sjohansen 10173778Sjohansen /* destroy sync'd log transactions */ 10183778Sjohansen while ((itx = list_head(&clean_list)) != NULL) { 10193778Sjohansen list_remove(&clean_list, itx); 1020789Sahrens kmem_free(itx, offsetof(itx_t, itx_lr) 1021789Sahrens + itx->itx_lr.lrc_reclen); 1022789Sahrens } 10233778Sjohansen list_destroy(&clean_list); 1024789Sahrens } 1025789Sahrens 10262638Sperrin /* 10273063Sperrin * If there are any in-memory intent log transactions which have now been 10283063Sperrin * synced then start up a taskq to free them. 
10292638Sperrin */ 1030789Sahrens void 1031789Sahrens zil_clean(zilog_t *zilog) 1032789Sahrens { 10333063Sperrin itx_t *itx; 10343063Sperrin 1035789Sahrens mutex_enter(&zilog->zl_lock); 10363063Sperrin itx = list_head(&zilog->zl_itx_list); 10373063Sperrin if ((itx != NULL) && 10383063Sperrin (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) { 1039789Sahrens (void) taskq_dispatch(zilog->zl_clean_taskq, 10409321SNeil.Perrin@Sun.COM (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP); 10413063Sperrin } 1042789Sahrens mutex_exit(&zilog->zl_lock); 1043789Sahrens } 1044789Sahrens 10457754SJeff.Bonwick@Sun.COM static void 10462638Sperrin zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid) 1047789Sahrens { 1048789Sahrens uint64_t txg; 10493063Sperrin uint64_t commit_seq = 0; 10502638Sperrin itx_t *itx, *itx_next = (itx_t *)-1; 1051789Sahrens lwb_t *lwb; 1052789Sahrens spa_t *spa; 1053789Sahrens 10542638Sperrin zilog->zl_writer = B_TRUE; 10557754SJeff.Bonwick@Sun.COM ASSERT(zilog->zl_root_zio == NULL); 1056789Sahrens spa = zilog->zl_spa; 1057789Sahrens 1058789Sahrens if (zilog->zl_suspend) { 1059789Sahrens lwb = NULL; 1060789Sahrens } else { 1061789Sahrens lwb = list_tail(&zilog->zl_lwb_list); 1062789Sahrens if (lwb == NULL) { 10632638Sperrin /* 10642638Sperrin * Return if there's nothing to flush before we 10652638Sperrin * dirty the fs by calling zil_create() 10662638Sperrin */ 10672638Sperrin if (list_is_empty(&zilog->zl_itx_list)) { 10682638Sperrin zilog->zl_writer = B_FALSE; 10692638Sperrin return; 10702638Sperrin } 1071789Sahrens mutex_exit(&zilog->zl_lock); 1072789Sahrens zil_create(zilog); 1073789Sahrens mutex_enter(&zilog->zl_lock); 1074789Sahrens lwb = list_tail(&zilog->zl_lwb_list); 1075789Sahrens } 1076789Sahrens } 1077789Sahrens 10783063Sperrin /* Loop through in-memory log transactions filling log blocks. */ 10792638Sperrin DTRACE_PROBE1(zil__cw1, zilog_t *, zilog); 1080789Sahrens for (;;) { 10812638Sperrin /* 10822638Sperrin * Find the next itx to push: 10832638Sperrin * Push all transactions related to specified foid and all 10842638Sperrin * other transactions except TX_WRITE, TX_TRUNCATE, 10852638Sperrin * TX_SETATTR and TX_ACL for all other files. 10862638Sperrin */ 10872638Sperrin if (itx_next != (itx_t *)-1) 10882638Sperrin itx = itx_next; 10892638Sperrin else 10902638Sperrin itx = list_head(&zilog->zl_itx_list); 10912638Sperrin for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) { 10922638Sperrin if (foid == 0) /* push all foids? */ 10932638Sperrin break; 10943063Sperrin if (itx->itx_sync) /* push all O_[D]SYNC */ 10953063Sperrin break; 10962638Sperrin switch (itx->itx_lr.lrc_txtype) { 10972638Sperrin case TX_SETATTR: 10982638Sperrin case TX_WRITE: 10992638Sperrin case TX_TRUNCATE: 11002638Sperrin case TX_ACL: 11012638Sperrin /* lr_foid is same offset for these records */ 11022638Sperrin if (((lr_write_t *)&itx->itx_lr)->lr_foid 11032638Sperrin != foid) { 11042638Sperrin continue; /* skip this record */ 11052638Sperrin } 11062638Sperrin } 11072638Sperrin break; 11082638Sperrin } 1109789Sahrens if (itx == NULL) 1110789Sahrens break; 1111789Sahrens 1112789Sahrens if ((itx->itx_lr.lrc_seq > seq) && 11132638Sperrin ((lwb == NULL) || (lwb->lwb_nused == 0) || 11146101Sperrin (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) { 1115789Sahrens break; 11163063Sperrin } 1117789Sahrens 11182638Sperrin /* 11192638Sperrin * Save the next pointer. 
Even though we soon drop 11202638Sperrin * zl_lock all threads that may change the list 11212638Sperrin * (another writer or zil_itx_clean) can't do so until 11222638Sperrin * they have zl_writer. 11232638Sperrin */ 11242638Sperrin itx_next = list_next(&zilog->zl_itx_list, itx); 1125789Sahrens list_remove(&zilog->zl_itx_list, itx); 11266101Sperrin zilog->zl_itx_list_sz -= itx->itx_sod; 11273063Sperrin mutex_exit(&zilog->zl_lock); 1128789Sahrens txg = itx->itx_lr.lrc_txg; 1129789Sahrens ASSERT(txg); 1130789Sahrens 1131789Sahrens if (txg > spa_last_synced_txg(spa) || 1132789Sahrens txg > spa_freeze_txg(spa)) 1133789Sahrens lwb = zil_lwb_commit(zilog, itx, lwb); 1134789Sahrens kmem_free(itx, offsetof(itx_t, itx_lr) 1135789Sahrens + itx->itx_lr.lrc_reclen); 1136789Sahrens mutex_enter(&zilog->zl_lock); 1137789Sahrens } 11382638Sperrin DTRACE_PROBE1(zil__cw2, zilog_t *, zilog); 11393063Sperrin /* determine commit sequence number */ 11403063Sperrin itx = list_head(&zilog->zl_itx_list); 11413063Sperrin if (itx) 11423063Sperrin commit_seq = itx->itx_lr.lrc_seq; 11433063Sperrin else 11443063Sperrin commit_seq = zilog->zl_itx_seq; 1145789Sahrens mutex_exit(&zilog->zl_lock); 1146789Sahrens 1147789Sahrens /* write the last block out */ 11483063Sperrin if (lwb != NULL && lwb->lwb_zio != NULL) 1149789Sahrens lwb = zil_lwb_write_start(zilog, lwb); 1150789Sahrens 11511141Sperrin zilog->zl_prev_used = zilog->zl_cur_used; 11521141Sperrin zilog->zl_cur_used = 0; 11531141Sperrin 11542638Sperrin /* 11552638Sperrin * Wait if necessary for the log blocks to be on stable storage. 11562638Sperrin */ 11572638Sperrin if (zilog->zl_root_zio) { 11582638Sperrin DTRACE_PROBE1(zil__cw3, zilog_t *, zilog); 11592638Sperrin (void) zio_wait(zilog->zl_root_zio); 11607754SJeff.Bonwick@Sun.COM zilog->zl_root_zio = NULL; 11612638Sperrin DTRACE_PROBE1(zil__cw4, zilog_t *, zilog); 11625688Sbonwick zil_flush_vdevs(zilog); 1163789Sahrens } 11641141Sperrin 1165789Sahrens if (zilog->zl_log_error || lwb == NULL) { 1166789Sahrens zilog->zl_log_error = 0; 1167789Sahrens txg_wait_synced(zilog->zl_dmu_pool, 0); 1168789Sahrens } 11693063Sperrin 11703063Sperrin mutex_enter(&zilog->zl_lock); 11711141Sperrin zilog->zl_writer = B_FALSE; 11723063Sperrin 11733063Sperrin ASSERT3U(commit_seq, >=, zilog->zl_commit_seq); 11743063Sperrin zilog->zl_commit_seq = commit_seq; 11752638Sperrin } 11762638Sperrin 11772638Sperrin /* 11782638Sperrin * Push zfs transactions to stable storage up to the supplied sequence number. 11792638Sperrin * If foid is 0 push out all transactions, otherwise push only those 11802638Sperrin * for that file or might have been used to create that file. 
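 *
 * A typical caller is the ZPL fsync path, which (roughly) does:
 *
 *	zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id);
 *
 * i.e. it passes the last itx sequence number recorded against the znode
 * and the object number of the file being synced, so only that file's
 * records (and anything needed to create it) have to be pushed out.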
11812638Sperrin */ 11822638Sperrin void 11832638Sperrin zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid) 11842638Sperrin { 11852638Sperrin if (zilog == NULL || seq == 0) 11862638Sperrin return; 11872638Sperrin 11882638Sperrin mutex_enter(&zilog->zl_lock); 11892638Sperrin 11902638Sperrin seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */ 11912638Sperrin 11923063Sperrin while (zilog->zl_writer) { 11932638Sperrin cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock); 11943063Sperrin if (seq < zilog->zl_commit_seq) { 11953063Sperrin mutex_exit(&zilog->zl_lock); 11963063Sperrin return; 11973063Sperrin } 11983063Sperrin } 11992638Sperrin zil_commit_writer(zilog, seq, foid); /* drops zl_lock */ 12003063Sperrin /* wake up others waiting on the commit */ 12013063Sperrin cv_broadcast(&zilog->zl_cv_writer); 12023063Sperrin mutex_exit(&zilog->zl_lock); 1203789Sahrens } 1204789Sahrens 1205789Sahrens /* 1206789Sahrens * Called in syncing context to free committed log blocks and update log header. 1207789Sahrens */ 1208789Sahrens void 1209789Sahrens zil_sync(zilog_t *zilog, dmu_tx_t *tx) 1210789Sahrens { 12111807Sbonwick zil_header_t *zh = zil_header_in_syncing_context(zilog); 1212789Sahrens uint64_t txg = dmu_tx_get_txg(tx); 1213789Sahrens spa_t *spa = zilog->zl_spa; 1214789Sahrens lwb_t *lwb; 1215789Sahrens 12169396SMatthew.Ahrens@Sun.COM /* 12179396SMatthew.Ahrens@Sun.COM * We don't zero out zl_destroy_txg, so make sure we don't try 12189396SMatthew.Ahrens@Sun.COM * to destroy it twice. 12199396SMatthew.Ahrens@Sun.COM */ 12209396SMatthew.Ahrens@Sun.COM if (spa_sync_pass(spa) != 1) 12219396SMatthew.Ahrens@Sun.COM return; 12229396SMatthew.Ahrens@Sun.COM 12231807Sbonwick mutex_enter(&zilog->zl_lock); 12241807Sbonwick 1225789Sahrens ASSERT(zilog->zl_stop_sync == 0); 1226789Sahrens 12278227SNeil.Perrin@Sun.COM zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK]; 1228789Sahrens 1229789Sahrens if (zilog->zl_destroy_txg == txg) { 12301807Sbonwick blkptr_t blk = zh->zh_log; 12311807Sbonwick 12321807Sbonwick ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 12331807Sbonwick 12341807Sbonwick bzero(zh, sizeof (zil_header_t)); 12358227SNeil.Perrin@Sun.COM bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 12361807Sbonwick 12371807Sbonwick if (zilog->zl_keep_first) { 12381807Sbonwick /* 12391807Sbonwick * If this block was part of log chain that couldn't 12401807Sbonwick * be claimed because a device was missing during 12411807Sbonwick * zil_claim(), but that device later returns, 12421807Sbonwick * then this block could erroneously appear valid. 12431807Sbonwick * To guard against this, assign a new GUID to the new 12441807Sbonwick * log chain so it doesn't matter what blk points to. 
12451807Sbonwick */ 12461807Sbonwick zil_init_log_chain(zilog, &blk); 12471807Sbonwick zh->zh_log = blk; 12481807Sbonwick } 1249789Sahrens } 1250789Sahrens 12519701SGeorge.Wilson@Sun.COM while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 12522638Sperrin zh->zh_log = lwb->lwb_blk; 1253789Sahrens if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 1254789Sahrens break; 1255789Sahrens list_remove(&zilog->zl_lwb_list, lwb); 1256789Sahrens zio_free_blk(spa, &lwb->lwb_blk, txg); 1257789Sahrens kmem_cache_free(zil_lwb_cache, lwb); 12583668Sgw25295 12593668Sgw25295 /* 12603668Sgw25295 * If we don't have anything left in the lwb list then 12613668Sgw25295 * we've had an allocation failure and we need to zero 12623668Sgw25295 * out the zil_header blkptr so that we don't end 12633668Sgw25295 * up freeing the same block twice. 12643668Sgw25295 */ 12653668Sgw25295 if (list_head(&zilog->zl_lwb_list) == NULL) 12663668Sgw25295 BP_ZERO(&zh->zh_log); 1267789Sahrens } 1268789Sahrens mutex_exit(&zilog->zl_lock); 1269789Sahrens } 1270789Sahrens 1271789Sahrens void 1272789Sahrens zil_init(void) 1273789Sahrens { 1274789Sahrens zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 12752856Snd150628 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); 1276789Sahrens } 1277789Sahrens 1278789Sahrens void 1279789Sahrens zil_fini(void) 1280789Sahrens { 1281789Sahrens kmem_cache_destroy(zil_lwb_cache); 1282789Sahrens } 1283789Sahrens 1284*10310SNeil.Perrin@Sun.COM void 1285*10310SNeil.Perrin@Sun.COM zil_set_logbias(zilog_t *zilog, uint64_t logbias) 1286*10310SNeil.Perrin@Sun.COM { 1287*10310SNeil.Perrin@Sun.COM zilog->zl_logbias = logbias; 1288*10310SNeil.Perrin@Sun.COM } 1289*10310SNeil.Perrin@Sun.COM 1290789Sahrens zilog_t * 1291789Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys) 1292789Sahrens { 1293789Sahrens zilog_t *zilog; 1294789Sahrens 1295789Sahrens zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 1296789Sahrens 1297789Sahrens zilog->zl_header = zh_phys; 1298789Sahrens zilog->zl_os = os; 1299789Sahrens zilog->zl_spa = dmu_objset_spa(os); 1300789Sahrens zilog->zl_dmu_pool = dmu_objset_pool(os); 13011807Sbonwick zilog->zl_destroy_txg = TXG_INITIAL - 1; 1302*10310SNeil.Perrin@Sun.COM zilog->zl_logbias = dmu_objset_logbias(os); 1303789Sahrens 13042856Snd150628 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 13052856Snd150628 1306789Sahrens list_create(&zilog->zl_itx_list, sizeof (itx_t), 1307789Sahrens offsetof(itx_t, itx_node)); 1308789Sahrens 1309789Sahrens list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 1310789Sahrens offsetof(lwb_t, lwb_node)); 1311789Sahrens 13125688Sbonwick mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 13135688Sbonwick 13145688Sbonwick avl_create(&zilog->zl_vdev_tree, zil_vdev_compare, 13155688Sbonwick sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 1316789Sahrens 13175913Sperrin cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL); 13185913Sperrin cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 13195913Sperrin 1320789Sahrens return (zilog); 1321789Sahrens } 1322789Sahrens 1323789Sahrens void 1324789Sahrens zil_free(zilog_t *zilog) 1325789Sahrens { 1326789Sahrens lwb_t *lwb; 1327789Sahrens 1328789Sahrens zilog->zl_stop_sync = 1; 1329789Sahrens 1330789Sahrens while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 1331789Sahrens list_remove(&zilog->zl_lwb_list, lwb); 1332789Sahrens if (lwb->lwb_buf != NULL) 1333789Sahrens zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1334789Sahrens kmem_cache_free(zil_lwb_cache, lwb); 1335789Sahrens } 
1336789Sahrens list_destroy(&zilog->zl_lwb_list); 1337789Sahrens 13385688Sbonwick avl_destroy(&zilog->zl_vdev_tree); 13395688Sbonwick mutex_destroy(&zilog->zl_vdev_lock); 1340789Sahrens 1341789Sahrens ASSERT(list_head(&zilog->zl_itx_list) == NULL); 1342789Sahrens list_destroy(&zilog->zl_itx_list); 13432856Snd150628 mutex_destroy(&zilog->zl_lock); 1344789Sahrens 13455913Sperrin cv_destroy(&zilog->zl_cv_writer); 13465913Sperrin cv_destroy(&zilog->zl_cv_suspend); 13475913Sperrin 1348789Sahrens kmem_free(zilog, sizeof (zilog_t)); 1349789Sahrens } 1350789Sahrens 1351789Sahrens /* 1352789Sahrens * Open an intent log. 1353789Sahrens */ 1354789Sahrens zilog_t * 1355789Sahrens zil_open(objset_t *os, zil_get_data_t *get_data) 1356789Sahrens { 1357789Sahrens zilog_t *zilog = dmu_objset_zil(os); 1358789Sahrens 1359789Sahrens zilog->zl_get_data = get_data; 1360789Sahrens zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri, 1361789Sahrens 2, 2, TASKQ_PREPOPULATE); 1362789Sahrens 1363789Sahrens return (zilog); 1364789Sahrens } 1365789Sahrens 1366789Sahrens /* 1367789Sahrens * Close an intent log. 1368789Sahrens */ 1369789Sahrens void 1370789Sahrens zil_close(zilog_t *zilog) 1371789Sahrens { 13721807Sbonwick /* 13731807Sbonwick * If the log isn't already committed, mark the objset dirty 13741807Sbonwick * (so zil_sync() will be called) and wait for that txg to sync. 13751807Sbonwick */ 13761807Sbonwick if (!zil_is_committed(zilog)) { 13771807Sbonwick uint64_t txg; 13781807Sbonwick dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 13791807Sbonwick (void) dmu_tx_assign(tx, TXG_WAIT); 13801807Sbonwick dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 13811807Sbonwick txg = dmu_tx_get_txg(tx); 13821807Sbonwick dmu_tx_commit(tx); 13831807Sbonwick txg_wait_synced(zilog->zl_dmu_pool, txg); 13841807Sbonwick } 13851807Sbonwick 1386789Sahrens taskq_destroy(zilog->zl_clean_taskq); 1387789Sahrens zilog->zl_clean_taskq = NULL; 1388789Sahrens zilog->zl_get_data = NULL; 1389789Sahrens 1390789Sahrens zil_itx_clean(zilog); 1391789Sahrens ASSERT(list_head(&zilog->zl_itx_list) == NULL); 1392789Sahrens } 1393789Sahrens 1394789Sahrens /* 1395789Sahrens * Suspend an intent log. While in suspended mode, we still honor 1396789Sahrens * synchronous semantics, but we rely on txg_wait_synced() to do it. 1397789Sahrens * We suspend the log briefly when taking a snapshot so that the snapshot 1398789Sahrens * contains all the data it's supposed to, and has an empty intent log. 1399789Sahrens */ 1400789Sahrens int 1401789Sahrens zil_suspend(zilog_t *zilog) 1402789Sahrens { 14031807Sbonwick const zil_header_t *zh = zilog->zl_header; 1404789Sahrens 1405789Sahrens mutex_enter(&zilog->zl_lock); 14068989SNeil.Perrin@Sun.COM if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 1407789Sahrens mutex_exit(&zilog->zl_lock); 1408789Sahrens return (EBUSY); 1409789Sahrens } 14101807Sbonwick if (zilog->zl_suspend++ != 0) { 14111807Sbonwick /* 14121807Sbonwick * Someone else already began a suspend. 14131807Sbonwick * Just wait for them to finish. 14141807Sbonwick */ 14151807Sbonwick while (zilog->zl_suspending) 14161807Sbonwick cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 14171807Sbonwick mutex_exit(&zilog->zl_lock); 14181807Sbonwick return (0); 14191807Sbonwick } 14201807Sbonwick zilog->zl_suspending = B_TRUE; 1421789Sahrens mutex_exit(&zilog->zl_lock); 1422789Sahrens 14232638Sperrin zil_commit(zilog, UINT64_MAX, 0); 1424789Sahrens 14252638Sperrin /* 14262638Sperrin * Wait for any in-flight log writes to complete. 
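	 *
	 * For reference, the snapshot code brackets its work with this
	 * function and zil_resume(); an illustrative, simplified caller
	 * (take_snapshot() is a hypothetical stand-in):
	 *
	 *	if (zil_suspend(zilog) == 0) {
	 *		error = take_snapshot(os);
	 *		zil_resume(zilog);
	 *	}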
1451789Sahrens
1452789Sahrens typedef struct zil_replay_arg {
1453789Sahrens     objset_t *zr_os;
1454789Sahrens     zil_replay_func_t **zr_replay;
1455789Sahrens     void *zr_arg;
1456789Sahrens     boolean_t zr_byteswap;
1457789Sahrens     char *zr_lrbuf;
1458789Sahrens } zil_replay_arg_t;
1459789Sahrens
1460789Sahrens static void
1461789Sahrens zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1462789Sahrens {
1463789Sahrens     zil_replay_arg_t *zr = zra;
14641807Sbonwick     const zil_header_t *zh = zilog->zl_header;
1465789Sahrens     uint64_t reclen = lr->lrc_reclen;
1466789Sahrens     uint64_t txtype = lr->lrc_txtype;
14673063Sperrin     char *name;
14688227SNeil.Perrin@Sun.COM     int pass, error;
1469789Sahrens
14708227SNeil.Perrin@Sun.COM     if (!zilog->zl_replay)    /* giving up */
1471789Sahrens         return;
1472789Sahrens
1473789Sahrens     if (lr->lrc_txg < claim_txg)    /* already committed */
1474789Sahrens         return;
1475789Sahrens
1476789Sahrens     if (lr->lrc_seq <= zh->zh_replay_seq)    /* already replayed */
1477789Sahrens         return;
1478789Sahrens
14795331Samw     /* Strip case-insensitive bit, still present in log record */
14805331Samw     txtype &= ~TX_CI;
14815331Samw
14828227SNeil.Perrin@Sun.COM     if (txtype == 0 || txtype >= TX_MAX_TYPE) {
14838227SNeil.Perrin@Sun.COM         error = EINVAL;
14848227SNeil.Perrin@Sun.COM         goto bad;
14858227SNeil.Perrin@Sun.COM     }
14868227SNeil.Perrin@Sun.COM
1487789Sahrens     /*
1488789Sahrens      * Make a copy of the data so we can revise and extend it.
1489789Sahrens      */
1490789Sahrens     bcopy(lr, zr->zr_lrbuf, reclen);
1491789Sahrens
1492789Sahrens     /*
1493789Sahrens      * The log block containing this lr may have been byteswapped
1494789Sahrens      * so that we can easily examine common fields like lrc_txtype.
1495789Sahrens      * However, the log is a mix of different data types, and only the
1496789Sahrens      * replay vectors know how to byteswap their records.  Therefore, if
1497789Sahrens      * the lr was byteswapped, undo it before invoking the replay vector.
1498789Sahrens      */
1499789Sahrens     if (zr->zr_byteswap)
1500789Sahrens         byteswap_uint64_array(zr->zr_lrbuf, reclen);
1501789Sahrens
1502789Sahrens     /*
1503789Sahrens      * If this is a TX_WRITE with a blkptr, suck in the data.
1504789Sahrens      */
1505789Sahrens     if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1506789Sahrens         lr_write_t *lrw = (lr_write_t *)lr;
1507789Sahrens         blkptr_t *wbp = &lrw->lr_blkptr;
1508789Sahrens         uint64_t wlen = lrw->lr_length;
1509789Sahrens         char *wbuf = zr->zr_lrbuf + reclen;
1510789Sahrens
1511789Sahrens         if (BP_IS_HOLE(wbp)) {    /* compressed to a hole */
1512789Sahrens             bzero(wbuf, wlen);
1513789Sahrens         } else {
1514789Sahrens             /*
1515789Sahrens              * A subsequent write may have overwritten this block,
1516789Sahrens              * in which case wbp may have been freed and
1517789Sahrens              * reallocated, and our read of wbp may fail with a
1518789Sahrens              * checksum error.  We can safely ignore this because
1519789Sahrens              * the later write will provide the correct data.
1520789Sahrens              */
15211544Seschrock             zbookmark_t zb;
15221544Seschrock
15231544Seschrock             zb.zb_objset = dmu_objset_id(zilog->zl_os);
15241544Seschrock             zb.zb_object = lrw->lr_foid;
15251544Seschrock             zb.zb_level = -1;
15261544Seschrock             zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
15271544Seschrock
1528789Sahrens             (void) zio_wait(zio_read(NULL, zilog->zl_spa,
1529789Sahrens                 wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1530789Sahrens                 ZIO_PRIORITY_SYNC_READ,
15311544Seschrock                 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1532789Sahrens             (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1533789Sahrens         }
1534789Sahrens     }
1535789Sahrens
1536789Sahrens     /*
1537789Sahrens      * We must now do two things atomically: replay this log record,
15388227SNeil.Perrin@Sun.COM      * and update the log header sequence number to reflect the fact that
15398227SNeil.Perrin@Sun.COM      * we did so.  At the end of each replay function the sequence number
15408227SNeil.Perrin@Sun.COM      * is updated if we are in replay mode.
1541789Sahrens      */
15428227SNeil.Perrin@Sun.COM     for (pass = 1; pass <= 2; pass++) {
15438227SNeil.Perrin@Sun.COM         zilog->zl_replaying_seq = lr->lrc_seq;
15448227SNeil.Perrin@Sun.COM         /* Only byteswap (if needed) on the 1st pass. */
15458227SNeil.Perrin@Sun.COM         error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
15468227SNeil.Perrin@Sun.COM             zr->zr_byteswap && pass == 1);
1547789Sahrens
15483063Sperrin         if (!error)
15493063Sperrin             return;
15503063Sperrin
15513063Sperrin         /*
15523063Sperrin          * The DMU's dnode layer doesn't see removes until the txg
15533063Sperrin          * commits, so a subsequent claim can spuriously fail with
15548227SNeil.Perrin@Sun.COM          * EEXIST.  So if we receive any error, we try syncing out
15558227SNeil.Perrin@Sun.COM          * any removes and then retry the transaction.
15563063Sperrin          */
15578227SNeil.Perrin@Sun.COM         if (pass == 1)
15583063Sperrin             txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1559789Sahrens     }
1560789Sahrens
15617904SNeil.Perrin@Sun.COM bad:
15628227SNeil.Perrin@Sun.COM     ASSERT(error);
15633063Sperrin     name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
15643063Sperrin     dmu_objset_name(zr->zr_os, name);
15653063Sperrin     cmn_err(CE_WARN, "ZFS replay transaction error %d, "
15665331Samw         "dataset %s, seq 0x%llx, txtype %llu %s\n",
15675331Samw         error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
15685331Samw         (lr->lrc_txtype & TX_CI) ? "CI" : "");
15698227SNeil.Perrin@Sun.COM     zilog->zl_replay = B_FALSE;
15703063Sperrin     kmem_free(name, MAXNAMELEN);
15713063Sperrin }
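/*
 * Two minimal sketches of the consumer side of the interface above, under
 * stated assumptions; the real replay vectors are supplied by the
 * filesystem code, not by this file.
 *
 * 1. A replay vector matching the call made above,
 *    zr_replay[txtype](zr_arg, zr_lrbuf, byteswap).  The my_fs_t type and
 *    my_fs_remove() helper are hypothetical; note that variable-length
 *    data (here, the name) is assumed to follow the fixed-size record.
 *
 *      static int
 *      my_replay_remove(void *arg, char *lrbuf, boolean_t byteswap)
 *      {
 *              my_fs_t *fs = arg;
 *              lr_remove_t *lr = (lr_remove_t *)lrbuf;
 *              char *name = (char *)(lr + 1);
 *
 *              if (byteswap)
 *                      byteswap_uint64_array(lr, sizeof (*lr));
 *
 *              return (my_fs_remove(fs, lr->lr_doid, name));
 *      }
 *
 * 2. Handing a table of such vectors to zil_replay() (defined below) at
 *    mount time.  The table is indexed by txtype and must contain
 *    TX_MAX_TYPE entries; slot 0 is unused because txtype 0 is invalid.
 *    The my_replay_* functions and my_fs argument are hypothetical.
 *
 *      static zil_replay_func_t *my_replay_vector[TX_MAX_TYPE] = {
 *              my_replay_error,        -- 0: no such transaction type
 *              my_replay_create,       -- TX_CREATE
 *              my_replay_mkdir,        -- TX_MKDIR
 *              ...
 *      };
 *
 *      zil_replay(os, my_fs, my_replay_vector);
 */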
1572789Sahrens
15733063Sperrin /* ARGSUSED */
15743063Sperrin static void
15753063Sperrin zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
15763063Sperrin {
15773063Sperrin     zilog->zl_replay_blks++;
1578789Sahrens }
1579789Sahrens
1580789Sahrens /*
15811362Sperrin  * If this dataset has a non-empty intent log, replay it and destroy it.
1582789Sahrens  */
1583789Sahrens void
15848227SNeil.Perrin@Sun.COM zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1585789Sahrens {
1586789Sahrens     zilog_t *zilog = dmu_objset_zil(os);
15871807Sbonwick     const zil_header_t *zh = zilog->zl_header;
15881807Sbonwick     zil_replay_arg_t zr;
15891362Sperrin
15908989SNeil.Perrin@Sun.COM     if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
15911807Sbonwick         zil_destroy(zilog, B_TRUE);
15921362Sperrin         return;
15931362Sperrin     }
1594789Sahrens
1595789Sahrens     zr.zr_os = os;
1596789Sahrens     zr.zr_replay = replay_func;
1597789Sahrens     zr.zr_arg = arg;
15981807Sbonwick     zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1599789Sahrens     zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1600789Sahrens
1601789Sahrens     /*
1602789Sahrens      * Wait for in-progress removes to sync before starting replay.
1603789Sahrens      */
1604789Sahrens     txg_wait_synced(zilog->zl_dmu_pool, 0);
1605789Sahrens
16068227SNeil.Perrin@Sun.COM     zilog->zl_replay = B_TRUE;
16073063Sperrin     zilog->zl_replay_time = lbolt;
16083063Sperrin     ASSERT(zilog->zl_replay_blks == 0);
16093063Sperrin     (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
16101807Sbonwick         zh->zh_claim_txg);
1611789Sahrens     kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1612789Sahrens
16131807Sbonwick     zil_destroy(zilog, B_FALSE);
16145712Sahrens     txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
16158227SNeil.Perrin@Sun.COM     zilog->zl_replay = B_FALSE;
1616789Sahrens }
16171646Sperrin
16181646Sperrin /*
16191646Sperrin  * Report whether all transactions are committed
16201646Sperrin  */
16211646Sperrin int
16221646Sperrin zil_is_committed(zilog_t *zilog)
16231646Sperrin {
16241646Sperrin     lwb_t *lwb;
16252638Sperrin     int ret;
16261646Sperrin
16272638Sperrin     mutex_enter(&zilog->zl_lock);
16282638Sperrin     while (zilog->zl_writer)
16292638Sperrin         cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
16302638Sperrin
16312638Sperrin     /* recent unpushed intent log transactions? */
16322638Sperrin     if (!list_is_empty(&zilog->zl_itx_list)) {
16332638Sperrin         ret = B_FALSE;
16342638Sperrin         goto out;
16352638Sperrin     }
16362638Sperrin
16372638Sperrin     /* intent log never used? */
16382638Sperrin     lwb = list_head(&zilog->zl_lwb_list);
16392638Sperrin     if (lwb == NULL) {
16402638Sperrin         ret = B_TRUE;
16412638Sperrin         goto out;
16422638Sperrin     }
16431646Sperrin
16441646Sperrin     /*
16452638Sperrin      * more than 1 log buffer means zil_sync() hasn't yet freed
16462638Sperrin      * entries after a txg has committed
16471646Sperrin      */
16482638Sperrin     if (list_next(&zilog->zl_lwb_list, lwb)) {
16492638Sperrin         ret = B_FALSE;
16502638Sperrin         goto out;
16512638Sperrin     }
16522638Sperrin
16531646Sperrin     ASSERT(zil_empty(zilog));
16542638Sperrin     ret = B_TRUE;
16552638Sperrin out:
16562638Sperrin     cv_broadcast(&zilog->zl_cv_writer);
16572638Sperrin     mutex_exit(&zilog->zl_lock);
16582638Sperrin     return (ret);
16591646Sperrin }
16609701SGeorge.Wilson@Sun.COM
16619701SGeorge.Wilson@Sun.COM /* ARGSUSED */
16629701SGeorge.Wilson@Sun.COM int
16639701SGeorge.Wilson@Sun.COM zil_vdev_offline(char *osname, void *arg)
16649701SGeorge.Wilson@Sun.COM {
16659701SGeorge.Wilson@Sun.COM     objset_t *os;
16669701SGeorge.Wilson@Sun.COM     zilog_t *zilog;
16679701SGeorge.Wilson@Sun.COM     int error;
16689701SGeorge.Wilson@Sun.COM
166910298SMatthew.Ahrens@Sun.COM     error = dmu_objset_hold(osname, FTAG, &os);
16709701SGeorge.Wilson@Sun.COM     if (error)
16719701SGeorge.Wilson@Sun.COM         return (error);
16729701SGeorge.Wilson@Sun.COM
16739701SGeorge.Wilson@Sun.COM     zilog = dmu_objset_zil(os);
16749701SGeorge.Wilson@Sun.COM     if (zil_suspend(zilog) != 0)
16759701SGeorge.Wilson@Sun.COM         error = EEXIST;
16769701SGeorge.Wilson@Sun.COM     else
16779701SGeorge.Wilson@Sun.COM         zil_resume(zilog);
167810298SMatthew.Ahrens@Sun.COM     dmu_objset_rele(os, FTAG);
16799701SGeorge.Wilson@Sun.COM     return (error);
16809701SGeorge.Wilson@Sun.COM }
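/*
 * zil_vdev_offline() is shaped as a per-dataset callback -- it takes
 * (char *osname, void *arg) and returns an errno -- so that the pool code
 * can walk every dataset and confirm each intent log can be suspended
 * (and therefore holds nothing that would be lost) before a log device is
 * offlined.  A sketch of the assumed call site:
 *
 *      error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
 *          NULL, DS_FIND_CHILDREN);
 */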