/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below shows the
 * ZIL structure.
 */
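/*
 * A rough sketch of the chain described above (illustrative only; the
 * actual on-disk layout is defined by zil_header_t and zil_chain_t):
 *
 *	zil_header_t          ZIL block              ZIL block
 *	+-----------+       +--------------+       +--------------+
 *	|  zh_log --+------>| log records  |   +-->| log records  |
 *	+-----------+       | next blkptr -+---+   | next blkptr -+--> ...
 *	                    +--------------+       +--------------+
 */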
/*
 * This global ZIL switch affects all pools
 */
int zil_replay_disable = 0;	/* disable intent logging replay */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static boolean_t zil_empty(zilog_t *zilog);

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))


static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva = BP_IDENTITY(bp);
	zil_bp_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}

	return (error);
}
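/*
 * Sketch of the two block layouts handled above (illustrative only):
 *
 *	ZIO_CHECKSUM_ZILOG2:	[ zil_chain_t | log records ...           ]
 *	older chains:		[ log records ...           | zil_chain_t ]
 *
 * In both cases zc_next_blk carries the block pointer of (and thus the
 * expected checksum verifier for) the next block: this block's checksum
 * with ZIL_ZC_SEQ incremented. A mismatch means the end of the chain.
 */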
/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

	return (error);
}
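/*
 * A typical invocation (illustrative; see zil_claim() and zil_destroy()
 * below for the real call sites):
 *
 *	(void) zil_parse(zilog, zil_claim_log_block,
 *	    zil_claim_log_record, tx, first_txg);
 */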
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}
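/*
 * Note on the accounting above (descriptive only): for
 * ZIO_CHECKSUM_ZILOG2 blocks the zil_chain_t lives at the front of the
 * buffer, so lwb_nused starts past it and lwb_sz is the full block
 * size; for older blocks the chain info is a trailer, so lwb_nused
 * starts at 0 and lwb_sz excludes the trailer.
 */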
/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		(void) zil_parse(zilog, zil_free_log_block,
		    zil_free_log_record, tx, zh->zh_claim_txg);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}
/*
 * Define a limited set of intent log block sizes.
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
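/*
 * Worked example of the bucket selection in zil_lwb_write_start() below
 * (illustrative): a commit needing roughly 20KB of records requires
 * 20K + sizeof (zil_chain_t), which overflows the 8192+4096 bucket but
 * fits in 32*1024 + 4096, so a 36KB block would be chosen (before the
 * maximum over the zl_prev_blks history is applied).
 */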
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (!error) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number. Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}
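/*
 * Sketch of the itx lifecycle (illustrative; lr_xxx_t stands for any
 * log record type):
 *
 *	itx = zil_itx_create(txtype, sizeof (lr_xxx_t));
 *	... fill in itx->itx_lr ...
 *	seq = zil_itx_assign(zilog, itx, tx);
 *	...
 *	zil_commit(zilog, seq, foid);	(when synchronous
 *	semantics are required)
 */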
uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);
	ASSERT(!zilog->zl_replay);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		zil_itx_destroy(itx);
	}
	list_destroy(&clean_list);
}
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next;
	lwb_t *lwb;
	spa_t *spa;
	int error = 0;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			lwb = zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
		}
	}
	ASSERT(lwb == NULL || lwb->lwb_zio == NULL);

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);

	for (itx = list_head(&zilog->zl_itx_list); itx; itx = itx_next) {
		/*
		 * Save the next pointer. Even though we drop zl_lock below,
		 * all threads that can remove itx list entries (other writers
		 * and zil_itx_clean()) can't do so until they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);

		/*
		 * Determine whether to push this itx.
		 * Push all transactions related to specified foid and
		 * all other transactions except those that can be logged
		 * out of order (TX_WRITE, TX_TRUNCATE, TX_SETATTR, TX_ACL)
		 * for all other files.
		 *
		 * If foid == 0 (meaning "push all foids") or
		 * itx->itx_sync is set (meaning O_[D]SYNC), push regardless.
		 */
		if (foid != 0 && !itx->itx_sync &&
		    TX_OOO(itx->itx_lr.lrc_txtype) &&
		    ((lr_ooo_t *)&itx->itx_lr)->lr_foid != foid)
			continue; /* skip this record */

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (LWB_EMPTY(lwb)) ||
		    (lwb->lwb_nused + itx->itx_sod > lwb->lwb_sz)))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;

		mutex_exit(&zilog->zl_lock);

		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);

		zil_itx_destroy(itx);

		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq - 1;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		error = zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (error || lwb == NULL)
		txg_wait_synced(zilog->zl_dmu_pool, 0);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;

	/*
	 * Remember the highest committed log sequence number for ztest.
	 * We only update this value when all the log writes succeeded,
	 * because ztest wants to ASSERT that it got the whole log chain.
	 */
	if (error == 0 && lwb != NULL)
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
12622638Sperrin /*
12632638Sperrin  * Push zfs transactions to stable storage up to the supplied sequence number.
12642638Sperrin  * If foid is 0 push out all transactions, otherwise push only those
12652638Sperrin  * for that file, or those that might have been used to create that file.
12662638Sperrin  */
12672638Sperrin void
12682638Sperrin zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
12692638Sperrin {
1270*12294SMark.Musante@Sun.COM 	if (zilog->zl_sync == ZFS_SYNC_DISABLED || seq == 0)
12712638Sperrin 		return;
12722638Sperrin 
12732638Sperrin 	mutex_enter(&zilog->zl_lock);
12742638Sperrin 
12752638Sperrin 	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */
12762638Sperrin 
12773063Sperrin 	while (zilog->zl_writer) {
12782638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
127910922SJeff.Bonwick@Sun.COM 		if (seq <= zilog->zl_commit_seq) {
12803063Sperrin 			mutex_exit(&zilog->zl_lock);
12813063Sperrin 			return;
12823063Sperrin 		}
12833063Sperrin 	}
12842638Sperrin 	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
12853063Sperrin 	/* wake up others waiting on the commit */
12863063Sperrin 	cv_broadcast(&zilog->zl_cv_writer);
12873063Sperrin 	mutex_exit(&zilog->zl_lock);
1288789Sahrens }
1289789Sahrens 
1290789Sahrens /*
129110922SJeff.Bonwick@Sun.COM  * Report whether all transactions are committed.
129210922SJeff.Bonwick@Sun.COM  */
129310922SJeff.Bonwick@Sun.COM static boolean_t
129410922SJeff.Bonwick@Sun.COM zil_is_committed(zilog_t *zilog)
129510922SJeff.Bonwick@Sun.COM {
129610922SJeff.Bonwick@Sun.COM 	lwb_t *lwb;
129710922SJeff.Bonwick@Sun.COM 	boolean_t committed;
129810922SJeff.Bonwick@Sun.COM 
129910922SJeff.Bonwick@Sun.COM 	mutex_enter(&zilog->zl_lock);
130010922SJeff.Bonwick@Sun.COM 
130110922SJeff.Bonwick@Sun.COM 	while (zilog->zl_writer)
130210922SJeff.Bonwick@Sun.COM 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
130310922SJeff.Bonwick@Sun.COM 
130410922SJeff.Bonwick@Sun.COM 	if (!list_is_empty(&zilog->zl_itx_list))
130510922SJeff.Bonwick@Sun.COM 		committed = B_FALSE;		/* unpushed transactions */
130610922SJeff.Bonwick@Sun.COM 	else if ((lwb = list_head(&zilog->zl_lwb_list)) == NULL)
130710922SJeff.Bonwick@Sun.COM 		committed = B_TRUE;		/* intent log never used */
130810922SJeff.Bonwick@Sun.COM 	else if (list_next(&zilog->zl_lwb_list, lwb) != NULL)
130910922SJeff.Bonwick@Sun.COM 		committed = B_FALSE;		/* zil_sync() not done yet */
131010922SJeff.Bonwick@Sun.COM 	else
131110922SJeff.Bonwick@Sun.COM 		committed = B_TRUE;		/* everything synced */
131210922SJeff.Bonwick@Sun.COM 
131310922SJeff.Bonwick@Sun.COM 	mutex_exit(&zilog->zl_lock);
131410922SJeff.Bonwick@Sun.COM 	return (committed);
131510922SJeff.Bonwick@Sun.COM }
131610922SJeff.Bonwick@Sun.COM 
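/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the single-writer handoff that zil_commit() implements above,
 * modeled with POSIX threads so it can stand alone.  One caller becomes
 * the writer; later callers sleep on a condition variable and return
 * early if a concurrent commit has already covered their sequence
 * number.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>

typedef struct example_log {
	pthread_mutex_t	el_lock;
	pthread_cond_t	el_cv_writer;
	int		el_writer;	/* nonzero while a commit runs */
	uint64_t	el_commit_seq;	/* highest committed sequence */
} example_log_t;

static void
example_commit_writer(example_log_t *el, uint64_t seq)
{
	el->el_writer = 1;
	/* ... fill and issue log blocks, wait for them to be stable ... */
	if (seq > el->el_commit_seq)
		el->el_commit_seq = seq;
	el->el_writer = 0;
}

static void
example_commit(example_log_t *el, uint64_t seq)
{
	(void) pthread_mutex_lock(&el->el_lock);
	while (el->el_writer) {
		(void) pthread_cond_wait(&el->el_cv_writer, &el->el_lock);
		if (seq <= el->el_commit_seq) {	/* piggybacked on another */
			(void) pthread_mutex_unlock(&el->el_lock);
			return;
		}
	}
	example_commit_writer(el, seq);
	(void) pthread_cond_broadcast(&el->el_cv_writer); /* wake waiters */
	(void) pthread_mutex_unlock(&el->el_lock);
}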
131710922SJeff.Bonwick@Sun.COM /*
1318789Sahrens  * Called in syncing context to free committed log blocks and update log header.
1319789Sahrens  */
1320789Sahrens void
1321789Sahrens zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1322789Sahrens {
13231807Sbonwick 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1324789Sahrens 	uint64_t txg = dmu_tx_get_txg(tx);
1325789Sahrens 	spa_t *spa = zilog->zl_spa;
132610922SJeff.Bonwick@Sun.COM 	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1327789Sahrens 	lwb_t *lwb;
1328789Sahrens 
13299396SMatthew.Ahrens@Sun.COM 	/*
13309396SMatthew.Ahrens@Sun.COM 	 * We don't zero out zl_destroy_txg, so make sure we don't try
13319396SMatthew.Ahrens@Sun.COM 	 * to destroy it twice.
13329396SMatthew.Ahrens@Sun.COM 	 */
13339396SMatthew.Ahrens@Sun.COM 	if (spa_sync_pass(spa) != 1)
13349396SMatthew.Ahrens@Sun.COM 		return;
13359396SMatthew.Ahrens@Sun.COM 
13361807Sbonwick 	mutex_enter(&zilog->zl_lock);
13371807Sbonwick 
1338789Sahrens 	ASSERT(zilog->zl_stop_sync == 0);
1339789Sahrens 
134010922SJeff.Bonwick@Sun.COM 	if (*replayed_seq != 0) {
134110922SJeff.Bonwick@Sun.COM 		ASSERT(zh->zh_replay_seq < *replayed_seq);
134210922SJeff.Bonwick@Sun.COM 		zh->zh_replay_seq = *replayed_seq;
134310922SJeff.Bonwick@Sun.COM 		*replayed_seq = 0;
134410922SJeff.Bonwick@Sun.COM 	}
1345789Sahrens 
1346789Sahrens 	if (zilog->zl_destroy_txg == txg) {
13471807Sbonwick 		blkptr_t blk = zh->zh_log;
13481807Sbonwick 
13491807Sbonwick 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
13501807Sbonwick 
13511807Sbonwick 		bzero(zh, sizeof (zil_header_t));
13528227SNeil.Perrin@Sun.COM 		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
13531807Sbonwick 
13541807Sbonwick 		if (zilog->zl_keep_first) {
13551807Sbonwick 			/*
13561807Sbonwick 			 * If this block was part of a log chain that couldn't
13571807Sbonwick 			 * be claimed because a device was missing during
13581807Sbonwick 			 * zil_claim(), but that device later returns,
13591807Sbonwick 			 * then this block could erroneously appear valid.
13601807Sbonwick 			 * To guard against this, assign a new GUID to the new
13611807Sbonwick 			 * log chain so it doesn't matter what blk points to.
13621807Sbonwick 			 */
13631807Sbonwick 			zil_init_log_chain(zilog, &blk);
13641807Sbonwick 			zh->zh_log = blk;
13651807Sbonwick 		}
1366789Sahrens 	}
1367789Sahrens 
13689701SGeorge.Wilson@Sun.COM 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
13692638Sperrin 		zh->zh_log = lwb->lwb_blk;
1370789Sahrens 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1371789Sahrens 			break;
1372789Sahrens 		list_remove(&zilog->zl_lwb_list, lwb);
137310922SJeff.Bonwick@Sun.COM 		zio_free_zil(spa, txg, &lwb->lwb_blk);
1374789Sahrens 		kmem_cache_free(zil_lwb_cache, lwb);
13753668Sgw25295 
13763668Sgw25295 		/*
13773668Sgw25295 		 * If we don't have anything left in the lwb list then
13783668Sgw25295 		 * we've had an allocation failure and we need to zero
13793668Sgw25295 		 * out the zil_header blkptr so that we don't end
13803668Sgw25295 		 * up freeing the same block twice.
13813668Sgw25295 		 */
13823668Sgw25295 		if (list_head(&zilog->zl_lwb_list) == NULL)
13833668Sgw25295 			BP_ZERO(&zh->zh_log);
1384789Sahrens 	}
1385789Sahrens 	mutex_exit(&zilog->zl_lock);
1386789Sahrens }
1387789Sahrens 
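/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the retirement loop in zil_sync() above, over a plain
 * singly-linked list.  Blocks are freed from the head of the chain once
 * no in-flight write references them and the txg that covers them has
 * synced; the first still-live block becomes the new head the header
 * points at.  The types below are hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct example_lwb {
	struct example_lwb *el_next;
	uint64_t	    el_max_txg;	/* highest txg using this block */
	void		   *el_buf;	/* non-NULL while a write is in flight */
} example_lwb_t;

static example_lwb_t *
example_retire_lwbs(example_lwb_t *head, uint64_t synced_txg)
{
	while (head != NULL &&
	    head->el_buf == NULL && head->el_max_txg <= synced_txg) {
		example_lwb_t *dead = head;

		head = head->el_next;
		free(dead);
	}
	return (head);	/* caller records this as the new chain head */
}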
1388789Sahrens void
1389789Sahrens zil_init(void)
1390789Sahrens {
1391789Sahrens 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
13922856Snd150628 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1393789Sahrens }
1394789Sahrens 
1395789Sahrens void
1396789Sahrens zil_fini(void)
1397789Sahrens {
1398789Sahrens 	kmem_cache_destroy(zil_lwb_cache);
1399789Sahrens }
1400789Sahrens 
140110310SNeil.Perrin@Sun.COM void
1402*12294SMark.Musante@Sun.COM zil_set_sync(zilog_t *zilog, uint64_t sync)
1403*12294SMark.Musante@Sun.COM {
1404*12294SMark.Musante@Sun.COM 	zilog->zl_sync = sync;
1405*12294SMark.Musante@Sun.COM }
1406*12294SMark.Musante@Sun.COM 
1407*12294SMark.Musante@Sun.COM void
140810310SNeil.Perrin@Sun.COM zil_set_logbias(zilog_t *zilog, uint64_t logbias)
140910310SNeil.Perrin@Sun.COM {
141010310SNeil.Perrin@Sun.COM 	zilog->zl_logbias = logbias;
141110310SNeil.Perrin@Sun.COM }
141210310SNeil.Perrin@Sun.COM 
1413789Sahrens zilog_t *
1414789Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys)
1415789Sahrens {
1416789Sahrens 	zilog_t *zilog;
1417789Sahrens 
1418789Sahrens 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1419789Sahrens 
1420789Sahrens 	zilog->zl_header = zh_phys;
1421789Sahrens 	zilog->zl_os = os;
1422789Sahrens 	zilog->zl_spa = dmu_objset_spa(os);
1423789Sahrens 	zilog->zl_dmu_pool = dmu_objset_pool(os);
14241807Sbonwick 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
142510310SNeil.Perrin@Sun.COM 	zilog->zl_logbias = dmu_objset_logbias(os);
1426*12294SMark.Musante@Sun.COM 	zilog->zl_sync = dmu_objset_syncprop(os);
1427789Sahrens 
14282856Snd150628 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
14292856Snd150628 
1430789Sahrens 	list_create(&zilog->zl_itx_list, sizeof (itx_t),
1431789Sahrens 	    offsetof(itx_t, itx_node));
1432789Sahrens 
1433789Sahrens 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1434789Sahrens 	    offsetof(lwb_t, lwb_node));
1435789Sahrens 
14365688Sbonwick 	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
14375688Sbonwick 
14385688Sbonwick 	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
14395688Sbonwick 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1440789Sahrens 
14415913Sperrin 	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
14425913Sperrin 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
14435913Sperrin 
1444789Sahrens 	return (zilog);
1445789Sahrens }
1446789Sahrens 
1447789Sahrens void
1448789Sahrens zil_free(zilog_t *zilog)
1449789Sahrens {
1450789Sahrens 	lwb_t *lwb;
1451789Sahrens 
1452789Sahrens 	zilog->zl_stop_sync = 1;
1453789Sahrens 
1454789Sahrens 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1455789Sahrens 		list_remove(&zilog->zl_lwb_list, lwb);
1456789Sahrens 		if (lwb->lwb_buf != NULL)
1457789Sahrens 			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1458789Sahrens 		kmem_cache_free(zil_lwb_cache, lwb);
1459789Sahrens 	}
1460789Sahrens 	list_destroy(&zilog->zl_lwb_list);
1461789Sahrens 
14625688Sbonwick 	avl_destroy(&zilog->zl_vdev_tree);
14635688Sbonwick 	mutex_destroy(&zilog->zl_vdev_lock);
1464789Sahrens 
1465789Sahrens 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1466789Sahrens 	list_destroy(&zilog->zl_itx_list);
14672856Snd150628 	mutex_destroy(&zilog->zl_lock);
1468789Sahrens 
14695913Sperrin 	cv_destroy(&zilog->zl_cv_writer);
14705913Sperrin 	cv_destroy(&zilog->zl_cv_suspend);
14715913Sperrin 
1472789Sahrens 	kmem_free(zilog, sizeof (zilog_t));
1473789Sahrens }
1474789Sahrens 
1475789Sahrens /*
1476789Sahrens  * Open an intent log.
1477789Sahrens  */
1478789Sahrens zilog_t *
1479789Sahrens zil_open(objset_t *os, zil_get_data_t *get_data)
1480789Sahrens {
1481789Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
1482789Sahrens 
1483789Sahrens 	zilog->zl_get_data = get_data;
1484789Sahrens 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1485789Sahrens 	    2, 2, TASKQ_PREPOPULATE);
1486789Sahrens 
1487789Sahrens 	return (zilog);
1488789Sahrens }
1489789Sahrens 
1490789Sahrens /*
1491789Sahrens  * Close an intent log.
1492789Sahrens  */
1493789Sahrens void
1494789Sahrens zil_close(zilog_t *zilog)
1495789Sahrens {
14961807Sbonwick 	/*
14971807Sbonwick 	 * If the log isn't already committed, mark the objset dirty
14981807Sbonwick 	 * (so zil_sync() will be called) and wait for that txg to sync.
14991807Sbonwick 	 */
15001807Sbonwick 	if (!zil_is_committed(zilog)) {
15011807Sbonwick 		uint64_t txg;
15021807Sbonwick 		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
150310922SJeff.Bonwick@Sun.COM 		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
15041807Sbonwick 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
15051807Sbonwick 		txg = dmu_tx_get_txg(tx);
15061807Sbonwick 		dmu_tx_commit(tx);
15071807Sbonwick 		txg_wait_synced(zilog->zl_dmu_pool, txg);
15081807Sbonwick 	}
15091807Sbonwick 
1510789Sahrens 	taskq_destroy(zilog->zl_clean_taskq);
1511789Sahrens 	zilog->zl_clean_taskq = NULL;
1512789Sahrens 	zilog->zl_get_data = NULL;
1513789Sahrens 
1514789Sahrens 	zil_itx_clean(zilog);
1515789Sahrens 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1516789Sahrens }
1517789Sahrens 
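/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the shutdown barrier that zil_close() erects above.  If
 * anything is still uncommitted, force one more txg through the sync
 * pipeline and wait for it, so no queued record is lost when the taskq
 * and callbacks are torn down.  The three helpers are hypothetical
 * stand-ins for zil_is_committed(), dirtying a txg, and txg_wait_synced().
 */
#include <stdint.h>

extern int	example_all_committed(void *log);
extern uint64_t	example_dirty_and_get_txg(void *log);
extern void	example_wait_for_txg(void *log, uint64_t txg);

static void
example_close_barrier(void *log)
{
	if (!example_all_committed(log)) {
		uint64_t txg = example_dirty_and_get_txg(log);

		example_wait_for_txg(log, txg);
	}
}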
1518789Sahrens /*
1519789Sahrens  * Suspend an intent log.  While in suspended mode, we still honor
1520789Sahrens  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1521789Sahrens  * We suspend the log briefly when taking a snapshot so that the snapshot
1522789Sahrens  * contains all the data it's supposed to, and has an empty intent log.
1523789Sahrens  */
1524789Sahrens int
1525789Sahrens zil_suspend(zilog_t *zilog)
1526789Sahrens {
15271807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1528789Sahrens 
1529789Sahrens 	mutex_enter(&zilog->zl_lock);
15308989SNeil.Perrin@Sun.COM 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1531789Sahrens 		mutex_exit(&zilog->zl_lock);
1532789Sahrens 		return (EBUSY);
1533789Sahrens 	}
15341807Sbonwick 	if (zilog->zl_suspend++ != 0) {
15351807Sbonwick 		/*
15361807Sbonwick 		 * Someone else already began a suspend.
15371807Sbonwick 		 * Just wait for them to finish.
15381807Sbonwick 		 */
15391807Sbonwick 		while (zilog->zl_suspending)
15401807Sbonwick 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
15411807Sbonwick 		mutex_exit(&zilog->zl_lock);
15421807Sbonwick 		return (0);
15431807Sbonwick 	}
15441807Sbonwick 	zilog->zl_suspending = B_TRUE;
1545789Sahrens 	mutex_exit(&zilog->zl_lock);
1546789Sahrens 
15472638Sperrin 	zil_commit(zilog, UINT64_MAX, 0);
1548789Sahrens 
15492638Sperrin 	/*
15502638Sperrin 	 * Wait for any in-flight log writes to complete.
15512638Sperrin 	 */
1552789Sahrens 	mutex_enter(&zilog->zl_lock);
15532638Sperrin 	while (zilog->zl_writer)
15542638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1555789Sahrens 	mutex_exit(&zilog->zl_lock);
1556789Sahrens 
15571807Sbonwick 	zil_destroy(zilog, B_FALSE);
15581807Sbonwick 
15591807Sbonwick 	mutex_enter(&zilog->zl_lock);
15601807Sbonwick 	zilog->zl_suspending = B_FALSE;
15611807Sbonwick 	cv_broadcast(&zilog->zl_cv_suspend);
15621807Sbonwick 	mutex_exit(&zilog->zl_lock);
1563789Sahrens 
1564789Sahrens 	return (0);
1565789Sahrens }
1566789Sahrens 
1567789Sahrens void
1568789Sahrens zil_resume(zilog_t *zilog)
1569789Sahrens {
1570789Sahrens 	mutex_enter(&zilog->zl_lock);
1571789Sahrens 	ASSERT(zilog->zl_suspend != 0);
1572789Sahrens 	zilog->zl_suspend--;
1573789Sahrens 	mutex_exit(&zilog->zl_lock);
1574789Sahrens }
1575789Sahrens 
1576789Sahrens typedef struct zil_replay_arg {
1577789Sahrens 	zil_replay_func_t **zr_replay;
1578789Sahrens 	void *zr_arg;
1579789Sahrens 	boolean_t zr_byteswap;
158010922SJeff.Bonwick@Sun.COM 	char *zr_lr;
1581789Sahrens } zil_replay_arg_t;
1582789Sahrens 
158310922SJeff.Bonwick@Sun.COM static int
158410922SJeff.Bonwick@Sun.COM zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
158510922SJeff.Bonwick@Sun.COM {
158610922SJeff.Bonwick@Sun.COM 	char name[MAXNAMELEN];
158710922SJeff.Bonwick@Sun.COM 
158810922SJeff.Bonwick@Sun.COM 	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
158910922SJeff.Bonwick@Sun.COM 
159010922SJeff.Bonwick@Sun.COM 	dmu_objset_name(zilog->zl_os, name);
159110922SJeff.Bonwick@Sun.COM 
159210922SJeff.Bonwick@Sun.COM 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
159310922SJeff.Bonwick@Sun.COM 	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
159410922SJeff.Bonwick@Sun.COM 	    (u_longlong_t)lr->lrc_seq,
159510922SJeff.Bonwick@Sun.COM 	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
159610922SJeff.Bonwick@Sun.COM 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
159710922SJeff.Bonwick@Sun.COM 
159810922SJeff.Bonwick@Sun.COM 	return (error);
159910922SJeff.Bonwick@Sun.COM }
160010922SJeff.Bonwick@Sun.COM 
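/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the "first caller does the work, later callers wait" shape of
 * zil_suspend() above, modeled with POSIX threads.  The suspend count
 * stays elevated for every caller until a matching resume; only the
 * first caller flushes and destroys the log.  All names are hypothetical.
 */
#include <pthread.h>

typedef struct example_suspend_lock {
	pthread_mutex_t	es_lock;
	pthread_cond_t	es_cv;
	int		es_count;	/* outstanding suspends */
	int		es_working;	/* first suspender still flushing */
} example_suspend_lock_t;

static void
example_suspend(example_suspend_lock_t *es)
{
	(void) pthread_mutex_lock(&es->es_lock);
	if (es->es_count++ != 0) {
		/* Someone else began the suspend; just wait it out. */
		while (es->es_working)
			(void) pthread_cond_wait(&es->es_cv, &es->es_lock);
		(void) pthread_mutex_unlock(&es->es_lock);
		return;
	}
	es->es_working = 1;
	(void) pthread_mutex_unlock(&es->es_lock);

	/* ... flush outstanding records and destroy the log ... */

	(void) pthread_mutex_lock(&es->es_lock);
	es->es_working = 0;
	(void) pthread_cond_broadcast(&es->es_cv);
	(void) pthread_mutex_unlock(&es->es_lock);
}

static void
example_resume(example_suspend_lock_t *es)
{
	(void) pthread_mutex_lock(&es->es_lock);
	es->es_count--;
	(void) pthread_mutex_unlock(&es->es_lock);
}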
160110922SJeff.Bonwick@Sun.COM static int
1602789Sahrens zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1603789Sahrens {
1604789Sahrens 	zil_replay_arg_t *zr = zra;
16051807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1606789Sahrens 	uint64_t reclen = lr->lrc_reclen;
1607789Sahrens 	uint64_t txtype = lr->lrc_txtype;
160810922SJeff.Bonwick@Sun.COM 	int error = 0;
1609789Sahrens 
161010922SJeff.Bonwick@Sun.COM 	zilog->zl_replaying_seq = lr->lrc_seq;
161110922SJeff.Bonwick@Sun.COM 
161210922SJeff.Bonwick@Sun.COM 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
161310922SJeff.Bonwick@Sun.COM 		return (0);
1614789Sahrens 
1615789Sahrens 	if (lr->lrc_txg < claim_txg)		/* already committed */
161610922SJeff.Bonwick@Sun.COM 		return (0);
1617789Sahrens 
16185331Samw 	/* Strip case-insensitive bit, still present in log record */
16195331Samw 	txtype &= ~TX_CI;
16205331Samw 
162110922SJeff.Bonwick@Sun.COM 	if (txtype == 0 || txtype >= TX_MAX_TYPE)
162210922SJeff.Bonwick@Sun.COM 		return (zil_replay_error(zilog, lr, EINVAL));
162310922SJeff.Bonwick@Sun.COM 
162410922SJeff.Bonwick@Sun.COM 	/*
162510922SJeff.Bonwick@Sun.COM 	 * If this record type can be logged out of order, the object
162610922SJeff.Bonwick@Sun.COM 	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
162710922SJeff.Bonwick@Sun.COM 	 */
162810922SJeff.Bonwick@Sun.COM 	if (TX_OOO(txtype)) {
162910922SJeff.Bonwick@Sun.COM 		error = dmu_object_info(zilog->zl_os,
163010922SJeff.Bonwick@Sun.COM 		    ((lr_ooo_t *)lr)->lr_foid, NULL);
163110922SJeff.Bonwick@Sun.COM 		if (error == ENOENT || error == EEXIST)
163210922SJeff.Bonwick@Sun.COM 			return (0);
16338227SNeil.Perrin@Sun.COM 	}
16348227SNeil.Perrin@Sun.COM 
1635789Sahrens 	/*
1636789Sahrens 	 * Make a copy of the data so we can revise and extend it.
1637789Sahrens 	 */
163810922SJeff.Bonwick@Sun.COM 	bcopy(lr, zr->zr_lr, reclen);
163910922SJeff.Bonwick@Sun.COM 
164010922SJeff.Bonwick@Sun.COM 	/*
164110922SJeff.Bonwick@Sun.COM 	 * If this is a TX_WRITE with a blkptr, suck in the data.
164210922SJeff.Bonwick@Sun.COM 	 */
164310922SJeff.Bonwick@Sun.COM 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
164410922SJeff.Bonwick@Sun.COM 		error = zil_read_log_data(zilog, (lr_write_t *)lr,
164510922SJeff.Bonwick@Sun.COM 		    zr->zr_lr + reclen);
164610922SJeff.Bonwick@Sun.COM 		if (error)
164710922SJeff.Bonwick@Sun.COM 			return (zil_replay_error(zilog, lr, error));
164810922SJeff.Bonwick@Sun.COM 	}
1649789Sahrens 
1650789Sahrens 	/*
1651789Sahrens 	 * The log block containing this lr may have been byteswapped
1652789Sahrens 	 * so that we can easily examine common fields like lrc_txtype.
165310922SJeff.Bonwick@Sun.COM 	 * However, the log is a mix of different record types, and only the
1654789Sahrens 	 * replay vectors know how to byteswap their records.  Therefore, if
1655789Sahrens 	 * the lr was byteswapped, undo it before invoking the replay vector.
1656789Sahrens 	 */
1657789Sahrens 	if (zr->zr_byteswap)
165810922SJeff.Bonwick@Sun.COM 		byteswap_uint64_array(zr->zr_lr, reclen);
1659789Sahrens 
1660789Sahrens 	/*
1661789Sahrens 	 * We must now do two things atomically: replay this log record,
16628227SNeil.Perrin@Sun.COM 	 * and update the log header sequence number to reflect the fact that
16638227SNeil.Perrin@Sun.COM 	 * we did so.  At the end of each replay function the sequence number
16648227SNeil.Perrin@Sun.COM 	 * is updated if we are in replay mode.
1665789Sahrens 	 */
166610922SJeff.Bonwick@Sun.COM 	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
166710922SJeff.Bonwick@Sun.COM 	if (error) {
16683063Sperrin 		/*
16693063Sperrin 		 * The DMU's dnode layer doesn't see removes until the txg
16703063Sperrin 		 * commits, so a subsequent claim can spuriously fail with
16718227SNeil.Perrin@Sun.COM 		 * EEXIST. So if we receive any error we try syncing out
167210922SJeff.Bonwick@Sun.COM 		 * any removes then retry the transaction.  Note that we
167310922SJeff.Bonwick@Sun.COM 		 * specify B_FALSE for byteswap now, so we don't do it twice.
16743063Sperrin 		 */
167510922SJeff.Bonwick@Sun.COM 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
167610922SJeff.Bonwick@Sun.COM 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
167710922SJeff.Bonwick@Sun.COM 		if (error)
167810922SJeff.Bonwick@Sun.COM 			return (zil_replay_error(zilog, lr, error));
1679789Sahrens 	}
168010922SJeff.Bonwick@Sun.COM 	return (0);
16813063Sperrin }
1682789Sahrens 
16833063Sperrin /* ARGSUSED */
168410922SJeff.Bonwick@Sun.COM static int
16853063Sperrin zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
16863063Sperrin {
16873063Sperrin 	zilog->zl_replay_blks++;
168810922SJeff.Bonwick@Sun.COM 
168910922SJeff.Bonwick@Sun.COM 	return (0);
1690789Sahrens }
1691789Sahrens 
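/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the retry step at the end of zil_replay_log_record() above.
 * A replay vector can fail spuriously while a pending remove has not yet
 * been pushed through a txg, so on any error the pool is synced once and
 * the vector retried exactly once, without re-byteswapping the
 * already-swapped buffer.  The two helpers are hypothetical stand-ins.
 */
extern int	example_replay_vector(void *arg, char *lr, int byteswap);
extern void	example_sync_pool(void *pool);

static int
example_replay_with_retry(void *pool, void *arg, char *lr, int byteswap)
{
	int error = example_replay_vector(arg, lr, byteswap);

	if (error != 0) {
		/* Push pending frees/removes, then retry; don't swap twice. */
		example_sync_pool(pool);
		error = example_replay_vector(arg, lr, 0);
	}
	return (error);
}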
1692789Sahrens /*
16931362Sperrin  * If this dataset has a non-empty intent log, replay it and destroy it.
1694789Sahrens  */
1695789Sahrens void
16968227SNeil.Perrin@Sun.COM zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1697789Sahrens {
1698789Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
16991807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
17001807Sbonwick 	zil_replay_arg_t zr;
17011362Sperrin 
17028989SNeil.Perrin@Sun.COM 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
17031807Sbonwick 		zil_destroy(zilog, B_TRUE);
17041362Sperrin 		return;
17051362Sperrin 	}
1706789Sahrens 
1707789Sahrens 	zr.zr_replay = replay_func;
1708789Sahrens 	zr.zr_arg = arg;
17091807Sbonwick 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
171010922SJeff.Bonwick@Sun.COM 	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1711789Sahrens 
1712789Sahrens 	/*
1713789Sahrens 	 * Wait for in-progress removes to sync before starting replay.
1714789Sahrens 	 */
1715789Sahrens 	txg_wait_synced(zilog->zl_dmu_pool, 0);
1716789Sahrens 
17178227SNeil.Perrin@Sun.COM 	zilog->zl_replay = B_TRUE;
171811066Srafael.vanoni@sun.com 	zilog->zl_replay_time = ddi_get_lbolt();
17193063Sperrin 	ASSERT(zilog->zl_replay_blks == 0);
17203063Sperrin 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
17211807Sbonwick 	    zh->zh_claim_txg);
172210922SJeff.Bonwick@Sun.COM 	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
1723789Sahrens 
17241807Sbonwick 	zil_destroy(zilog, B_FALSE);
17255712Sahrens 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
17268227SNeil.Perrin@Sun.COM 	zilog->zl_replay = B_FALSE;
1727789Sahrens }
17281646Sperrin 
172910922SJeff.Bonwick@Sun.COM boolean_t
173010922SJeff.Bonwick@Sun.COM zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
17311646Sperrin {
1732*12294SMark.Musante@Sun.COM 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
173310922SJeff.Bonwick@Sun.COM 		return (B_TRUE);
17341646Sperrin 
173510922SJeff.Bonwick@Sun.COM 	if (zilog->zl_replay) {
173610922SJeff.Bonwick@Sun.COM 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
173710922SJeff.Bonwick@Sun.COM 		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
173810922SJeff.Bonwick@Sun.COM 		    zilog->zl_replaying_seq;
173910922SJeff.Bonwick@Sun.COM 		return (B_TRUE);
17402638Sperrin 	}
17412638Sperrin 
174210922SJeff.Bonwick@Sun.COM 	return (B_FALSE);
17431646Sperrin }
17449701SGeorge.Wilson@Sun.COM 
17459701SGeorge.Wilson@Sun.COM /* ARGSUSED */
17469701SGeorge.Wilson@Sun.COM int
174711209SMatthew.Ahrens@Sun.COM zil_vdev_offline(const char *osname, void *arg)
17489701SGeorge.Wilson@Sun.COM {
17499701SGeorge.Wilson@Sun.COM 	objset_t *os;
17509701SGeorge.Wilson@Sun.COM 	zilog_t *zilog;
17519701SGeorge.Wilson@Sun.COM 	int error;
17529701SGeorge.Wilson@Sun.COM 
175310298SMatthew.Ahrens@Sun.COM 	error = dmu_objset_hold(osname, FTAG, &os);
17549701SGeorge.Wilson@Sun.COM 	if (error)
17559701SGeorge.Wilson@Sun.COM 		return (error);
17569701SGeorge.Wilson@Sun.COM 
17579701SGeorge.Wilson@Sun.COM 	zilog = dmu_objset_zil(os);
17589701SGeorge.Wilson@Sun.COM 	if (zil_suspend(zilog) != 0)
17599701SGeorge.Wilson@Sun.COM 		error = EEXIST;
17609701SGeorge.Wilson@Sun.COM 	else
17619701SGeorge.Wilson@Sun.COM 		zil_resume(zilog);
176210298SMatthew.Ahrens@Sun.COM 	dmu_objset_rele(os, FTAG);
17639701SGeorge.Wilson@Sun.COM 	return (error);
17649701SGeorge.Wilson@Sun.COM }
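/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the shape of the replay-vector table that a consumer hands to
 * zil_replay() above.  Slots are indexed by log record type; slot 0 is a
 * deliberate error entry because txtype 0 is invalid, exactly as
 * zil_replay_log_record() checks.  The type count and handlers here are
 * hypothetical, not the real ZPL table.
 */
#define	EXAMPLE_TX_MAX_TYPE	4

typedef int example_replay_func_t(void *arg, char *lr, int byteswap);

/* ARGSUSED */
static int
example_replay_err(void *arg, char *lr, int byteswap)
{
	return (-1);	/* no such transaction type */
}

/* ARGSUSED */
static int
example_replay_nop(void *arg, char *lr, int byteswap)
{
	return (0);	/* byteswap lr if asked, then re-apply it */
}

static example_replay_func_t *example_replay_vector[EXAMPLE_TX_MAX_TYPE] = {
	example_replay_err,	/* 0: invalid txtype */
	example_replay_nop,	/* 1: e.g. create */
	example_replay_nop,	/* 2: e.g. remove */
	example_replay_nop,	/* 3: e.g. write */
};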