/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them.  These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
 * requirement.  In the event of a panic or power failure those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system.  Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction.  Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain.  The ZIL header points to the first
 * block in the chain.  Note there is not a fixed place in the pool
 * to hold blocks.  They are dynamically allocated and freed as
 * needed from the blocks available.
 */

/*
 * This global ZIL switch affects all pools.
 */
int zil_disable = 0;	/* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static boolean_t zil_empty(zilog_t *zilog);

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva = BP_IDENTITY(bp);
	zil_bp_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
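/*
 * Note on the log chain (a summary of zil_init_log_chain() above and of
 * zil_read_log_block() below): the blk_cksum words seeded here double as
 * the chain verifier.  The two GUID words are random, so blocks left over
 * from an earlier incarnation of the log cannot be mistaken for the
 * current chain, and ZIL_ZC_SEQ starts at 1 and is expected to advance by
 * exactly one per block.
 */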
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.  The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}

	return (error);
}
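/*
 * A rough sketch of the two log block layouts handled above (derived from
 * the code; not to scale):
 *
 *	ZIO_CHECKSUM_ZILOG2 blocks:
 *	+-------------+------------------------------------+
 *	| zil_chain_t | log records ...  (zc_nused bytes)   |
 *	+-------------+------------------------------------+
 *
 *	older blocks:
 *	+------------------------------------+-------------+
 *	| log records ...  (zc_nused bytes)   | zil_chain_t |
 *	+------------------------------------+-------------+
 *
 * In both layouts zil_chain_t holds zc_next_blk, the block pointer of the
 * next log block; its embedded checksum words (this block's checksum with
 * ZIL_ZC_SEQ incremented) are what the bcmp() above verifies.
 */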

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

	return (error);
}
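/*
 * zil_parse() is the common walker for the claim, free and log-chain-check
 * passes in this file; callers supply the per-block and per-record
 * callbacks.  For example, the claim path below is, in essence:
 *
 *	(void) zil_parse(zilog, zil_claim_log_block,
 *	    zil_claim_log_record, tx, first_txg);
 *
 * with the results left in zl_parse_blk_seq and zl_parse_lr_seq for the
 * caller to record in the ZIL header.
 */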
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it.  This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to.  In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		(void) zil_parse(zilog, zil_free_log_block,
		    zil_free_log_record, tx, zh->zh_claim_txg);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg.  See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ?
	    0 : error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg.  If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer().  zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}
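/*
 * Summary of the zio plumbing set up above: every outstanding lwb write is
 * issued as a child of the zl_root_zio created in zil_lwb_write_init(), so
 * zil_commit_writer() can wait for all in-flight log writes with a single
 * zio_wait() on that root.  The completion path then commits lwb_tx,
 * allowing the txg in which the next log block was allocated to sync.
 */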
/*
 * Define a limited set of intent log block sizes.
 * These must be a multiple of 4KB.  Note only the amount used (again
 * aligned to 4KB) actually gets written.  However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
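/*
 * A worked example of the bucket selection done in zil_lwb_write_start()
 * below, assuming the defaults above: a commit that has accumulated
 * zl_cur_used = 20K of records needs 20K + sizeof (zil_chain_t), so the
 * smallest bucket that fits is 32K + 4K; a tiny commit lands in the 4K
 * bucket; anything beyond the last finite bucket is rounded up to
 * SPA_MAXBLOCKSIZE.  The chosen size is then maxed against the previous
 * ZIL_PREV_BLKS choices so alternating small and large commits don't keep
 * flip-flopping the block size.
 */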
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated.  Here we select the size of the next
	 * block, based on the size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes.  This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes.  This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (!error) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * Clear unused data for security.
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number.  Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);
	ASSERT(!zilog->zl_replay);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here.
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		zil_itx_destroy(itx);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next;
	lwb_t *lwb;
	spa_t *spa;
	int error = 0;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create().
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			lwb = zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
		}
	}
	ASSERT(lwb == NULL || lwb->lwb_zio == NULL);

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);

	for (itx = list_head(&zilog->zl_itx_list); itx; itx = itx_next) {
		/*
		 * Save the next pointer.  Even though we drop zl_lock below,
		 * all threads that can remove itx list entries (other writers
		 * and zil_itx_clean()) can't do so until they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);

		/*
		 * Determine whether to push this itx.
		 * Push all transactions related to specified foid and
		 * all other transactions except those that can be logged
		 * out of order (TX_WRITE, TX_TRUNCATE, TX_SETATTR, TX_ACL)
		 * for all other files.
		 *
		 * If foid == 0 (meaning "push all foids") or
		 * itx->itx_sync is set (meaning O_[D]SYNC), push regardless.
		 */
		if (foid != 0 && !itx->itx_sync &&
		    TX_OOO(itx->itx_lr.lrc_txtype) &&
		    ((lr_ooo_t *)&itx->itx_lr)->lr_foid != foid)
			continue; /* skip this record */

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (LWB_EMPTY(lwb)) ||
		    (lwb->lwb_nused + itx->itx_sod > lwb->lwb_sz)))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;

		mutex_exit(&zilog->zl_lock);

		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);

		zil_itx_destroy(itx);

		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq - 1;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		error = zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (error || lwb == NULL)
		txg_wait_synced(zilog->zl_dmu_pool, 0);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;

	/*
	 * Remember the highest committed log sequence number for ztest.
	 * We only update this value when all the log writes succeeded,
	 * because ztest wants to ASSERT that it got the whole log chain.
	 */
	if (error == 0 && lwb != NULL)
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
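/*
 * Locking summary for zil_commit_writer() above, as implemented here: it
 * is entered with zl_lock held and returns with it re-acquired, marking
 * itself the writer via zl_writer, but it drops zl_lock temporarily around
 * zil_create(), zil_lwb_commit() and the final zio_wait() so that itx
 * producers are not blocked for the duration of the log I/O.
 */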
/*
 * Report whether all transactions are committed.
 */
static boolean_t
zil_is_committed(zilog_t *zilog)
{
    lwb_t *lwb;
    boolean_t committed;

    mutex_enter(&zilog->zl_lock);

    while (zilog->zl_writer)
        cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

    if (!list_is_empty(&zilog->zl_itx_list))
        committed = B_FALSE;        /* unpushed transactions */
    else if ((lwb = list_head(&zilog->zl_lwb_list)) == NULL)
        committed = B_TRUE;         /* intent log never used */
    else if (list_next(&zilog->zl_lwb_list, lwb) != NULL)
        committed = B_FALSE;        /* zil_sync() not done yet */
    else
        committed = B_TRUE;         /* everything synced */

    mutex_exit(&zilog->zl_lock);
    return (committed);
}

/*
 * Called in syncing context to free committed log blocks and update the log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
    zil_header_t *zh = zil_header_in_syncing_context(zilog);
    uint64_t txg = dmu_tx_get_txg(tx);
    spa_t *spa = zilog->zl_spa;
    uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
    lwb_t *lwb;

    /*
     * We don't zero out zl_destroy_txg, so make sure we don't try
     * to destroy it twice.
     */
    if (spa_sync_pass(spa) != 1)
        return;

    mutex_enter(&zilog->zl_lock);

    ASSERT(zilog->zl_stop_sync == 0);

    if (*replayed_seq != 0) {
        ASSERT(zh->zh_replay_seq < *replayed_seq);
        zh->zh_replay_seq = *replayed_seq;
        *replayed_seq = 0;
    }

    if (zilog->zl_destroy_txg == txg) {
        blkptr_t blk = zh->zh_log;

        ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

        bzero(zh, sizeof (zil_header_t));
        bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

        if (zilog->zl_keep_first) {
            /*
             * If this block was part of a log chain that couldn't
             * be claimed because a device was missing during
             * zil_claim(), but that device later returns,
             * then this block could erroneously appear valid.
             * To guard against this, assign a new GUID to the new
             * log chain so it doesn't matter what blk points to.
             */
            zil_init_log_chain(zilog, &blk);
            zh->zh_log = blk;
        }
    }

    while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
        zh->zh_log = lwb->lwb_blk;
        if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
            break;
        list_remove(&zilog->zl_lwb_list, lwb);
        zio_free_zil(spa, txg, &lwb->lwb_blk);
        kmem_cache_free(zil_lwb_cache, lwb);

        /*
         * If we don't have anything left in the lwb list then
         * we've had an allocation failure and we need to zero
         * out the zil_header blkptr so that we don't end
         * up freeing the same block twice.
         */
        if (list_head(&zilog->zl_lwb_list) == NULL)
            BP_ZERO(&zh->zh_log);
    }
    mutex_exit(&zilog->zl_lock);
}

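/*
 * Create and destroy the kmem cache used for log write blocks (lwbs).
 */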
void
zil_init(void)
{
    zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
        sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
    kmem_cache_destroy(zil_lwb_cache);
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
    zilog->zl_logbias = logbias;
}

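/*
 * Allocate and initialize the in-core zilog state for an objset
 * (zil_alloc), and tear it down again (zil_free), releasing any lwbs
 * still on the list.
 */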
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
    zilog_t *zilog;

    zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

    zilog->zl_header = zh_phys;
    zilog->zl_os = os;
    zilog->zl_spa = dmu_objset_spa(os);
    zilog->zl_dmu_pool = dmu_objset_pool(os);
    zilog->zl_destroy_txg = TXG_INITIAL - 1;
    zilog->zl_logbias = dmu_objset_logbias(os);

    mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

    list_create(&zilog->zl_itx_list, sizeof (itx_t),
        offsetof(itx_t, itx_node));

    list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
        offsetof(lwb_t, lwb_node));

    mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

    avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
        sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

    cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
    cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

    return (zilog);
}

void
zil_free(zilog_t *zilog)
{
    lwb_t *lwb;

    zilog->zl_stop_sync = 1;

    while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
        list_remove(&zilog->zl_lwb_list, lwb);
        if (lwb->lwb_buf != NULL)
            zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
        kmem_cache_free(zil_lwb_cache, lwb);
    }
    list_destroy(&zilog->zl_lwb_list);

    avl_destroy(&zilog->zl_vdev_tree);
    mutex_destroy(&zilog->zl_vdev_lock);

    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
    list_destroy(&zilog->zl_itx_list);
    mutex_destroy(&zilog->zl_lock);

    cv_destroy(&zilog->zl_cv_writer);
    cv_destroy(&zilog->zl_cv_suspend);

    kmem_free(zilog, sizeof (zilog_t));
}

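/*
 * Rough lifecycle sketch for a consumer such as the ZPL (names are
 * illustrative, not a contract):
 *
 *      zilog = zil_open(os, get_data_func);    (at mount)
 *      zil_commit(zilog, seq, foid);           (on fsync, O_DSYNC, etc.)
 *      zil_close(zilog);                       (at unmount)
 *
 * zil_alloc() and zil_free() bracket this pair and are driven by objset
 * setup and teardown; they manage only the in-core structure.
 */
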
/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
    zilog_t *zilog = dmu_objset_zil(os);

    zilog->zl_get_data = get_data;
    zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
        2, 2, TASKQ_PREPOPULATE);

    return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
    /*
     * If the log isn't already committed, mark the objset dirty
     * (so zil_sync() will be called) and wait for that txg to sync.
     */
    if (!zil_is_committed(zilog)) {
        uint64_t txg;
        dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
        VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    taskq_destroy(zilog->zl_clean_taskq);
    zilog->zl_clean_taskq = NULL;
    zilog->zl_get_data = NULL;

    zil_itx_clean(zilog);
    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;

    mutex_enter(&zilog->zl_lock);
    if (zh->zh_flags & ZIL_REPLAY_NEEDED) {     /* unplayed log */
        mutex_exit(&zilog->zl_lock);
        return (EBUSY);
    }
    if (zilog->zl_suspend++ != 0) {
        /*
         * Someone else already began a suspend.
         * Just wait for them to finish.
         */
        while (zilog->zl_suspending)
            cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
        mutex_exit(&zilog->zl_lock);
        return (0);
    }
    zilog->zl_suspending = B_TRUE;
    mutex_exit(&zilog->zl_lock);

    zil_commit(zilog, UINT64_MAX, 0);

    /*
     * Wait for any in-flight log writes to complete.
     */
    mutex_enter(&zilog->zl_lock);
    while (zilog->zl_writer)
        cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
    mutex_exit(&zilog->zl_lock);

    zil_destroy(zilog, B_FALSE);

    mutex_enter(&zilog->zl_lock);
    zilog->zl_suspending = B_FALSE;
    cv_broadcast(&zilog->zl_cv_suspend);
    mutex_exit(&zilog->zl_lock);

    return (0);
}

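/*
 * Resume a log suspended by zil_suspend(); drops the suspend count
 * taken by a successful zil_suspend() call.
 */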
void
zil_resume(zilog_t *zilog)
{
    mutex_enter(&zilog->zl_lock);
    ASSERT(zilog->zl_suspend != 0);
    zilog->zl_suspend--;
    mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
    zil_replay_func_t **zr_replay;
    void *zr_arg;
    boolean_t zr_byteswap;
    char *zr_lr;
} zil_replay_arg_t;

static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
    char name[MAXNAMELEN];

    zilog->zl_replaying_seq--;  /* didn't actually replay this one */

    dmu_objset_name(zilog->zl_os, name);

    cmn_err(CE_WARN, "ZFS replay transaction error %d, "
        "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
        (u_longlong_t)lr->lrc_seq,
        (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
        (lr->lrc_txtype & TX_CI) ? "CI" : "");

    return (error);
}

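/*
 * Replay a single log record.  Invoked (via zil_parse()) for each record
 * in the chain; dispatches to the caller-supplied replay vector for the
 * record's transaction type.
 */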
static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
    zil_replay_arg_t *zr = zra;
    const zil_header_t *zh = zilog->zl_header;
    uint64_t reclen = lr->lrc_reclen;
    uint64_t txtype = lr->lrc_txtype;
    int error = 0;

    zilog->zl_replaying_seq = lr->lrc_seq;

    if (lr->lrc_seq <= zh->zh_replay_seq)   /* already replayed */
        return (0);

    if (lr->lrc_txg < claim_txg)            /* already committed */
        return (0);

    /* Strip case-insensitive bit, still present in log record */
    txtype &= ~TX_CI;

    if (txtype == 0 || txtype >= TX_MAX_TYPE)
        return (zil_replay_error(zilog, lr, EINVAL));

    /*
     * If this record type can be logged out of order, the object
     * (lr_foid) may no longer exist.  That's legitimate, not an error.
     */
    if (TX_OOO(txtype)) {
        error = dmu_object_info(zilog->zl_os,
            ((lr_ooo_t *)lr)->lr_foid, NULL);
        if (error == ENOENT || error == EEXIST)
            return (0);
    }

    /*
     * Make a copy of the data so we can revise and extend it.
     */
    bcopy(lr, zr->zr_lr, reclen);

    /*
     * If this is a TX_WRITE with a blkptr, suck in the data.
     */
    if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
        error = zil_read_log_data(zilog, (lr_write_t *)lr,
            zr->zr_lr + reclen);
        if (error)
            return (zil_replay_error(zilog, lr, error));
    }

    /*
     * The log block containing this lr may have been byteswapped
     * so that we can easily examine common fields like lrc_txtype.
     * However, the log is a mix of different record types, and only the
     * replay vectors know how to byteswap their records.  Therefore, if
     * the lr was byteswapped, undo it before invoking the replay vector.
     */
    if (zr->zr_byteswap)
        byteswap_uint64_array(zr->zr_lr, reclen);

    /*
     * We must now do two things atomically: replay this log record,
     * and update the log header sequence number to reflect the fact that
     * we did so.  At the end of each replay function the sequence number
     * is updated if we are in replay mode.
     */
    error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
    if (error) {
        /*
         * The DMU's dnode layer doesn't see removes until the txg
         * commits, so a subsequent claim can spuriously fail with
         * EEXIST.  So if we receive any error we try syncing out
         * any removes, then retry the transaction.  Note that we
         * specify B_FALSE for byteswap now, so we don't do it twice.
         */
        txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
        error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
        if (error)
            return (zil_replay_error(zilog, lr, error));
    }
    return (0);
}

/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
    zilog->zl_replay_blks++;

    return (0);
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
    zilog_t *zilog = dmu_objset_zil(os);
    const zil_header_t *zh = zilog->zl_header;
    zil_replay_arg_t zr;

    if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
        zil_destroy(zilog, B_TRUE);
        return;
    }

    zr.zr_replay = replay_func;
    zr.zr_arg = arg;
    zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
    zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

    /*
     * Wait for in-progress removes to sync before starting replay.
     */
    txg_wait_synced(zilog->zl_dmu_pool, 0);

    zilog->zl_replay = B_TRUE;
    zilog->zl_replay_time = ddi_get_lbolt();
    ASSERT(zilog->zl_replay_blks == 0);
    (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
        zh->zh_claim_txg);
    kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

    zil_destroy(zilog, B_FALSE);
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
    zilog->zl_replay = B_FALSE;
}

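/*
 * Illustrative only: a consumer typically replays its log at mount time
 * by passing a table of per-transaction-type replay functions, e.g.
 *
 *      zil_replay(os, zfsvfs, zfs_replay_vector);
 *
 * where zfs_replay_vector[TX_MAX_TYPE] (the ZPL's table) maps each
 * txtype to the function that reapplies that operation.
 */
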
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
    if (zilog == NULL)
        return (B_TRUE);

    if (zilog->zl_replay) {
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
            zilog->zl_replaying_seq;
        return (B_TRUE);
    }

    return (B_FALSE);
}

/* ARGSUSED */
int
zil_vdev_offline(const char *osname, void *arg)
{
    objset_t *os;
    zilog_t *zilog;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error)
        return (error);

    zilog = dmu_objset_zil(os);
    if (zil_suspend(zilog) != 0)
        error = EEXIST;
    else
        zil_resume(zilog);
    dmu_objset_rele(os, FTAG);
    return (error);
}
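
/*
 * zil_vdev_offline() above matches the dmu_objset_find() callback
 * signature and is intended to be applied across datasets (e.g. when
 * evacuating a log device); it reports EEXIST for any dataset whose
 * intent log cannot currently be suspended.
 */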