xref: /onnv-gate/usr/src/uts/common/fs/zfs/zil.c (revision 2856)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51472Sperrin  * Common Development and Distribution License (the "License").
61472Sperrin  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
221362Sperrin  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens #include <sys/zfs_context.h>
29789Sahrens #include <sys/spa.h>
30789Sahrens #include <sys/dmu.h>
31789Sahrens #include <sys/zap.h>
32789Sahrens #include <sys/arc.h>
33789Sahrens #include <sys/stat.h>
34789Sahrens #include <sys/resource.h>
35789Sahrens #include <sys/zil.h>
36789Sahrens #include <sys/zil_impl.h>
37789Sahrens #include <sys/dsl_dataset.h>
38789Sahrens #include <sys/vdev.h>
39789Sahrens 
40789Sahrens /*
41789Sahrens  * The zfs intent log (ZIL) saves transaction records of system calls
42789Sahrens  * that change the file system in memory with enough information
43789Sahrens  * to be able to replay them. These are stored in memory until
44789Sahrens  * either the DMU transaction group (txg) commits them to the stable pool
45789Sahrens  * and they can be discarded, or they are flushed to the stable log
46789Sahrens  * (also in the pool) due to a fsync, O_DSYNC or other synchronous
47789Sahrens  * requirement. In the event of a panic or power fail then those log
48789Sahrens  * records (transactions) are replayed.
49789Sahrens  *
50789Sahrens  * There is one ZIL per file system. Its on-disk (pool) format consists
51789Sahrens  * of 3 parts:
52789Sahrens  *
53789Sahrens  * 	- ZIL header
54789Sahrens  * 	- ZIL blocks
55789Sahrens  * 	- ZIL records
56789Sahrens  *
57789Sahrens  * A log record holds a system call transaction. Log blocks can
58789Sahrens  * hold many log records and the blocks are chained together.
59789Sahrens  * Each ZIL block contains a block pointer (blkptr_t) to the next
60789Sahrens  * ZIL block in the chain. The ZIL header points to the first
61789Sahrens  * block in the chain. Note there is not a fixed place in the pool
62789Sahrens  * to hold blocks. They are dynamically allocated and freed as
63789Sahrens  * needed from the blocks available. Figure X shows the ZIL structure:
64789Sahrens  */
65789Sahrens 
66789Sahrens /*
67789Sahrens  * These global ZIL switches affect all pools
68789Sahrens  */
69789Sahrens int zil_disable = 0;	/* disable intent logging */
70789Sahrens int zil_noflush = 0;	/* don't flush write cache buffers on disks */
71789Sahrens 
72789Sahrens static kmem_cache_t *zil_lwb_cache;
73789Sahrens 
74789Sahrens static int
75789Sahrens zil_dva_compare(const void *x1, const void *x2)
76789Sahrens {
77789Sahrens 	const dva_t *dva1 = x1;
78789Sahrens 	const dva_t *dva2 = x2;
79789Sahrens 
80789Sahrens 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
81789Sahrens 		return (-1);
82789Sahrens 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
83789Sahrens 		return (1);
84789Sahrens 
85789Sahrens 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
86789Sahrens 		return (-1);
87789Sahrens 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
88789Sahrens 		return (1);
89789Sahrens 
90789Sahrens 	return (0);
91789Sahrens }
92789Sahrens 
93789Sahrens static void
94789Sahrens zil_dva_tree_init(avl_tree_t *t)
95789Sahrens {
96789Sahrens 	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
97789Sahrens 	    offsetof(zil_dva_node_t, zn_node));
98789Sahrens }
99789Sahrens 
100789Sahrens static void
101789Sahrens zil_dva_tree_fini(avl_tree_t *t)
102789Sahrens {
103789Sahrens 	zil_dva_node_t *zn;
104789Sahrens 	void *cookie = NULL;
105789Sahrens 
106789Sahrens 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
107789Sahrens 		kmem_free(zn, sizeof (zil_dva_node_t));
108789Sahrens 
109789Sahrens 	avl_destroy(t);
110789Sahrens }
111789Sahrens 
112789Sahrens static int
113789Sahrens zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
114789Sahrens {
115789Sahrens 	zil_dva_node_t *zn;
116789Sahrens 	avl_index_t where;
117789Sahrens 
118789Sahrens 	if (avl_find(t, dva, &where) != NULL)
119789Sahrens 		return (EEXIST);
120789Sahrens 
121789Sahrens 	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
122789Sahrens 	zn->zn_dva = *dva;
123789Sahrens 	avl_insert(t, zn, where);
124789Sahrens 
125789Sahrens 	return (0);
126789Sahrens }
127789Sahrens 
1281807Sbonwick static zil_header_t *
1291807Sbonwick zil_header_in_syncing_context(zilog_t *zilog)
1301807Sbonwick {
1311807Sbonwick 	return ((zil_header_t *)zilog->zl_header);
1321807Sbonwick }
1331807Sbonwick 
1341807Sbonwick static void
1351807Sbonwick zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
1361807Sbonwick {
1371807Sbonwick 	zio_cksum_t *zc = &bp->blk_cksum;
1381807Sbonwick 
1391807Sbonwick 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
1401807Sbonwick 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
1411807Sbonwick 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
1421807Sbonwick 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
1431807Sbonwick }
1441807Sbonwick 
145789Sahrens /*
146789Sahrens  * Read a log block, make sure it's valid, and byteswap it if necessary.
147789Sahrens  */
148789Sahrens static int
1491807Sbonwick zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
150789Sahrens {
1511807Sbonwick 	blkptr_t blk = *bp;
1521544Seschrock 	zbookmark_t zb;
1532391Smaybee 	uint32_t aflags = ARC_WAIT;
154789Sahrens 	int error;
155789Sahrens 
1561807Sbonwick 	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
1571544Seschrock 	zb.zb_object = 0;
1581544Seschrock 	zb.zb_level = -1;
1591807Sbonwick 	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
1601807Sbonwick 
1611807Sbonwick 	*abufpp = NULL;
1621807Sbonwick 
1631807Sbonwick 	error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
1641807Sbonwick 	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
1652391Smaybee 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
1661807Sbonwick 
1671807Sbonwick 	if (error == 0) {
1681807Sbonwick 		char *data = (*abufpp)->b_data;
1691807Sbonwick 		uint64_t blksz = BP_GET_LSIZE(bp);
1701807Sbonwick 		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
1711807Sbonwick 		zio_cksum_t cksum = bp->blk_cksum;
1721544Seschrock 
1731807Sbonwick 		/*
1741807Sbonwick 		 * Sequence numbers should be... sequential.  The checksum
1751807Sbonwick 		 * verifier for the next block should be bp's checksum plus 1.
1761807Sbonwick 		 */
1771807Sbonwick 		cksum.zc_word[ZIL_ZC_SEQ]++;
1781807Sbonwick 
1791807Sbonwick 		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
1801807Sbonwick 			error = ESTALE;
1811807Sbonwick 		else if (BP_IS_HOLE(&ztp->zit_next_blk))
1821807Sbonwick 			error = ENOENT;
1831807Sbonwick 		else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
1841807Sbonwick 			error = EOVERFLOW;
1851807Sbonwick 
1861807Sbonwick 		if (error) {
1871807Sbonwick 			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
1881807Sbonwick 			*abufpp = NULL;
1891807Sbonwick 		}
190789Sahrens 	}
191789Sahrens 
1921807Sbonwick 	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
193789Sahrens 
1941807Sbonwick 	return (error);
195789Sahrens }
196789Sahrens 
197789Sahrens /*
198789Sahrens  * Parse the intent log, and call parse_func for each valid record within.
1991807Sbonwick  * Return the highest sequence number.
200789Sahrens  */
2011807Sbonwick uint64_t
202789Sahrens zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
203789Sahrens     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
204789Sahrens {
2051807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
2061807Sbonwick 	uint64_t claim_seq = zh->zh_claim_seq;
2071807Sbonwick 	uint64_t seq = 0;
2081807Sbonwick 	uint64_t max_seq = 0;
2091807Sbonwick 	blkptr_t blk = zh->zh_log;
2101807Sbonwick 	arc_buf_t *abuf;
211789Sahrens 	char *lrbuf, *lrp;
212789Sahrens 	zil_trailer_t *ztp;
213789Sahrens 	int reclen, error;
214789Sahrens 
215789Sahrens 	if (BP_IS_HOLE(&blk))
2161807Sbonwick 		return (max_seq);
217789Sahrens 
218789Sahrens 	/*
219789Sahrens 	 * Starting at the block pointed to by zh_log we read the log chain.
220789Sahrens 	 * For each block in the chain we strongly check that block to
221789Sahrens 	 * ensure its validity.  We stop when an invalid block is found.
222789Sahrens 	 * For each block pointer in the chain we call parse_blk_func().
223789Sahrens 	 * For each record in each valid block we call parse_lr_func().
2241807Sbonwick 	 * If the log has been claimed, stop if we encounter a sequence
2251807Sbonwick 	 * number greater than the highest claimed sequence number.
226789Sahrens 	 */
227789Sahrens 	zil_dva_tree_init(&zilog->zl_dva_tree);
228789Sahrens 	for (;;) {
2291807Sbonwick 		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
2301807Sbonwick 
2311807Sbonwick 		if (claim_seq != 0 && seq > claim_seq)
2321807Sbonwick 			break;
2331807Sbonwick 
2341807Sbonwick 		ASSERT(max_seq < seq);
2351807Sbonwick 		max_seq = seq;
2361807Sbonwick 
2371807Sbonwick 		error = zil_read_log_block(zilog, &blk, &abuf);
238789Sahrens 
239789Sahrens 		if (parse_blk_func != NULL)
240789Sahrens 			parse_blk_func(zilog, &blk, arg, txg);
241789Sahrens 
242789Sahrens 		if (error)
243789Sahrens 			break;
244789Sahrens 
2451807Sbonwick 		lrbuf = abuf->b_data;
246789Sahrens 		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
247789Sahrens 		blk = ztp->zit_next_blk;
248789Sahrens 
2491807Sbonwick 		if (parse_lr_func == NULL) {
2501807Sbonwick 			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
251789Sahrens 			continue;
2521807Sbonwick 		}
253789Sahrens 
254789Sahrens 		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
255789Sahrens 			lr_t *lr = (lr_t *)lrp;
256789Sahrens 			reclen = lr->lrc_reclen;
257789Sahrens 			ASSERT3U(reclen, >=, sizeof (lr_t));
258789Sahrens 			parse_lr_func(zilog, lr, arg, txg);
259789Sahrens 		}
2601807Sbonwick 		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
261789Sahrens 	}
262789Sahrens 	zil_dva_tree_fini(&zilog->zl_dva_tree);
2631807Sbonwick 
2641807Sbonwick 	return (max_seq);
265789Sahrens }
266789Sahrens 
267789Sahrens /* ARGSUSED */
268789Sahrens static void
269789Sahrens zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
270789Sahrens {
271789Sahrens 	spa_t *spa = zilog->zl_spa;
272789Sahrens 	int err;
273789Sahrens 
274789Sahrens 	/*
275789Sahrens 	 * Claim log block if not already committed and not already claimed.
276789Sahrens 	 */
277789Sahrens 	if (bp->blk_birth >= first_txg &&
278789Sahrens 	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
279789Sahrens 		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
280789Sahrens 		ASSERT(err == 0);
281789Sahrens 	}
282789Sahrens }
283789Sahrens 
284789Sahrens static void
285789Sahrens zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
286789Sahrens {
287789Sahrens 	if (lrc->lrc_txtype == TX_WRITE) {
288789Sahrens 		lr_write_t *lr = (lr_write_t *)lrc;
289789Sahrens 		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
290789Sahrens 	}
291789Sahrens }
292789Sahrens 
293789Sahrens /* ARGSUSED */
294789Sahrens static void
295789Sahrens zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
296789Sahrens {
297789Sahrens 	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
298789Sahrens }
299789Sahrens 
300789Sahrens static void
301789Sahrens zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
302789Sahrens {
303789Sahrens 	/*
304789Sahrens 	 * If we previously claimed it, we need to free it.
305789Sahrens 	 */
306789Sahrens 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
307789Sahrens 		lr_write_t *lr = (lr_write_t *)lrc;
308789Sahrens 		blkptr_t *bp = &lr->lr_blkptr;
309789Sahrens 		if (bp->blk_birth >= claim_txg &&
310789Sahrens 		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
311789Sahrens 			(void) arc_free(NULL, zilog->zl_spa,
312789Sahrens 			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
313789Sahrens 		}
314789Sahrens 	}
315789Sahrens }
316789Sahrens 
317789Sahrens /*
318789Sahrens  * Create an on-disk intent log.
319789Sahrens  */
320789Sahrens static void
321789Sahrens zil_create(zilog_t *zilog)
322789Sahrens {
3231807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
324789Sahrens 	lwb_t *lwb;
3251807Sbonwick 	uint64_t txg = 0;
3261807Sbonwick 	dmu_tx_t *tx = NULL;
327789Sahrens 	blkptr_t blk;
3281807Sbonwick 	int error = 0;
329789Sahrens 
330789Sahrens 	/*
3311807Sbonwick 	 * Wait for any previous destroy to complete.
332789Sahrens 	 */
3331807Sbonwick 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
3341807Sbonwick 
3351807Sbonwick 	ASSERT(zh->zh_claim_txg == 0);
3361807Sbonwick 	ASSERT(zh->zh_replay_seq == 0);
3371807Sbonwick 
3381807Sbonwick 	blk = zh->zh_log;
339789Sahrens 
340789Sahrens 	/*
3411807Sbonwick 	 * If we don't already have an initial log block, allocate one now.
342789Sahrens 	 */
3431807Sbonwick 	if (BP_IS_HOLE(&blk)) {
3441807Sbonwick 		tx = dmu_tx_create(zilog->zl_os);
3451807Sbonwick 		(void) dmu_tx_assign(tx, TXG_WAIT);
3461807Sbonwick 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
3471807Sbonwick 		txg = dmu_tx_get_txg(tx);
3481807Sbonwick 
3491807Sbonwick 		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk, txg);
3501807Sbonwick 
3511807Sbonwick 		if (error == 0)
3521807Sbonwick 			zil_init_log_chain(zilog, &blk);
3531362Sperrin 	}
3541807Sbonwick 
3551807Sbonwick 	/*
3561807Sbonwick 	 * Allocate a log write buffer (lwb) for the first log block.
3571807Sbonwick 	 */
358789Sahrens 	if (error == 0) {
359789Sahrens 		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
360789Sahrens 		lwb->lwb_zilog = zilog;
361789Sahrens 		lwb->lwb_blk = blk;
362789Sahrens 		lwb->lwb_nused = 0;
363789Sahrens 		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
364789Sahrens 		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
365789Sahrens 		lwb->lwb_max_txg = txg;
3662237Smaybee 		lwb->lwb_zio = NULL;
3672237Smaybee 
368789Sahrens 		mutex_enter(&zilog->zl_lock);
369789Sahrens 		list_insert_tail(&zilog->zl_lwb_list, lwb);
370789Sahrens 		mutex_exit(&zilog->zl_lock);
371789Sahrens 	}
372789Sahrens 
3731807Sbonwick 	/*
3741807Sbonwick 	 * If we just allocated the first log block, commit our transaction
3751807Sbonwick 	 * and wait for zil_sync() to stuff the block poiner into zh_log.
3761807Sbonwick 	 * (zh is part of the MOS, so we cannot modify it in open context.)
3771807Sbonwick 	 */
3781807Sbonwick 	if (tx != NULL) {
3791807Sbonwick 		dmu_tx_commit(tx);
3801362Sperrin 		txg_wait_synced(zilog->zl_dmu_pool, txg);
3811807Sbonwick 	}
3821807Sbonwick 
3831807Sbonwick 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
384789Sahrens }
385789Sahrens 
386789Sahrens /*
387789Sahrens  * In one tx, free all log blocks and clear the log header.
3881807Sbonwick  * If keep_first is set, then we're replaying a log with no content.
3891807Sbonwick  * We want to keep the first block, however, so that the first
3901807Sbonwick  * synchronous transaction doesn't require a txg_wait_synced()
3911807Sbonwick  * in zil_create().  We don't need to txg_wait_synced() here either
3921807Sbonwick  * when keep_first is set, because both zil_create() and zil_destroy()
3931807Sbonwick  * will wait for any in-progress destroys to complete.
394789Sahrens  */
395789Sahrens void
3961807Sbonwick zil_destroy(zilog_t *zilog, boolean_t keep_first)
397789Sahrens {
3981807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
3991807Sbonwick 	lwb_t *lwb;
400789Sahrens 	dmu_tx_t *tx;
401789Sahrens 	uint64_t txg;
402789Sahrens 
4031807Sbonwick 	/*
4041807Sbonwick 	 * Wait for any previous destroy to complete.
4051807Sbonwick 	 */
4061807Sbonwick 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
407789Sahrens 
4081807Sbonwick 	if (BP_IS_HOLE(&zh->zh_log))
409789Sahrens 		return;
410789Sahrens 
411789Sahrens 	tx = dmu_tx_create(zilog->zl_os);
412789Sahrens 	(void) dmu_tx_assign(tx, TXG_WAIT);
413789Sahrens 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
414789Sahrens 	txg = dmu_tx_get_txg(tx);
415789Sahrens 
4161807Sbonwick 	mutex_enter(&zilog->zl_lock);
4171807Sbonwick 
4181807Sbonwick 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
419789Sahrens 	zilog->zl_destroy_txg = txg;
4201807Sbonwick 	zilog->zl_keep_first = keep_first;
4211807Sbonwick 
4221807Sbonwick 	if (!list_is_empty(&zilog->zl_lwb_list)) {
4231807Sbonwick 		ASSERT(zh->zh_claim_txg == 0);
4241807Sbonwick 		ASSERT(!keep_first);
4251807Sbonwick 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
4261807Sbonwick 			list_remove(&zilog->zl_lwb_list, lwb);
4271807Sbonwick 			if (lwb->lwb_buf != NULL)
4281807Sbonwick 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
4291807Sbonwick 			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
4301807Sbonwick 			kmem_cache_free(zil_lwb_cache, lwb);
4311807Sbonwick 		}
4321807Sbonwick 	} else {
4331807Sbonwick 		if (!keep_first) {
4341807Sbonwick 			(void) zil_parse(zilog, zil_free_log_block,
4351807Sbonwick 			    zil_free_log_record, tx, zh->zh_claim_txg);
4361807Sbonwick 		}
4371807Sbonwick 	}
4382638Sperrin 	mutex_exit(&zilog->zl_lock);
439789Sahrens 
440789Sahrens 	dmu_tx_commit(tx);
441789Sahrens 
4421807Sbonwick 	if (keep_first)			/* no need to wait in this case */
4431807Sbonwick 		return;
4441807Sbonwick 
4451807Sbonwick 	txg_wait_synced(zilog->zl_dmu_pool, txg);
4461807Sbonwick 	ASSERT(BP_IS_HOLE(&zh->zh_log));
447789Sahrens }
448789Sahrens 
4492199Sahrens int
450789Sahrens zil_claim(char *osname, void *txarg)
451789Sahrens {
452789Sahrens 	dmu_tx_t *tx = txarg;
453789Sahrens 	uint64_t first_txg = dmu_tx_get_txg(tx);
454789Sahrens 	zilog_t *zilog;
455789Sahrens 	zil_header_t *zh;
456789Sahrens 	objset_t *os;
457789Sahrens 	int error;
458789Sahrens 
459789Sahrens 	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
460789Sahrens 	if (error) {
461789Sahrens 		cmn_err(CE_WARN, "can't process intent log for %s", osname);
4622199Sahrens 		return (0);
463789Sahrens 	}
464789Sahrens 
465789Sahrens 	zilog = dmu_objset_zil(os);
4661807Sbonwick 	zh = zil_header_in_syncing_context(zilog);
467789Sahrens 
468789Sahrens 	/*
4691807Sbonwick 	 * Claim all log blocks if we haven't already done so, and remember
4701807Sbonwick 	 * the highest claimed sequence number.  This ensures that if we can
4711807Sbonwick 	 * read only part of the log now (e.g. due to a missing device),
4721807Sbonwick 	 * but we can read the entire log later, we will not try to replay
4731807Sbonwick 	 * or destroy beyond the last block we successfully claimed.
474789Sahrens 	 */
475789Sahrens 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
476789Sahrens 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
477789Sahrens 		zh->zh_claim_txg = first_txg;
4781807Sbonwick 		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
4791807Sbonwick 		    zil_claim_log_record, tx, first_txg);
480789Sahrens 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
481789Sahrens 	}
4821807Sbonwick 
483789Sahrens 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
484789Sahrens 	dmu_objset_close(os);
4852199Sahrens 	return (0);
486789Sahrens }
487789Sahrens 
488789Sahrens void
4892638Sperrin zil_add_vdev(zilog_t *zilog, uint64_t vdev)
490789Sahrens {
491789Sahrens 	zil_vdev_t *zv;
492789Sahrens 
493789Sahrens 	if (zil_noflush)
494789Sahrens 		return;
495789Sahrens 
496789Sahrens 	ASSERT(MUTEX_HELD(&zilog->zl_lock));
497789Sahrens 	zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
498789Sahrens 	zv->vdev = vdev;
499789Sahrens 	list_insert_tail(&zilog->zl_vdev_list, zv);
500789Sahrens }
501789Sahrens 
502789Sahrens void
5032638Sperrin zil_flush_vdevs(zilog_t *zilog)
504789Sahrens {
505789Sahrens 	vdev_t *vd;
506789Sahrens 	zil_vdev_t *zv, *zv2;
507789Sahrens 	zio_t *zio;
508789Sahrens 	spa_t *spa;
509789Sahrens 	uint64_t vdev;
510789Sahrens 
511789Sahrens 	if (zil_noflush)
512789Sahrens 		return;
513789Sahrens 
514789Sahrens 	ASSERT(MUTEX_HELD(&zilog->zl_lock));
515789Sahrens 
516789Sahrens 	spa = zilog->zl_spa;
517789Sahrens 	zio = NULL;
518789Sahrens 
5192638Sperrin 	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
520789Sahrens 		vdev = zv->vdev;
521789Sahrens 		list_remove(&zilog->zl_vdev_list, zv);
522789Sahrens 		kmem_free(zv, sizeof (zil_vdev_t));
523789Sahrens 
524789Sahrens 		/*
5252638Sperrin 		 * remove all chained entries with same vdev
526789Sahrens 		 */
527789Sahrens 		zv = list_head(&zilog->zl_vdev_list);
5282638Sperrin 		while (zv) {
529789Sahrens 			zv2 = list_next(&zilog->zl_vdev_list, zv);
530789Sahrens 			if (zv->vdev == vdev) {
531789Sahrens 				list_remove(&zilog->zl_vdev_list, zv);
532789Sahrens 				kmem_free(zv, sizeof (zil_vdev_t));
533789Sahrens 			}
534789Sahrens 			zv = zv2;
535789Sahrens 		}
536789Sahrens 
537789Sahrens 		/* flush the write cache for this vdev */
538789Sahrens 		mutex_exit(&zilog->zl_lock);
539789Sahrens 		if (zio == NULL)
540789Sahrens 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
541789Sahrens 		vd = vdev_lookup_top(spa, vdev);
542789Sahrens 		ASSERT(vd);
543789Sahrens 		(void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
544789Sahrens 		    NULL, NULL, ZIO_PRIORITY_NOW,
545789Sahrens 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
546789Sahrens 		mutex_enter(&zilog->zl_lock);
547789Sahrens 	}
548789Sahrens 
549789Sahrens 	/*
550789Sahrens 	 * Wait for all the flushes to complete.  Not all devices actually
551789Sahrens 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
552789Sahrens 	 */
5531141Sperrin 	if (zio != NULL) {
5541141Sperrin 		mutex_exit(&zilog->zl_lock);
555789Sahrens 		(void) zio_wait(zio);
5561141Sperrin 		mutex_enter(&zilog->zl_lock);
5571141Sperrin 	}
558789Sahrens }
559789Sahrens 
560789Sahrens /*
561789Sahrens  * Function called when a log block write completes
562789Sahrens  */
563789Sahrens static void
564789Sahrens zil_lwb_write_done(zio_t *zio)
565789Sahrens {
566789Sahrens 	lwb_t *lwb = zio->io_private;
567789Sahrens 	zilog_t *zilog = lwb->lwb_zilog;
568789Sahrens 
569789Sahrens 	/*
570789Sahrens 	 * Now that we've written this log block, we have a stable pointer
571789Sahrens 	 * to the next block in the chain, so it's OK to let the txg in
572789Sahrens 	 * which we allocated the next block sync.
573789Sahrens 	 */
574789Sahrens 	txg_rele_to_sync(&lwb->lwb_txgh);
575789Sahrens 
576789Sahrens 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
577789Sahrens 	mutex_enter(&zilog->zl_lock);
578789Sahrens 	lwb->lwb_buf = NULL;
579789Sahrens 	if (zio->io_error) {
580789Sahrens 		zilog->zl_log_error = B_TRUE;
581789Sahrens 		mutex_exit(&zilog->zl_lock);
582789Sahrens 		return;
583789Sahrens 	}
584789Sahrens 	mutex_exit(&zilog->zl_lock);
585789Sahrens }
586789Sahrens 
587789Sahrens /*
5882237Smaybee  * Initialize the io for a log block.
5892237Smaybee  *
5902237Smaybee  * Note, we should not initialize the IO until we are about
5912237Smaybee  * to use it, since zio_rewrite() does a spa_config_enter().
5922237Smaybee  */
5932237Smaybee static void
5942237Smaybee zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
5952237Smaybee {
5962237Smaybee 	zbookmark_t zb;
5972237Smaybee 
5982237Smaybee 	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
5992237Smaybee 	zb.zb_object = 0;
6002237Smaybee 	zb.zb_level = -1;
6012237Smaybee 	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
6022237Smaybee 
6032638Sperrin 	if (zilog->zl_root_zio == NULL) {
6042638Sperrin 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
6052638Sperrin 		    ZIO_FLAG_CANFAIL);
6062638Sperrin 	}
6072638Sperrin 	lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
6082237Smaybee 	    ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
6092237Smaybee 	    lwb->lwb_sz, zil_lwb_write_done, lwb,
6102237Smaybee 	    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
6112237Smaybee }
6122237Smaybee 
6132237Smaybee /*
614789Sahrens  * Start a log block write and advance to the next log block.
615789Sahrens  * Calls are serialized.
616789Sahrens  */
617789Sahrens static lwb_t *
618789Sahrens zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
619789Sahrens {
620789Sahrens 	lwb_t *nlwb;
621789Sahrens 	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
6221807Sbonwick 	spa_t *spa = zilog->zl_spa;
6231807Sbonwick 	blkptr_t *bp = &ztp->zit_next_blk;
624789Sahrens 	uint64_t txg;
625789Sahrens 	uint64_t zil_blksz;
626789Sahrens 	int error;
627789Sahrens 
628789Sahrens 	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
629789Sahrens 
630789Sahrens 	/*
631789Sahrens 	 * Allocate the next block and save its address in this block
632789Sahrens 	 * before writing it in order to establish the log chain.
633789Sahrens 	 * Note that if the allocation of nlwb synced before we wrote
634789Sahrens 	 * the block that points at it (lwb), we'd leak it if we crashed.
635789Sahrens 	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
636789Sahrens 	 */
637789Sahrens 	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
638789Sahrens 	txg_rele_to_quiesce(&lwb->lwb_txgh);
639789Sahrens 
640789Sahrens 	/*
6411141Sperrin 	 * Pick a ZIL blocksize. We request a size that is the
6421141Sperrin 	 * maximum of the previous used size, the current used size and
6431141Sperrin 	 * the amount waiting in the queue.
644789Sahrens 	 */
6452237Smaybee 	zil_blksz = MAX(zilog->zl_prev_used,
6462237Smaybee 	    zilog->zl_cur_used + sizeof (*ztp));
6471141Sperrin 	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
6481842Sperrin 	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
6491141Sperrin 	if (zil_blksz > ZIL_MAX_BLKSZ)
6501141Sperrin 		zil_blksz = ZIL_MAX_BLKSZ;
651789Sahrens 
6521807Sbonwick 	error = zio_alloc_blk(spa, zil_blksz, bp, txg);
653789Sahrens 	if (error) {
6541544Seschrock 		/*
6551544Seschrock 		 * Reinitialise the lwb.
6561544Seschrock 		 * By returning NULL the caller will call tx_wait_synced()
6571544Seschrock 		 */
6581544Seschrock 		mutex_enter(&zilog->zl_lock);
6591544Seschrock 		lwb->lwb_nused = 0;
6601544Seschrock 		mutex_exit(&zilog->zl_lock);
661789Sahrens 		txg_rele_to_sync(&lwb->lwb_txgh);
662789Sahrens 		return (NULL);
663789Sahrens 	}
664789Sahrens 
6651807Sbonwick 	ASSERT3U(bp->blk_birth, ==, txg);
6661544Seschrock 	ztp->zit_pad = 0;
667789Sahrens 	ztp->zit_nused = lwb->lwb_nused;
668789Sahrens 	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
6691807Sbonwick 	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
6701807Sbonwick 	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
671789Sahrens 
672789Sahrens 	/*
673789Sahrens 	 * Allocate a new log write buffer (lwb).
674789Sahrens 	 */
675789Sahrens 	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
676789Sahrens 
677789Sahrens 	nlwb->lwb_zilog = zilog;
6781807Sbonwick 	nlwb->lwb_blk = *bp;
679789Sahrens 	nlwb->lwb_nused = 0;
680789Sahrens 	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
681789Sahrens 	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
682789Sahrens 	nlwb->lwb_max_txg = txg;
6832237Smaybee 	nlwb->lwb_zio = NULL;
684789Sahrens 
685789Sahrens 	/*
686789Sahrens 	 * Put new lwb at the end of the log chain,
687789Sahrens 	 * and record the vdev for later flushing
688789Sahrens 	 */
689789Sahrens 	mutex_enter(&zilog->zl_lock);
690789Sahrens 	list_insert_tail(&zilog->zl_lwb_list, nlwb);
6912638Sperrin 	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))));
692789Sahrens 	mutex_exit(&zilog->zl_lock);
693789Sahrens 
694789Sahrens 	/*
6952237Smaybee 	 * kick off the write for the old log block
696789Sahrens 	 */
6972237Smaybee 	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
6982638Sperrin 	if (lwb->lwb_zio == NULL)
6992237Smaybee 		zil_lwb_write_init(zilog, lwb);
7002237Smaybee 	zio_nowait(lwb->lwb_zio);
701789Sahrens 
702789Sahrens 	return (nlwb);
703789Sahrens }
704789Sahrens 
705789Sahrens static lwb_t *
706789Sahrens zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
707789Sahrens {
708789Sahrens 	lr_t *lrc = &itx->itx_lr; /* common log record */
7092237Smaybee 	lr_write_t *lr = (lr_write_t *)lrc;
710789Sahrens 	uint64_t txg = lrc->lrc_txg;
711789Sahrens 	uint64_t reclen = lrc->lrc_reclen;
7122237Smaybee 	uint64_t dlen;
713789Sahrens 
714789Sahrens 	if (lwb == NULL)
715789Sahrens 		return (NULL);
716789Sahrens 	ASSERT(lwb->lwb_buf != NULL);
717789Sahrens 
7182237Smaybee 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
7192237Smaybee 		dlen = P2ROUNDUP_TYPED(
7202237Smaybee 		    lr->lr_length, sizeof (uint64_t), uint64_t);
7212237Smaybee 	else
7222237Smaybee 		dlen = 0;
7231669Sperrin 
7241669Sperrin 	zilog->zl_cur_used += (reclen + dlen);
7251669Sperrin 
7261669Sperrin 	/*
7271669Sperrin 	 * If this record won't fit in the current log block, start a new one.
7281669Sperrin 	 */
7291669Sperrin 	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
7301669Sperrin 		lwb = zil_lwb_write_start(zilog, lwb);
7312237Smaybee 		if (lwb == NULL)
7321669Sperrin 			return (NULL);
7331669Sperrin 		ASSERT(lwb->lwb_nused == 0);
7341669Sperrin 		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
7351669Sperrin 			txg_wait_synced(zilog->zl_dmu_pool, txg);
736789Sahrens 			return (lwb);
737789Sahrens 		}
738789Sahrens 	}
739789Sahrens 
7402638Sperrin 	/*
7412638Sperrin 	 * Update the lrc_seq, to be log record sequence number. See zil.h
7422638Sperrin 	 * Then copy the record to the log buffer.
7432638Sperrin 	 */
7442638Sperrin 	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
745789Sahrens 	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
7462237Smaybee 
7472237Smaybee 	/*
7482237Smaybee 	 * If it's a write, fetch the data or get its blkptr as appropriate.
7492237Smaybee 	 */
7502237Smaybee 	if (lrc->lrc_txtype == TX_WRITE) {
7512237Smaybee 		if (txg > spa_freeze_txg(zilog->zl_spa))
7522237Smaybee 			txg_wait_synced(zilog->zl_dmu_pool, txg);
7532237Smaybee 		if (itx->itx_wr_state != WR_COPIED) {
7542237Smaybee 			char *dbuf;
7552237Smaybee 			int error;
7562237Smaybee 
7572237Smaybee 			/* alignment is guaranteed */
7582237Smaybee 			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
7592237Smaybee 			if (dlen) {
7602237Smaybee 				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
7612237Smaybee 				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
7622237Smaybee 				lr->lr_common.lrc_reclen += dlen;
7632237Smaybee 			} else {
7642237Smaybee 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
7652237Smaybee 				dbuf = NULL;
7662237Smaybee 			}
7672237Smaybee 			error = zilog->zl_get_data(
7682237Smaybee 			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
7692237Smaybee 			if (error) {
7702237Smaybee 				ASSERT(error == ENOENT || error == EEXIST ||
7712237Smaybee 				    error == EALREADY);
7722237Smaybee 				return (lwb);
7732237Smaybee 			}
7742237Smaybee 		}
7751669Sperrin 	}
7762237Smaybee 
7772237Smaybee 	lwb->lwb_nused += reclen + dlen;
778789Sahrens 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
779789Sahrens 	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
780789Sahrens 	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
781789Sahrens 
782789Sahrens 	return (lwb);
783789Sahrens }
784789Sahrens 
785789Sahrens itx_t *
786789Sahrens zil_itx_create(int txtype, size_t lrsize)
787789Sahrens {
788789Sahrens 	itx_t *itx;
789789Sahrens 
7901842Sperrin 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
791789Sahrens 
792789Sahrens 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
793789Sahrens 	itx->itx_lr.lrc_txtype = txtype;
794789Sahrens 	itx->itx_lr.lrc_reclen = lrsize;
795789Sahrens 	itx->itx_lr.lrc_seq = 0;	/* defensive */
796789Sahrens 
797789Sahrens 	return (itx);
798789Sahrens }
799789Sahrens 
800789Sahrens uint64_t
801789Sahrens zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
802789Sahrens {
803789Sahrens 	uint64_t seq;
804789Sahrens 
805789Sahrens 	ASSERT(itx->itx_lr.lrc_seq == 0);
806789Sahrens 
807789Sahrens 	mutex_enter(&zilog->zl_lock);
808789Sahrens 	list_insert_tail(&zilog->zl_itx_list, itx);
809789Sahrens 	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
810789Sahrens 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
811789Sahrens 	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
812789Sahrens 	mutex_exit(&zilog->zl_lock);
813789Sahrens 
814789Sahrens 	return (seq);
815789Sahrens }
816789Sahrens 
817789Sahrens /*
818789Sahrens  * Free up all in-memory intent log transactions that have now been synced.
819789Sahrens  */
820789Sahrens static void
821789Sahrens zil_itx_clean(zilog_t *zilog)
822789Sahrens {
823789Sahrens 	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
824789Sahrens 	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
825789Sahrens 	itx_t *itx;
826789Sahrens 
827789Sahrens 	mutex_enter(&zilog->zl_lock);
8282638Sperrin 	/* wait for a log writer to finish walking list */
8292638Sperrin 	while (zilog->zl_writer) {
8302638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
8312638Sperrin 	}
8322638Sperrin 	/* no need to set zl_writer as we never drop zl_lock */
833789Sahrens 	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
834789Sahrens 	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
835789Sahrens 		list_remove(&zilog->zl_itx_list, itx);
836789Sahrens 		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
837789Sahrens 		kmem_free(itx, offsetof(itx_t, itx_lr)
838789Sahrens 		    + itx->itx_lr.lrc_reclen);
839789Sahrens 	}
840789Sahrens 	mutex_exit(&zilog->zl_lock);
841789Sahrens }
842789Sahrens 
8432638Sperrin /*
8442638Sperrin  * If there are in-memory intent log transactions then
8452638Sperrin  * start up a taskq to free up any that have now been synced.
8462638Sperrin  */
847789Sahrens void
848789Sahrens zil_clean(zilog_t *zilog)
849789Sahrens {
850789Sahrens 	mutex_enter(&zilog->zl_lock);
851789Sahrens 	if (list_head(&zilog->zl_itx_list) != NULL)
852789Sahrens 		(void) taskq_dispatch(zilog->zl_clean_taskq,
853789Sahrens 		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
854789Sahrens 	mutex_exit(&zilog->zl_lock);
855789Sahrens }
856789Sahrens 
857789Sahrens void
8582638Sperrin zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
859789Sahrens {
860789Sahrens 	uint64_t txg;
861789Sahrens 	uint64_t reclen;
8622638Sperrin 	itx_t *itx, *itx_next = (itx_t *)-1;
863789Sahrens 	lwb_t *lwb;
864789Sahrens 	spa_t *spa;
865789Sahrens 
8662638Sperrin 	zilog->zl_writer = B_TRUE;
8672638Sperrin 	zilog->zl_root_zio = NULL;
868789Sahrens 	spa = zilog->zl_spa;
869789Sahrens 
870789Sahrens 	if (zilog->zl_suspend) {
871789Sahrens 		lwb = NULL;
872789Sahrens 	} else {
873789Sahrens 		lwb = list_tail(&zilog->zl_lwb_list);
874789Sahrens 		if (lwb == NULL) {
8752638Sperrin 			/*
8762638Sperrin 			 * Return if there's nothing to flush before we
8772638Sperrin 			 * dirty the fs by calling zil_create()
8782638Sperrin 			 */
8792638Sperrin 			if (list_is_empty(&zilog->zl_itx_list)) {
8802638Sperrin 				/* wake up others waiting to start a write */
8812638Sperrin 				zilog->zl_writer = B_FALSE;
8822638Sperrin 				cv_broadcast(&zilog->zl_cv_writer);
8832638Sperrin 				mutex_exit(&zilog->zl_lock);
8842638Sperrin 				return;
8852638Sperrin 			}
8862638Sperrin 
887789Sahrens 			mutex_exit(&zilog->zl_lock);
888789Sahrens 			zil_create(zilog);
889789Sahrens 			mutex_enter(&zilog->zl_lock);
890789Sahrens 			lwb = list_tail(&zilog->zl_lwb_list);
891789Sahrens 		}
892789Sahrens 	}
893789Sahrens 
894789Sahrens 	/*
895789Sahrens 	 * Loop through in-memory log transactions filling log blocks,
896789Sahrens 	 * until we reach the given sequence number and there's no more
897789Sahrens 	 * room in the write buffer.
898789Sahrens 	 */
8992638Sperrin 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
900789Sahrens 	for (;;) {
9012638Sperrin 		/*
9022638Sperrin 		 * Find the next itx to push:
9032638Sperrin 		 * Push all transactions related to specified foid and all
9042638Sperrin 		 * other transactions except TX_WRITE, TX_TRUNCATE,
9052638Sperrin 		 * TX_SETATTR and TX_ACL for all other files.
9062638Sperrin 		 */
9072638Sperrin 		if (itx_next != (itx_t *)-1)
9082638Sperrin 			itx = itx_next;
9092638Sperrin 		else
9102638Sperrin 			itx = list_head(&zilog->zl_itx_list);
9112638Sperrin 		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
9122638Sperrin 			if (foid == 0) /* push all foids? */
9132638Sperrin 				break;
9142638Sperrin 			switch (itx->itx_lr.lrc_txtype) {
9152638Sperrin 			case TX_SETATTR:
9162638Sperrin 			case TX_WRITE:
9172638Sperrin 			case TX_TRUNCATE:
9182638Sperrin 			case TX_ACL:
9192638Sperrin 				/* lr_foid is same offset for these records */
9202638Sperrin 				if (((lr_write_t *)&itx->itx_lr)->lr_foid
9212638Sperrin 				    != foid) {
9222638Sperrin 					continue; /* skip this record */
9232638Sperrin 				}
9242638Sperrin 			}
9252638Sperrin 			break;
9262638Sperrin 		}
927789Sahrens 		if (itx == NULL)
928789Sahrens 			break;
929789Sahrens 
930789Sahrens 		reclen = itx->itx_lr.lrc_reclen;
931789Sahrens 		if ((itx->itx_lr.lrc_seq > seq) &&
9322638Sperrin 		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
9332638Sperrin 		    (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb))))
934789Sahrens 			break;
935789Sahrens 
9362638Sperrin 		/*
9372638Sperrin 		 * Save the next pointer.  Even though we soon drop
9382638Sperrin 		 * zl_lock all threads that may change the list
9392638Sperrin 		 * (another writer or zil_itx_clean) can't do so until
9402638Sperrin 		 * they have zl_writer.
9412638Sperrin 		 */
9422638Sperrin 		itx_next = list_next(&zilog->zl_itx_list, itx);
943789Sahrens 		list_remove(&zilog->zl_itx_list, itx);
944789Sahrens 		txg = itx->itx_lr.lrc_txg;
945789Sahrens 		ASSERT(txg);
946789Sahrens 
947789Sahrens 		mutex_exit(&zilog->zl_lock);
948789Sahrens 		if (txg > spa_last_synced_txg(spa) ||
949789Sahrens 		    txg > spa_freeze_txg(spa))
950789Sahrens 			lwb = zil_lwb_commit(zilog, itx, lwb);
951789Sahrens 		kmem_free(itx, offsetof(itx_t, itx_lr)
952789Sahrens 		    + itx->itx_lr.lrc_reclen);
953789Sahrens 		mutex_enter(&zilog->zl_lock);
954789Sahrens 		zilog->zl_itx_list_sz -= reclen;
955789Sahrens 	}
9562638Sperrin 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
957789Sahrens 	mutex_exit(&zilog->zl_lock);
958789Sahrens 
959789Sahrens 	/* write the last block out */
960789Sahrens 	if (lwb != NULL && lwb->lwb_nused != 0)
961789Sahrens 		lwb = zil_lwb_write_start(zilog, lwb);
962789Sahrens 
9631141Sperrin 	zilog->zl_prev_used = zilog->zl_cur_used;
9641141Sperrin 	zilog->zl_cur_used = 0;
9651141Sperrin 
9662638Sperrin 	/*
9672638Sperrin 	 * Wait if necessary for the log blocks to be on stable storage.
9682638Sperrin 	 */
969789Sahrens 	mutex_enter(&zilog->zl_lock);
9702638Sperrin 	if (zilog->zl_root_zio) {
9712638Sperrin 		mutex_exit(&zilog->zl_lock);
9722638Sperrin 		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
9732638Sperrin 		(void) zio_wait(zilog->zl_root_zio);
9742638Sperrin 		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
9752638Sperrin 		mutex_enter(&zilog->zl_lock);
9762638Sperrin 		zil_flush_vdevs(zilog);
977789Sahrens 	}
9781141Sperrin 
979789Sahrens 	if (zilog->zl_log_error || lwb == NULL) {
980789Sahrens 		zilog->zl_log_error = 0;
981789Sahrens 		mutex_exit(&zilog->zl_lock);
982789Sahrens 		txg_wait_synced(zilog->zl_dmu_pool, 0);
983789Sahrens 		mutex_enter(&zilog->zl_lock);
984789Sahrens 	}
9851141Sperrin 	/* wake up others waiting to start a write */
9861141Sperrin 	zilog->zl_writer = B_FALSE;
9872638Sperrin 	cv_broadcast(&zilog->zl_cv_writer);
988789Sahrens 	mutex_exit(&zilog->zl_lock);
9892638Sperrin }
9902638Sperrin 
9912638Sperrin /*
9922638Sperrin  * Push zfs transactions to stable storage up to the supplied sequence number.
9932638Sperrin  * If foid is 0 push out all transactions, otherwise push only those
9942638Sperrin  * for that file or might have been used to create that file.
9952638Sperrin  */
9962638Sperrin void
9972638Sperrin zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
9982638Sperrin {
9992638Sperrin 	if (zilog == NULL || seq == 0)
10002638Sperrin 		return;
10012638Sperrin 
10022638Sperrin 	mutex_enter(&zilog->zl_lock);
10032638Sperrin 
10042638Sperrin 	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */
10052638Sperrin 
10062638Sperrin 	while (zilog->zl_writer)
10072638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
10082638Sperrin 	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1009789Sahrens }
1010789Sahrens 
1011789Sahrens /*
1012789Sahrens  * Called in syncing context to free committed log blocks and update log header.
1013789Sahrens  */
1014789Sahrens void
1015789Sahrens zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1016789Sahrens {
10171807Sbonwick 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1018789Sahrens 	uint64_t txg = dmu_tx_get_txg(tx);
1019789Sahrens 	spa_t *spa = zilog->zl_spa;
1020789Sahrens 	lwb_t *lwb;
1021789Sahrens 
10221807Sbonwick 	mutex_enter(&zilog->zl_lock);
10231807Sbonwick 
1024789Sahrens 	ASSERT(zilog->zl_stop_sync == 0);
1025789Sahrens 
10261807Sbonwick 	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];
1027789Sahrens 
1028789Sahrens 	if (zilog->zl_destroy_txg == txg) {
10291807Sbonwick 		blkptr_t blk = zh->zh_log;
10301807Sbonwick 
10311807Sbonwick 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
10321807Sbonwick 		ASSERT(spa_sync_pass(spa) == 1);
10331807Sbonwick 
10341807Sbonwick 		bzero(zh, sizeof (zil_header_t));
1035789Sahrens 		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
10361807Sbonwick 
10371807Sbonwick 		if (zilog->zl_keep_first) {
10381807Sbonwick 			/*
10391807Sbonwick 			 * If this block was part of log chain that couldn't
10401807Sbonwick 			 * be claimed because a device was missing during
10411807Sbonwick 			 * zil_claim(), but that device later returns,
10421807Sbonwick 			 * then this block could erroneously appear valid.
10431807Sbonwick 			 * To guard against this, assign a new GUID to the new
10441807Sbonwick 			 * log chain so it doesn't matter what blk points to.
10451807Sbonwick 			 */
10461807Sbonwick 			zil_init_log_chain(zilog, &blk);
10471807Sbonwick 			zh->zh_log = blk;
10481807Sbonwick 		}
1049789Sahrens 	}
1050789Sahrens 
1051789Sahrens 	for (;;) {
1052789Sahrens 		lwb = list_head(&zilog->zl_lwb_list);
1053789Sahrens 		if (lwb == NULL) {
1054789Sahrens 			mutex_exit(&zilog->zl_lock);
1055789Sahrens 			return;
1056789Sahrens 		}
10572638Sperrin 		zh->zh_log = lwb->lwb_blk;
1058789Sahrens 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1059789Sahrens 			break;
1060789Sahrens 		list_remove(&zilog->zl_lwb_list, lwb);
1061789Sahrens 		zio_free_blk(spa, &lwb->lwb_blk, txg);
1062789Sahrens 		kmem_cache_free(zil_lwb_cache, lwb);
1063789Sahrens 	}
1064789Sahrens 	mutex_exit(&zilog->zl_lock);
1065789Sahrens }
1066789Sahrens 
1067789Sahrens void
1068789Sahrens zil_init(void)
1069789Sahrens {
1070789Sahrens 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1071*2856Snd150628 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1072789Sahrens }
1073789Sahrens 
1074789Sahrens void
1075789Sahrens zil_fini(void)
1076789Sahrens {
1077789Sahrens 	kmem_cache_destroy(zil_lwb_cache);
1078789Sahrens }
1079789Sahrens 
1080789Sahrens zilog_t *
1081789Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys)
1082789Sahrens {
1083789Sahrens 	zilog_t *zilog;
1084789Sahrens 
1085789Sahrens 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1086789Sahrens 
1087789Sahrens 	zilog->zl_header = zh_phys;
1088789Sahrens 	zilog->zl_os = os;
1089789Sahrens 	zilog->zl_spa = dmu_objset_spa(os);
1090789Sahrens 	zilog->zl_dmu_pool = dmu_objset_pool(os);
10911807Sbonwick 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1092789Sahrens 
1093*2856Snd150628 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1094*2856Snd150628 
1095789Sahrens 	list_create(&zilog->zl_itx_list, sizeof (itx_t),
1096789Sahrens 	    offsetof(itx_t, itx_node));
1097789Sahrens 
1098789Sahrens 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1099789Sahrens 	    offsetof(lwb_t, lwb_node));
1100789Sahrens 
1101789Sahrens 	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
1102789Sahrens 	    offsetof(zil_vdev_t, vdev_seq_node));
1103789Sahrens 
1104789Sahrens 	return (zilog);
1105789Sahrens }
1106789Sahrens 
1107789Sahrens void
1108789Sahrens zil_free(zilog_t *zilog)
1109789Sahrens {
1110789Sahrens 	lwb_t *lwb;
1111789Sahrens 	zil_vdev_t *zv;
1112789Sahrens 
1113789Sahrens 	zilog->zl_stop_sync = 1;
1114789Sahrens 
1115789Sahrens 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1116789Sahrens 		list_remove(&zilog->zl_lwb_list, lwb);
1117789Sahrens 		if (lwb->lwb_buf != NULL)
1118789Sahrens 			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1119789Sahrens 		kmem_cache_free(zil_lwb_cache, lwb);
1120789Sahrens 	}
1121789Sahrens 	list_destroy(&zilog->zl_lwb_list);
1122789Sahrens 
1123789Sahrens 	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
1124789Sahrens 		list_remove(&zilog->zl_vdev_list, zv);
1125789Sahrens 		kmem_free(zv, sizeof (zil_vdev_t));
1126789Sahrens 	}
1127789Sahrens 	list_destroy(&zilog->zl_vdev_list);
1128789Sahrens 
1129789Sahrens 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1130789Sahrens 	list_destroy(&zilog->zl_itx_list);
1131*2856Snd150628 	mutex_destroy(&zilog->zl_lock);
1132789Sahrens 
1133789Sahrens 	kmem_free(zilog, sizeof (zilog_t));
1134789Sahrens }
1135789Sahrens 
1136789Sahrens /*
11371646Sperrin  * return true if the initial log block is not valid
11381362Sperrin  */
11391362Sperrin static int
11401362Sperrin zil_empty(zilog_t *zilog)
11411362Sperrin {
11421807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
11431807Sbonwick 	arc_buf_t *abuf = NULL;
11441362Sperrin 
11451807Sbonwick 	if (BP_IS_HOLE(&zh->zh_log))
11461362Sperrin 		return (1);
11471362Sperrin 
11481807Sbonwick 	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
11491807Sbonwick 		return (1);
11501807Sbonwick 
11511807Sbonwick 	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
11521807Sbonwick 	return (0);
11531362Sperrin }
11541362Sperrin 
11551362Sperrin /*
1156789Sahrens  * Open an intent log.
1157789Sahrens  */
1158789Sahrens zilog_t *
1159789Sahrens zil_open(objset_t *os, zil_get_data_t *get_data)
1160789Sahrens {
1161789Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
1162789Sahrens 
1163789Sahrens 	zilog->zl_get_data = get_data;
1164789Sahrens 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1165789Sahrens 	    2, 2, TASKQ_PREPOPULATE);
1166789Sahrens 
1167789Sahrens 	return (zilog);
1168789Sahrens }
1169789Sahrens 
1170789Sahrens /*
1171789Sahrens  * Close an intent log.
1172789Sahrens  */
1173789Sahrens void
1174789Sahrens zil_close(zilog_t *zilog)
1175789Sahrens {
11761807Sbonwick 	/*
11771807Sbonwick 	 * If the log isn't already committed, mark the objset dirty
11781807Sbonwick 	 * (so zil_sync() will be called) and wait for that txg to sync.
11791807Sbonwick 	 */
11801807Sbonwick 	if (!zil_is_committed(zilog)) {
11811807Sbonwick 		uint64_t txg;
11821807Sbonwick 		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
11831807Sbonwick 		(void) dmu_tx_assign(tx, TXG_WAIT);
11841807Sbonwick 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
11851807Sbonwick 		txg = dmu_tx_get_txg(tx);
11861807Sbonwick 		dmu_tx_commit(tx);
11871807Sbonwick 		txg_wait_synced(zilog->zl_dmu_pool, txg);
11881807Sbonwick 	}
11891807Sbonwick 
1190789Sahrens 	taskq_destroy(zilog->zl_clean_taskq);
1191789Sahrens 	zilog->zl_clean_taskq = NULL;
1192789Sahrens 	zilog->zl_get_data = NULL;
1193789Sahrens 
1194789Sahrens 	zil_itx_clean(zilog);
1195789Sahrens 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1196789Sahrens }
1197789Sahrens 
1198789Sahrens /*
1199789Sahrens  * Suspend an intent log.  While in suspended mode, we still honor
1200789Sahrens  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1201789Sahrens  * We suspend the log briefly when taking a snapshot so that the snapshot
1202789Sahrens  * contains all the data it's supposed to, and has an empty intent log.
1203789Sahrens  */
1204789Sahrens int
1205789Sahrens zil_suspend(zilog_t *zilog)
1206789Sahrens {
12071807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1208789Sahrens 
1209789Sahrens 	mutex_enter(&zilog->zl_lock);
12101807Sbonwick 	if (zh->zh_claim_txg != 0) {		/* unplayed log */
1211789Sahrens 		mutex_exit(&zilog->zl_lock);
1212789Sahrens 		return (EBUSY);
1213789Sahrens 	}
12141807Sbonwick 	if (zilog->zl_suspend++ != 0) {
12151807Sbonwick 		/*
12161807Sbonwick 		 * Someone else already began a suspend.
12171807Sbonwick 		 * Just wait for them to finish.
12181807Sbonwick 		 */
12191807Sbonwick 		while (zilog->zl_suspending)
12201807Sbonwick 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
12211807Sbonwick 		ASSERT(BP_IS_HOLE(&zh->zh_log));
12221807Sbonwick 		mutex_exit(&zilog->zl_lock);
12231807Sbonwick 		return (0);
12241807Sbonwick 	}
12251807Sbonwick 	zilog->zl_suspending = B_TRUE;
1226789Sahrens 	mutex_exit(&zilog->zl_lock);
1227789Sahrens 
12282638Sperrin 	zil_commit(zilog, UINT64_MAX, 0);
1229789Sahrens 
12302638Sperrin 	/*
12312638Sperrin 	 * Wait for any in-flight log writes to complete.
12322638Sperrin 	 */
1233789Sahrens 	mutex_enter(&zilog->zl_lock);
12342638Sperrin 	while (zilog->zl_writer)
12352638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1236789Sahrens 	mutex_exit(&zilog->zl_lock);
1237789Sahrens 
12381807Sbonwick 	zil_destroy(zilog, B_FALSE);
12391807Sbonwick 
12401807Sbonwick 	mutex_enter(&zilog->zl_lock);
12411807Sbonwick 	ASSERT(BP_IS_HOLE(&zh->zh_log));
12421807Sbonwick 	zilog->zl_suspending = B_FALSE;
12431807Sbonwick 	cv_broadcast(&zilog->zl_cv_suspend);
12441807Sbonwick 	mutex_exit(&zilog->zl_lock);
1245789Sahrens 
1246789Sahrens 	return (0);
1247789Sahrens }
1248789Sahrens 
1249789Sahrens void
1250789Sahrens zil_resume(zilog_t *zilog)
1251789Sahrens {
1252789Sahrens 	mutex_enter(&zilog->zl_lock);
1253789Sahrens 	ASSERT(zilog->zl_suspend != 0);
1254789Sahrens 	zilog->zl_suspend--;
1255789Sahrens 	mutex_exit(&zilog->zl_lock);
1256789Sahrens }
1257789Sahrens 
1258789Sahrens typedef struct zil_replay_arg {
1259789Sahrens 	objset_t	*zr_os;
1260789Sahrens 	zil_replay_func_t **zr_replay;
1261789Sahrens 	void		*zr_arg;
1262789Sahrens 	void		(*zr_rm_sync)(void *arg);
1263789Sahrens 	uint64_t	*zr_txgp;
1264789Sahrens 	boolean_t	zr_byteswap;
1265789Sahrens 	char		*zr_lrbuf;
1266789Sahrens } zil_replay_arg_t;
1267789Sahrens 
1268789Sahrens static void
1269789Sahrens zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1270789Sahrens {
1271789Sahrens 	zil_replay_arg_t *zr = zra;
12721807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
1273789Sahrens 	uint64_t reclen = lr->lrc_reclen;
1274789Sahrens 	uint64_t txtype = lr->lrc_txtype;
1275789Sahrens 	int pass, error;
1276789Sahrens 
1277789Sahrens 	if (zilog->zl_stop_replay)
1278789Sahrens 		return;
1279789Sahrens 
1280789Sahrens 	if (lr->lrc_txg < claim_txg)		/* already committed */
1281789Sahrens 		return;
1282789Sahrens 
1283789Sahrens 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1284789Sahrens 		return;
1285789Sahrens 
1286789Sahrens 	/*
1287789Sahrens 	 * Make a copy of the data so we can revise and extend it.
1288789Sahrens 	 */
1289789Sahrens 	bcopy(lr, zr->zr_lrbuf, reclen);
1290789Sahrens 
1291789Sahrens 	/*
1292789Sahrens 	 * The log block containing this lr may have been byteswapped
1293789Sahrens 	 * so that we can easily examine common fields like lrc_txtype.
1294789Sahrens 	 * However, the log is a mix of different data types, and only the
1295789Sahrens 	 * replay vectors know how to byteswap their records.  Therefore, if
1296789Sahrens 	 * the lr was byteswapped, undo it before invoking the replay vector.
1297789Sahrens 	 */
1298789Sahrens 	if (zr->zr_byteswap)
1299789Sahrens 		byteswap_uint64_array(zr->zr_lrbuf, reclen);
1300789Sahrens 
1301789Sahrens 	/*
1302789Sahrens 	 * If this is a TX_WRITE with a blkptr, suck in the data.
1303789Sahrens 	 */
1304789Sahrens 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1305789Sahrens 		lr_write_t *lrw = (lr_write_t *)lr;
1306789Sahrens 		blkptr_t *wbp = &lrw->lr_blkptr;
1307789Sahrens 		uint64_t wlen = lrw->lr_length;
1308789Sahrens 		char *wbuf = zr->zr_lrbuf + reclen;
1309789Sahrens 
1310789Sahrens 		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
1311789Sahrens 			bzero(wbuf, wlen);
1312789Sahrens 		} else {
1313789Sahrens 			/*
1314789Sahrens 			 * A subsequent write may have overwritten this block,
1315789Sahrens 			 * in which case wbp may have been been freed and
1316789Sahrens 			 * reallocated, and our read of wbp may fail with a
1317789Sahrens 			 * checksum error.  We can safely ignore this because
1318789Sahrens 			 * the later write will provide the correct data.
1319789Sahrens 			 */
13201544Seschrock 			zbookmark_t zb;
13211544Seschrock 
13221544Seschrock 			zb.zb_objset = dmu_objset_id(zilog->zl_os);
13231544Seschrock 			zb.zb_object = lrw->lr_foid;
13241544Seschrock 			zb.zb_level = -1;
13251544Seschrock 			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
13261544Seschrock 
1327789Sahrens 			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
1328789Sahrens 			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1329789Sahrens 			    ZIO_PRIORITY_SYNC_READ,
13301544Seschrock 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1331789Sahrens 			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1332789Sahrens 		}
1333789Sahrens 	}
1334789Sahrens 
1335789Sahrens 	/*
1336789Sahrens 	 * We must now do two things atomically: replay this log record,
1337789Sahrens 	 * and update the log header to reflect the fact that we did so.
1338789Sahrens 	 * We use the DMU's ability to assign into a specific txg to do this.
1339789Sahrens 	 */
1340789Sahrens 	for (pass = 1; /* CONSTANTCONDITION */; pass++) {
1341789Sahrens 		uint64_t replay_txg;
1342789Sahrens 		dmu_tx_t *replay_tx;
1343789Sahrens 
1344789Sahrens 		replay_tx = dmu_tx_create(zr->zr_os);
1345789Sahrens 		error = dmu_tx_assign(replay_tx, TXG_WAIT);
1346789Sahrens 		if (error) {
1347789Sahrens 			dmu_tx_abort(replay_tx);
1348789Sahrens 			break;
1349789Sahrens 		}
1350789Sahrens 
1351789Sahrens 		replay_txg = dmu_tx_get_txg(replay_tx);
1352789Sahrens 
1353789Sahrens 		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1354789Sahrens 			error = EINVAL;
1355789Sahrens 		} else {
1356789Sahrens 			/*
1357789Sahrens 			 * On the first pass, arrange for the replay vector
1358789Sahrens 			 * to fail its dmu_tx_assign().  That's the only way
1359789Sahrens 			 * to ensure that those code paths remain well tested.
1360789Sahrens 			 */
1361789Sahrens 			*zr->zr_txgp = replay_txg - (pass == 1);
1362789Sahrens 			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1363789Sahrens 			    zr->zr_byteswap);
1364789Sahrens 			*zr->zr_txgp = TXG_NOWAIT;
1365789Sahrens 		}
1366789Sahrens 
1367789Sahrens 		if (error == 0) {
1368789Sahrens 			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
1369789Sahrens 			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
1370789Sahrens 			    lr->lrc_seq;
1371789Sahrens 		}
1372789Sahrens 
1373789Sahrens 		dmu_tx_commit(replay_tx);
1374789Sahrens 
1375789Sahrens 		if (error != ERESTART)
1376789Sahrens 			break;
1377789Sahrens 
1378789Sahrens 		if (pass != 1)
1379789Sahrens 			txg_wait_open(spa_get_dsl(zilog->zl_spa),
1380789Sahrens 			    replay_txg + 1);
1381789Sahrens 
1382789Sahrens 		dprintf("pass %d, retrying\n", pass);
1383789Sahrens 	}
1384789Sahrens 
1385789Sahrens 	if (error) {
1386789Sahrens 		char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1387789Sahrens 		dmu_objset_name(zr->zr_os, name);
1388789Sahrens 		cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1389789Sahrens 		    "dataset %s, seq 0x%llx, txtype %llu\n",
1390789Sahrens 		    error, name,
1391789Sahrens 		    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
1392789Sahrens 		zilog->zl_stop_replay = 1;
1393789Sahrens 		kmem_free(name, MAXNAMELEN);
1394789Sahrens 	}
1395789Sahrens 
1396789Sahrens 	/*
1397789Sahrens 	 * The DMU's dnode layer doesn't see removes until the txg commits,
1398789Sahrens 	 * so a subsequent claim can spuriously fail with EEXIST.
1399789Sahrens 	 * To prevent this, if we might have removed an object,
1400789Sahrens 	 * wait for the delete thread to delete it, and then
1401789Sahrens 	 * wait for the transaction group to sync.
1402789Sahrens 	 */
1403789Sahrens 	if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
1404789Sahrens 		if (zr->zr_rm_sync != NULL)
1405789Sahrens 			zr->zr_rm_sync(zr->zr_arg);
1406789Sahrens 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1407789Sahrens 	}
1408789Sahrens }
1409789Sahrens 
1410789Sahrens /*
14111362Sperrin  * If this dataset has a non-empty intent log, replay it and destroy it.
1412789Sahrens  */
1413789Sahrens void
1414789Sahrens zil_replay(objset_t *os, void *arg, uint64_t *txgp,
1415789Sahrens 	zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
1416789Sahrens {
1417789Sahrens 	zilog_t *zilog = dmu_objset_zil(os);
14181807Sbonwick 	const zil_header_t *zh = zilog->zl_header;
14191807Sbonwick 	zil_replay_arg_t zr;
14201362Sperrin 
14211362Sperrin 	if (zil_empty(zilog)) {
14221807Sbonwick 		zil_destroy(zilog, B_TRUE);
14231362Sperrin 		return;
14241362Sperrin 	}
1425789Sahrens 
1426789Sahrens 	zr.zr_os = os;
1427789Sahrens 	zr.zr_replay = replay_func;
1428789Sahrens 	zr.zr_arg = arg;
1429789Sahrens 	zr.zr_rm_sync = rm_sync;
1430789Sahrens 	zr.zr_txgp = txgp;
14311807Sbonwick 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1432789Sahrens 	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1433789Sahrens 
1434789Sahrens 	/*
1435789Sahrens 	 * Wait for in-progress removes to sync before starting replay.
1436789Sahrens 	 */
1437789Sahrens 	if (rm_sync != NULL)
1438789Sahrens 		rm_sync(arg);
1439789Sahrens 	txg_wait_synced(zilog->zl_dmu_pool, 0);
1440789Sahrens 
1441789Sahrens 	zilog->zl_stop_replay = 0;
14421807Sbonwick 	(void) zil_parse(zilog, NULL, zil_replay_log_record, &zr,
14431807Sbonwick 	    zh->zh_claim_txg);
1444789Sahrens 	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1445789Sahrens 
14461807Sbonwick 	zil_destroy(zilog, B_FALSE);
1447789Sahrens }
14481646Sperrin 
14491646Sperrin /*
14501646Sperrin  * Report whether all transactions are committed
14511646Sperrin  */
14521646Sperrin int
14531646Sperrin zil_is_committed(zilog_t *zilog)
14541646Sperrin {
14551646Sperrin 	lwb_t *lwb;
14562638Sperrin 	int ret;
14571646Sperrin 
14582638Sperrin 	mutex_enter(&zilog->zl_lock);
14592638Sperrin 	while (zilog->zl_writer)
14602638Sperrin 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
14612638Sperrin 
14622638Sperrin 	/* recent unpushed intent log transactions? */
14632638Sperrin 	if (!list_is_empty(&zilog->zl_itx_list)) {
14642638Sperrin 		ret = B_FALSE;
14652638Sperrin 		goto out;
14662638Sperrin 	}
14672638Sperrin 
14682638Sperrin 	/* intent log never used? */
14692638Sperrin 	lwb = list_head(&zilog->zl_lwb_list);
14702638Sperrin 	if (lwb == NULL) {
14712638Sperrin 		ret = B_TRUE;
14722638Sperrin 		goto out;
14732638Sperrin 	}
14741646Sperrin 
14751646Sperrin 	/*
14762638Sperrin 	 * more than 1 log buffer means zil_sync() hasn't yet freed
14772638Sperrin 	 * entries after a txg has committed
14781646Sperrin 	 */
14792638Sperrin 	if (list_next(&zilog->zl_lwb_list, lwb)) {
14802638Sperrin 		ret = B_FALSE;
14812638Sperrin 		goto out;
14822638Sperrin 	}
14832638Sperrin 
14841646Sperrin 	ASSERT(zil_empty(zilog));
14852638Sperrin 	ret = B_TRUE;
14862638Sperrin out:
14872638Sperrin 	cv_broadcast(&zilog->zl_cv_writer);
14882638Sperrin 	mutex_exit(&zilog->zl_lock);
14892638Sperrin 	return (ret);
14901646Sperrin }
1491