Lines Matching defs:zilog

Cross-reference output for the symbol zilog in the ZFS intent log code (OpenZFS zil.c). Each entry below is prefixed with its line number in the source file, followed by the matching line quoted verbatim; multi-line statements and block comments therefore appear truncated at the match.

147 static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
149 static uint64_t zil_max_waste_space(zilog_t *zilog);
165 zil_bp_tree_init(zilog_t *zilog)
167 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
172 zil_bp_tree_fini(zilog_t *zilog)
174 avl_tree_t *t = &zilog->zl_bp_tree;
185 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
187 avl_tree_t *t = &zilog->zl_bp_tree;
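Lines 165-187 above are the block-pointer tree: an AVL set that records each blkptr once during claim and free, so replaying the same chain twice cannot double-claim or double-free a block. zil_bp_tree_add() returns nonzero for a duplicate, which is why lines 594 and 745 skip the zio_free() when the add fails. A minimal sketch of the comparator such a set needs; zil_bp_node_t, zn_dva, and ordering by the first DVA reflect my reading of the source and should be treated as assumptions:

    /* Order bp-tree nodes by DVA: vdev first, then offset. */
    static int
    zil_bp_compare(const void *x1, const void *x2)
    {
        const dva_t *dva1 = &((const zil_bp_node_t *)x1)->zn_dva;
        const dva_t *dva2 = &((const zil_bp_node_t *)x2)->zn_dva;

        int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
        if (cmp != 0)
            return (cmp);
        return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
    }
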
208 zil_header_in_syncing_context(zilog_t *zilog)
210 return ((zil_header_t *)zilog->zl_header);
214 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
222 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
245 zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
253 if (zilog->zl_header->zh_claim_txg == 0)
256 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
265 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
319 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
334 if (zilog->zl_header->zh_claim_txg == 0)
346 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
349 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
460 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
464 const zil_header_t *zh = zilog->zl_header;
490 zil_bp_tree_init(zilog);
501 error = parse_blk_func(zilog, &blk, arg, txg);
511 error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
519 dmu_objset_name(zilog->zl_os, name);
555 error = parse_lr_func(zilog, lr, arg, txg);
567 zilog->zl_parse_error = error;
568 zilog->zl_parse_blk_seq = max_blk_seq;
569 zilog->zl_parse_lr_seq = max_lr_seq;
570 zilog->zl_parse_blk_count = blk_count;
571 zilog->zl_parse_lr_count = lr_count;
573 zil_bp_tree_fini(zilog);
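Lines 460-573 are zil_parse(), the generic walker over the on-disk log chain: it invokes one callback per log block and one per log record, then stores the resulting high-water marks and counts on the zilog (zl_parse_blk_seq, zl_parse_lr_seq, and the two counters above). Every consumer in this file is a pair of such callbacks; a sketch of the calling convention, mirroring the claim path at lines 1220 and 1304 and the destroy path at lines 1129-1130:

    /* Walk the whole chain, claiming blocks and records as we go.
     * The final argument is the decrypt flag; B_FALSE here matches
     * the destroy path at line 1130. */
    (void) zil_parse(zilog, zil_claim_log_block, zil_claim_log_record,
        tx, first_txg, B_FALSE);
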
579 zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
594 if (zil_bp_tree_add(zilog, bp) != 0)
597 zio_free(zilog->zl_spa, first_txg, bp);
602 zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
605 (void) zilog, (void) lrc, (void) tx, (void) first_txg;
610 zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
618 zil_bp_tree_add(zilog, bp) != 0)
621 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
627 zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
643 error = zil_read_log_data(zilog, lr, NULL);
648 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
652 zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
657 spa_t *spa = zilog->zl_spa;
708 zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
714 return (zil_claim_write(zilog, lrc, tx, first_txg));
716 return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
723 zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
728 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
734 zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
745 zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) {
746 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
753 zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
768 spa = zilog->zl_spa;
782 zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
792 return (zil_free_write(zilog, lrc, tx, claim_txg));
794 return (zil_free_clone_range(zilog, lrc, tx));
815 zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
821 lwb->lwb_zilog = zilog;
828 lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
851 mutex_enter(&zilog->zl_lock);
852 list_insert_tail(&zilog->zl_lwb_list, lwb);
854 zilog->zl_last_lwb_opened = lwb;
855 mutex_exit(&zilog->zl_lock);
861 zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
863 ASSERT(MUTEX_HELD(&zilog->zl_lock));
869 ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
870 ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
877 * Clear the zilog's field to indicate this lwb is no longer
880 if (zilog->zl_last_lwb_opened == lwb)
881 zilog->zl_last_lwb_opened = NULL;
891 zilog_dirty(zilog_t *zilog, uint64_t txg)
893 dsl_pool_t *dp = zilog->zl_dmu_pool;
894 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
896 ASSERT(spa_writeable(zilog->zl_spa));
901 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
903 dmu_buf_add_ref(ds->ds_dbuf, zilog);
905 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
917 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
919 dsl_pool_t *dp = zilog->zl_dmu_pool;
921 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
931 zilog_is_dirty(zilog_t *zilog)
933 dsl_pool_t *dp = zilog->zl_dmu_pool;
936 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
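The dirty-tracking functions above, like zl_itxg (line 2516), zl_replayed_seq (line 3703), and zl_lwb_inflight (line 1521), index per-txg state with txg & TXG_MASK. ZFS keeps TXG_SIZE slots and masks the monotonically increasing 64-bit txg down to one of them; since no more than TXG_SIZE txgs are ever in flight, a slot is always quiesced before it is reused. Sketch, using the stock TXG_SIZE of 4:

    #define TXG_SIZE 4               /* stock value; a power of two */
    #define TXG_MASK (TXG_SIZE - 1)

    /* txg 1000 and txg 1004 share slot 0, but slot 0 has been
     * synced and recycled before txg 1004 can touch it. */
    uint64_t slot = txg & TXG_MASK;
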
949 zil_commit_activate_saxattr_feature(zilog_t *zilog)
951 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
955 if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
956 dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
958 tx = dmu_tx_create(zilog->zl_os);
968 txg_wait_synced(zilog->zl_dmu_pool, txg);
976 zil_create(zilog_t *zilog)
978 const zil_header_t *zh = zilog->zl_header;
985 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
991 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1004 tx = dmu_tx_create(zilog->zl_os);
1006 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1010 zio_free(zilog->zl_spa, txg, &blk);
1014 error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
1017 zil_init_log_chain(zilog, &blk);
1024 lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
1038 if (spa_feature_is_enabled(zilog->zl_spa,
1039 SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
1048 txg_wait_synced(zilog->zl_dmu_pool, txg);
1054 zil_commit_activate_saxattr_feature(zilog);
1056 IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
1057 dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
1077 zil_destroy(zilog_t *zilog, boolean_t keep_first)
1079 const zil_header_t *zh = zilog->zl_header;
1087 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1089 zilog->zl_old_header = *zh; /* debugging aid */
1094 tx = dmu_tx_create(zilog->zl_os);
1096 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1099 mutex_enter(&zilog->zl_lock);
1101 ASSERT3U(zilog->zl_destroy_txg, <, txg);
1102 zilog->zl_destroy_txg = txg;
1103 zilog->zl_keep_first = keep_first;
1105 if (!list_is_empty(&zilog->zl_lwb_list)) {
1108 while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
1112 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
1113 zil_free_lwb(zilog, lwb);
1116 zil_destroy_sync(zilog, tx);
1118 mutex_exit(&zilog->zl_lock);
1126 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
1128 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1129 (void) zil_parse(zilog, zil_free_log_block,
1130 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
1137 zilog_t *zilog;
1158 zilog = dmu_objset_zil(os);
1159 zh = zil_header_in_syncing_context(zilog);
1160 ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
1161 first_txg = spa_min_claim_txg(zilog->zl_spa);
1190 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
1191 (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
1194 (void) zil_parse(zilog, zil_clear_log_block,
1209 ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
1220 (void) zil_parse(zilog, zil_claim_log_block,
1223 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
1224 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
1225 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
1233 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
1247 zilog_t *zilog;
1261 zilog = dmu_objset_zil(os);
1262 bp = (blkptr_t *)&zilog->zl_header->zh_log;
1291 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1292 if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
1304 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
1305 zilog->zl_header->zh_claim_txg ? -1ULL :
1339 * The lwb_waiters field of the lwb is protected by the zilog's
1412 * while holding zilog->zl_lock) as its writes and those of its
1456 zilog_t *zilog = lwb->lwb_zilog;
1460 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
1464 mutex_enter(&zilog->zl_lock);
1466 zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;
1473 if (zilog->zl_last_lwb_opened == lwb) {
1480 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1518 mutex_exit(&zilog->zl_lock);
1520 mutex_enter(&zilog->zl_lwb_io_lock);
1521 ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
1522 zilog->zl_lwb_inflight[txg & TXG_MASK]--;
1523 if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
1524 cv_broadcast(&zilog->zl_lwb_io_cv);
1525 mutex_exit(&zilog->zl_lwb_io_lock);
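Line 1466 maintains zl_last_lwb_latency as an exponential moving average with weight 1/8 on the newest sample: new = (old * 7 + t) / 8. The smoothed value later sizes the commit waiter's pre-timeout sleep at line 3328 (latency * pct / 100). Worked numbers:

    /* EWMA step from line 1466: starting at 800 us and observing a
     * 1600 us lwb completion gives (800*7 + 1600)/8 = 900 us; each
     * further 1600 us sample closes 1/8 of the remaining gap. */
    hrtime_t ewma = 800000;            /* ns */
    hrtime_t t = 1600000;              /* ns, latest lwb latency */
    ewma = (ewma * 7 + t) / 8;         /* -> 900000 ns */
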
1533 zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
1535 ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
1537 mutex_enter(&zilog->zl_lwb_io_lock);
1538 while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
1539 cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
1540 mutex_exit(&zilog->zl_lwb_io_lock);
1543 mutex_enter(&zilog->zl_lock);
1544 mutex_enter(&zilog->zl_lwb_io_lock);
1545 lwb_t *lwb = list_head(&zilog->zl_lwb_list);
1556 lwb = list_next(&zilog->zl_lwb_list, lwb);
1558 mutex_exit(&zilog->zl_lwb_io_lock);
1559 mutex_exit(&zilog->zl_lock);
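Lines 1520-1540 form a counting barrier across three sites: issue increments zl_lwb_inflight[txg & TXG_MASK] (line 2008), the flush-done callback decrements it and broadcasts at zero (lines 1521-1524), and zil_lwb_flush_wait_all() blocks until the slot drains before zil_sync() proceeds (line 3713). Stitched together from the matched lines, the idiom is:

    /* Issue side (zil_lwb_write_issue): */
    mutex_enter(&zilog->zl_lwb_io_lock);
    zilog->zl_lwb_inflight[txg & TXG_MASK]++;
    mutex_exit(&zilog->zl_lwb_io_lock);

    /* Wait side (zil_lwb_flush_wait_all): */
    mutex_enter(&zilog->zl_lwb_io_lock);
    while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
        cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
    mutex_exit(&zilog->zl_lwb_io_lock);
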
1580 zilog_t *zilog = lwb->lwb_zilog;
1592 mutex_enter(&zilog->zl_lock);
1604 nlwb = list_next(&zilog->zl_lwb_list, lwb);
1607 mutex_exit(&zilog->zl_lock);
1680 zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
1682 ASSERT(MUTEX_HELD(&zilog->zl_lock));
1684 lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb);
1726 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
1728 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1735 mutex_enter(&zilog->zl_lock);
1737 zilog->zl_last_lwb_opened = lwb;
1738 mutex_exit(&zilog->zl_lock);
1752 zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
1754 uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);
1780 uint_t waste = zil_max_waste_space(zilog);
1781 waste = MAX(waste, zilog->zl_cur_max);
1798 zil_lwb_predict(zilog_t *zilog)
1803 if (zilog->zl_cur_size > 0) {
1804 o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
1812 o = MIN(o, zilog->zl_prev_opt[i]);
1817 m = zilog->zl_prev_min[i];
1838 zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
1842 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1861 if (zilog->zl_cur_left > 0) {
1868 plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
1869 if (zilog->zl_parallel) {
1870 plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
1871 zil_lwb_predict(zilog), &m);
1880 plan = zil_lwb_predict(zilog);
1884 blksz = MIN(blksz, zilog->zl_max_block_size);
1885 DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
1888 return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
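Lines 1752-1888 size the next lwb: zil_lwb_plan() computes a block size for the bytes already queued (zl_cur_left), zil_lwb_predict() falls back to the history ring (zl_prev_opt/zl_prev_min, refilled by zil_burst_done at lines 2873-2876), and zil_lwb_write_close() clamps the winner to zl_max_block_size. A simplified sketch of that decision, omitting the parallel-burst blending visible at lines 1869-1876:

    uint_t m;
    uint64_t blksz;

    if (zilog->zl_cur_left > 0)          /* bytes already waiting */
        blksz = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
    else                                 /* guess from past bursts */
        blksz = zil_lwb_predict(zilog);
    blksz = MIN(blksz, zilog->zl_max_block_size);
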
1895 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
1897 spa_t *spa = zilog->zl_spa;
1909 zil_lwb_commit(zilog, lwb, itx);
1923 mutex_enter(&zilog->zl_lock);
1926 mutex_exit(&zilog->zl_lock);
1929 mutex_exit(&zilog->zl_lock);
1939 if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
1978 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1980 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1986 lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb);
1991 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
2006 mutex_enter(&zilog->zl_lwb_io_lock);
2008 zilog->zl_lwb_inflight[txg & TXG_MASK]++;
2009 zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
2010 mutex_exit(&zilog->zl_lwb_io_lock);
2019 mutex_enter(&zilog->zl_lock);
2020 zil_lwb_set_zio_dependency(zilog, lwb);
2031 mutex_exit(&zilog->zl_lock);
2034 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
2035 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
2037 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
2039 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
2042 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
2043 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
2045 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
2047 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
2069 zil_max_log_data(zilog_t *zilog, size_t hdrsize)
2071 return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
2079 zil_max_waste_space(zilog_t *zilog)
2081 return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16);
2094 zil_max_copied_data(zilog_t *zilog)
2096 uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t));
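Lines 2069-2096 derive the sizing limits from zl_max_block_size: the usable payload of a block is its size minus the chain structure (zil_chain_t) and the record header, and zil_max_waste_space() permits closing a block with up to 1/16 of that payload unused (the threshold consumed by the continue-or-close test at line 2190). Worked numbers for a 128 KiB block, leaving the struct sizes symbolic:

    /* max_log_data = 131072 - sizeof (zil_chain_t) - sizeof (lr_write_t)
     * max_waste    = max_log_data / 16            (~8 KiB)
     * An open lwb whose remaining space drops below max_waste is
     * closed rather than forcing a record to split into it. */
    uint64_t max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
    uint64_t max_waste = max_log_data / 16;
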
2143 zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
2150 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2154 zil_lwb_write_open(zilog, lwb);
2179 ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0));
2188 max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
2190 lwb_sp < zil_max_waste_space(zilog) &&
2194 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
2221 zilog->zl_cur_left -= dnow;
2233 clr->lrc_seq = ++zilog->zl_lr_seq;
2247 lr->lrc_txg > spa_freeze_txg(zilog->zl_spa))
2248 txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
2258 zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
2280 ZIL_STAT_BUMP(zilog, zil_itx_count);
2287 ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
2288 ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
2297 ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
2298 ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
2303 ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
2304 ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
2308 zilog->zl_spa, NULL, NULL, NULL,
2321 error = zilog->zl_get_data(itx->itx_private,
2356 txg_wait_synced(zilog->zl_dmu_pool,
2498 zil_remove_async(zilog_t *zilog, uint64_t oid)
2510 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2513 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2516 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2543 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
2553 zil_async_to_sync(zilog, itx->itx_oid);
2555 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
2560 itxg = &zilog->zl_itxg[txg & TXG_MASK];
2613 zilog_dirty(zilog, dmu_tx_get_txg(tx));
2629 zil_clean(zilog_t *zilog, uint64_t synced_txg)
2631 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
2653 ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
2654 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
2655 taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
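Lines 2653-2655 hand per-txg itx cleanup to the pool's dp_zil_clean_taskq so zil_clean() does not do the destruction work inline on the sync path. The usual dispatch idiom pairs this with an inline fallback when the taskq cannot take the job; the fallback below is an assumption (the listing shows only the dispatch line), and itxs is a hypothetical name for the itxs being retired:

    taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
        (void (*)(void *))zil_itxg_clean, itxs, TQ_NOSLEEP);
    if (id == TASKQID_INVALID)
        zil_itxg_clean(itxs);      /* assumed inline fallback */
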
2666 zil_get_commit_list(zilog_t *zilog)
2669 list_t *commit_list = &zilog->zl_itx_commit_list;
2671 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2673 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2676 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2684 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2700 ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
2701 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
2704 if (unlikely(zilog->zl_suspend > 0)) {
2721 zilog->zl_cur_size += s;
2722 zilog->zl_cur_left += s;
2724 zilog->zl_cur_max = MAX(zilog->zl_cur_max, s);
2735 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
2742 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2745 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2752 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2798 zil_prune_commit_list(zilog_t *zilog)
2802 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2804 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
2809 mutex_enter(&zilog->zl_lock);
2811 lwb_t *last_lwb = zilog->zl_last_lwb_opened;
2825 mutex_exit(&zilog->zl_lock);
2827 list_remove(&zilog->zl_itx_commit_list, itx);
2835 zil_commit_writer_stall(zilog_t *zilog)
2840 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
2852 * We must hold the zilog's zl_issuer_lock while we do this, to
2857 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2858 ZIL_STAT_BUMP(zilog, zil_commit_stall_count);
2859 txg_wait_synced(zilog->zl_dmu_pool, 0);
2860 ASSERT(list_is_empty(&zilog->zl_lwb_list));
2864 zil_burst_done(zilog_t *zilog)
2866 if (!list_is_empty(&zilog->zl_itx_commit_list) ||
2867 zilog->zl_cur_size == 0)
2870 if (zilog->zl_parallel)
2871 zilog->zl_parallel--;
2873 uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1);
2874 zilog->zl_prev_rotor = r;
2875 zilog->zl_prev_opt[r] = zil_lwb_plan(zilog, zilog->zl_cur_size,
2876 &zilog->zl_prev_min[r]);
2878 zilog->zl_cur_size = 0;
2879 zilog->zl_cur_max = 0;
2880 zilog->zl_cur_left = 0;
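Lines 2873-2876 record the finished burst's planned lwb size into a small ring of previous bursts. The rotor advances with (r + 1) & (ZIL_BURSTS - 1), which is only a valid modulo because ZIL_BURSTS is a power of two; zil_lwb_predict() (line 1798) later scans every slot. Sketch, taking ZIL_BURSTS = 8 as an assumed stock value:

    #define ZIL_BURSTS 8    /* must stay a power of two for the mask */

    /* Advance the rotor, overwriting the oldest sample. opt and
     * minsz stand in for this burst's zil_lwb_plan() results. */
    uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1);
    zilog->zl_prev_rotor = r;
    zilog->zl_prev_opt[r] = opt;
    zilog->zl_prev_min[r] = minsz;
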
2890 zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
2892 spa_t *spa = zilog->zl_spa;
2898 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2904 if (list_is_empty(&zilog->zl_itx_commit_list))
2911 lwb = list_tail(&zilog->zl_lwb_list);
2913 lwb = zil_create(zilog);
2919 zil_commit_activate_saxattr_feature(zilog);
2932 zilog->zl_parallel = ZIL_BURSTS;
2933 } else if ((plwb = list_prev(&zilog->zl_lwb_list, lwb))
2935 zilog->zl_parallel = MAX(zilog->zl_parallel,
2940 while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
2948 zilog_t *, zilog, itx_t *, itx);
2951 zilog_t *, zilog, itx_t *, itx);
3003 lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
3012 zilog->zl_parallel = ZIL_BURSTS;
3013 zilog->zl_cur_left -=
3024 zilog->zl_cur_left -= zil_itx_full_size(itx);
3027 zilog->zl_cur_left -= zil_itx_full_size(itx);
3040 zil_lwb_write_issue(zilog, lwb);
3041 zil_commit_writer_stall(zilog);
3109 if (lwb->lwb_state == LWB_STATE_OPENED && !zilog->zl_parallel) {
3110 zil_burst_done(zilog);
3112 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3115 zil_lwb_write_issue(zilog, lwb);
3116 zil_commit_writer_stall(zilog);
3125 * not already committed to an lwb, all itxs in the zilog's queue of
3137 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
3143 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
3144 ASSERT(spa_writeable(zilog->zl_spa));
3147 mutex_enter(&zilog->zl_issuer_lock);
3154 * without processing any of the zilog's queue of itxs.
3169 ZIL_STAT_BUMP(zilog, zil_commit_writer_count);
3171 wtxg = zil_get_commit_list(zilog);
3172 zil_prune_commit_list(zilog);
3173 zil_process_commit_list(zilog, zcw, &ilwbs);
3176 mutex_exit(&zilog->zl_issuer_lock);
3178 zil_lwb_write_issue(zilog, lwb);
3184 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
3186 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
3206 * zilog's "zl_issuer_lock". We can't simply acquire that lock,
3212 mutex_enter(&zilog->zl_issuer_lock);
3224 mutex_exit(&zilog->zl_issuer_lock);
3232 * the zilog's zl_issuer_lock, we have to perform this check a
3251 mutex_exit(&zilog->zl_issuer_lock);
3270 zil_burst_done(zilog);
3271 lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3283 zil_lwb_write_issue(zilog, lwb);
3284 zil_commit_writer_stall(zilog);
3285 mutex_exit(&zilog->zl_issuer_lock);
3287 mutex_exit(&zilog->zl_issuer_lock);
3288 zil_lwb_write_issue(zilog, lwb);
3313 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
3315 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
3316 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
3317 ASSERT(spa_writeable(zilog->zl_spa));
3328 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
3375 zil_commit_waiter_timeout(zilog, zcw);
3448 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
3450 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
3464 zil_itx_assign(zilog, itx, tx);
3574 * the zilog's "zl_issuer_lock", which ensures only a single
3585 zil_commit(zilog_t *zilog, uint64_t foid)
3595 * be assigned to this zilog; as a result, the zilog will be
3596 * dirtied. We must not dirty the zilog of a snapshot; there's
3600 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
3602 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
3605 if (!spa_writeable(zilog->zl_spa)) {
3613 ASSERT(list_is_empty(&zilog->zl_lwb_list));
3614 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
3616 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
3627 if (zilog->zl_suspend > 0) {
3628 ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
3629 txg_wait_synced(zilog->zl_dmu_pool, 0);
3633 zil_commit_impl(zilog, foid);
3637 zil_commit_impl(zilog_t *zilog, uint64_t foid)
3639 ZIL_STAT_BUMP(zilog, zil_commit_count);
3650 zil_async_to_sync(zilog, foid);
3668 zil_commit_itx_assign(zilog, zcw);
3670 uint64_t wtxg = zil_commit_writer(zilog, zcw);
3671 zil_commit_waiter(zilog, zcw);
3682 ZIL_STAT_BUMP(zilog, zil_commit_error_count);
3684 zilog_t *, zilog, zil_commit_waiter_t *, zcw);
3685 txg_wait_synced(zilog->zl_dmu_pool, 0);
3687 ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
3688 txg_wait_synced(zilog->zl_dmu_pool, wtxg);
3698 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
3700 zil_header_t *zh = zil_header_in_syncing_context(zilog);
3702 spa_t *spa = zilog->zl_spa;
3703 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
3713 zil_lwb_flush_wait_all(zilog, txg);
3715 mutex_enter(&zilog->zl_lock);
3717 ASSERT(zilog->zl_stop_sync == 0);
3725 if (zilog->zl_destroy_txg == txg) {
3727 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
3729 ASSERT(list_is_empty(&zilog->zl_lwb_list));
3732 memset(zilog->zl_replayed_seq, 0,
3733 sizeof (zilog->zl_replayed_seq));
3735 if (zilog->zl_keep_first) {
3744 zil_init_log_chain(zilog, &blk);
3759 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
3764 list_remove(&zilog->zl_lwb_list, lwb);
3767 zil_free_lwb(zilog, lwb);
3775 if (list_is_empty(&zilog->zl_lwb_list))
3779 mutex_exit(&zilog->zl_lock);
3844 zil_set_sync(zilog_t *zilog, uint64_t sync)
3846 zilog->zl_sync = sync;
3850 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
3852 zilog->zl_logbias = logbias;
3858 zilog_t *zilog;
3860 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
3862 zilog->zl_header = zh_phys;
3863 zilog->zl_os = os;
3864 zilog->zl_spa = dmu_objset_spa(os);
3865 zilog->zl_dmu_pool = dmu_objset_pool(os);
3866 zilog->zl_destroy_txg = TXG_INITIAL - 1;
3867 zilog->zl_logbias = dmu_objset_logbias(os);
3868 zilog->zl_sync = dmu_objset_syncprop(os);
3869 zilog->zl_dirty_max_txg = 0;
3870 zilog->zl_last_lwb_opened = NULL;
3871 zilog->zl_last_lwb_latency = 0;
3872 zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize,
3876 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
3877 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
3878 mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);
3881 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
3885 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
3888 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
3891 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
3892 cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
3895 zilog->zl_prev_opt[i] = zilog->zl_max_block_size -
3899 return (zilog);
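Line 3872 clamps the zil_maxblocksize tunable when the zilog is allocated: align it down to a ZIL_MIN_BLKSZ boundary, floor it at ZIL_MIN_BLKSZ, and cap it with the MIN() whose second operand is truncated out of the match above. That cap being the pool's spa_maxblocksize() is an assumption:

    /* Clamp a tunable into [ZIL_MIN_BLKSZ, pool max], aligned down
     * to ZIL_MIN_BLKSZ; the spa_maxblocksize() operand is assumed,
     * since the matched line 3872 is cut off mid-expression. */
    zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize,
        ZIL_MIN_BLKSZ, uint64_t), ZIL_MIN_BLKSZ),
        spa_maxblocksize(dmu_objset_spa(os)));
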
3903 zil_free(zilog_t *zilog)
3907 zilog->zl_stop_sync = 1;
3909 ASSERT0(zilog->zl_suspend);
3910 ASSERT0(zilog->zl_suspending);
3912 ASSERT(list_is_empty(&zilog->zl_lwb_list));
3913 list_destroy(&zilog->zl_lwb_list);
3915 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
3916 list_destroy(&zilog->zl_itx_commit_list);
3926 if (zilog->zl_itxg[i].itxg_itxs)
3927 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
3928 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
3931 mutex_destroy(&zilog->zl_issuer_lock);
3932 mutex_destroy(&zilog->zl_lock);
3933 mutex_destroy(&zilog->zl_lwb_io_lock);
3935 cv_destroy(&zilog->zl_cv_suspend);
3936 cv_destroy(&zilog->zl_lwb_io_cv);
3938 kmem_free(zilog, sizeof (zilog_t));
3947 zilog_t *zilog = dmu_objset_zil(os);
3949 ASSERT3P(zilog->zl_get_data, ==, NULL);
3950 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
3951 ASSERT(list_is_empty(&zilog->zl_lwb_list));
3953 zilog->zl_get_data = get_data;
3954 zilog->zl_sums = zil_sums;
3956 return (zilog);
3963 zil_close(zilog_t *zilog)
3968 if (!dmu_objset_is_snapshot(zilog->zl_os)) {
3969 zil_commit(zilog, 0);
3971 ASSERT(list_is_empty(&zilog->zl_lwb_list));
3972 ASSERT0(zilog->zl_dirty_max_txg);
3973 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
3976 mutex_enter(&zilog->zl_lock);
3977 txg = zilog->zl_dirty_max_txg;
3978 lwb = list_tail(&zilog->zl_lwb_list);
3983 mutex_exit(&zilog->zl_lock);
3990 mutex_enter(&zilog->zl_lwb_io_lock);
3991 txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
3992 mutex_exit(&zilog->zl_lwb_io_lock);
4000 txg_wait_synced(zilog->zl_dmu_pool, txg);
4002 if (zilog_is_dirty(zilog))
4003 zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
4005 if (txg < spa_freeze_txg(zilog->zl_spa))
4006 VERIFY(!zilog_is_dirty(zilog));
4008 zilog->zl_get_data = NULL;
4013 mutex_enter(&zilog->zl_lock);
4014 lwb = list_remove_head(&zilog->zl_lwb_list);
4016 ASSERT(list_is_empty(&zilog->zl_lwb_list));
4019 zil_free_lwb(zilog, lwb);
4021 mutex_exit(&zilog->zl_lock);
4052 zilog_t *zilog;
4059 zilog = dmu_objset_zil(os);
4061 mutex_enter(&zilog->zl_lock);
4062 zh = zilog->zl_header;
4065 mutex_exit(&zilog->zl_lock);
4076 if (cookiep == NULL && !zilog->zl_suspending &&
4077 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
4078 mutex_exit(&zilog->zl_lock);
4086 zilog->zl_suspend++;
4088 if (zilog->zl_suspend > 1) {
4094 while (zilog->zl_suspending)
4095 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
4096 mutex_exit(&zilog->zl_lock);
4114 mutex_exit(&zilog->zl_lock);
4126 zilog->zl_suspend--;
4127 mutex_exit(&zilog->zl_lock);
4133 zilog->zl_suspending = B_TRUE;
4134 mutex_exit(&zilog->zl_lock);
4144 zil_commit_impl(zilog, 0);
4148 * use txg_wait_synced() to ensure the data from the zilog has
4151 txg_wait_synced(zilog->zl_dmu_pool, 0);
4153 zil_destroy(zilog, B_FALSE);
4155 mutex_enter(&zilog->zl_lock);
4156 zilog->zl_suspending = B_FALSE;
4157 cv_broadcast(&zilog->zl_cv_suspend);
4158 mutex_exit(&zilog->zl_lock);
4174 zilog_t *zilog = dmu_objset_zil(os);
4176 mutex_enter(&zilog->zl_lock);
4177 ASSERT(zilog->zl_suspend != 0);
4178 zilog->zl_suspend--;
4179 mutex_exit(&zilog->zl_lock);
4192 zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
4196 zilog->zl_replaying_seq--; /* didn't actually replay this one */
4198 dmu_objset_name(zilog->zl_os, name);
4210 zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
4214 const zil_header_t *zh = zilog->zl_header;
4219 zilog->zl_replaying_seq = lr->lrc_seq;
4231 return (zil_replay_error(zilog, lr, EINVAL));
4238 error = dmu_object_info(zilog->zl_os,
4253 error = zil_read_log_data(zilog, (lr_write_t *)lr,
4256 return (zil_replay_error(zilog, lr, error));
4284 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
4287 return (zil_replay_error(zilog, lr, error));
4293 zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
4297 zilog->zl_replay_blks++;
4310 zilog_t *zilog = dmu_objset_zil(os);
4311 const zil_header_t *zh = zilog->zl_header;
4315 return (zil_destroy(zilog, B_TRUE));
4326 txg_wait_synced(zilog->zl_dmu_pool, 0);
4328 zilog->zl_replay = B_TRUE;
4329 zilog->zl_replay_time = ddi_get_lbolt();
4330 ASSERT(zilog->zl_replay_blks == 0);
4331 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
4335 zil_destroy(zilog, B_FALSE);
4336 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
4337 zilog->zl_replay = B_FALSE;
4343 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
4345 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
4348 if (zilog->zl_replay) {
4349 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
4350 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
4351 zilog->zl_replaying_seq;