Lines Matching defs:txg

78 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
81 * relevant, the per-txg value is useful for debugging. The tunable
86 * ensure that there is a txg syncing (see the comment in txg.c for a full
109 * when approaching the limit until log data is cleared out after txg sync.
116 * zfs_dirty_data_max), push out a txg. This should be less than
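
The comment fragments above (source lines 78-116) describe the dirty-data accounting: every modification is charged both to a per-txg counter, indexed by (txg & TXG_MASK), and to a poolwide total, and only the poolwide value drives the write throttle. Below is a minimal standalone sketch of that ring-indexing pattern, not the ZFS code itself; TXG_SIZE of 4 matches OpenZFS's txg.h, everything else is simplified.

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4                   /* as in OpenZFS txg.h */
#define TXG_MASK    (TXG_SIZE - 1)

static int64_t dirty_pertxg[TXG_SIZE];  /* per-txg bytes, mainly for debugging */
static int64_t dirty_total;             /* poolwide bytes, drives the throttle */

/* Charge (space > 0) or release (space < 0) dirty bytes for a txg. */
static void
dirty_space(uint64_t txg, int64_t space)
{
    dirty_pertxg[txg & TXG_MASK] += space;
    dirty_total += space;
}

int
main(void)
{
    dirty_space(100, 8192);     /* a write dirties 8K in txg 100 */
    dirty_space(101, 4096);     /* a concurrent write in txg 101 */
    dirty_space(100, -8192);    /* txg 100 syncs its data out */
    printf("poolwide dirty: %lld\n", (long long)dirty_total);
    return (0);
}
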
189 dsl_pool_open_impl(spa_t *spa, uint64_t txg)
198 txg_init(dp, txg);
239 dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
242 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
477 dsl_crypto_params_t *dcp, uint64_t txg)
480 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
481 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
503 VERIFY0(dsl_scan_init(dp, txg));
614 dsl_pool_wrlog_count(dsl_pool_t *dp, int64_t size, uint64_t txg)
618 aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], size);
624 if (aggsum_compare(&dp->dp_wrlog_pertxg[txg & TXG_MASK], sync_min) > 0)
625 txg_kick(dp, txg);
638 dsl_pool_wrlog_clear(dsl_pool_t *dp, uint64_t txg)
641 delta = -(int64_t)aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
642 aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], delta);
645 (void) aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
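
dsl_pool_wrlog_count (source line 614) charges ZIL write bytes to the per-txg slot and kicks the sync thread via txg_kick once the slot crosses a minimum, and dsl_pool_wrlog_clear (line 638) zeroes the slot by adding its own negation, since the aggsum API is add-only. A standalone sketch of that accumulate/kick/clear pattern, modeling the aggsum as a plain counter and using a made-up threshold of half a hypothetical cap:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

/* Plain counters stand in for the aggsum_t fast counters. */
static int64_t wrlog_pertxg[TXG_SIZE];
static int64_t wrlog_total;
static int64_t wrlog_max = 1 << 20;     /* hypothetical zfs_wrlog_data_max */

static void
wrlog_count(int64_t size, uint64_t txg)
{
    wrlog_pertxg[txg & TXG_MASK] += size;
    wrlog_total += size;
    /* Hypothetical threshold: kick the sync at half the cap. */
    if (wrlog_pertxg[txg & TXG_MASK] > wrlog_max / 2)
        printf("txg %llu over threshold: kick sync\n",
            (unsigned long long)txg);
}

static void
wrlog_clear(uint64_t txg)
{
    /* Zero the slot by adding its negation; the aggsum has no "set". */
    int64_t delta = -wrlog_pertxg[txg & TXG_MASK];
    wrlog_pertxg[txg & TXG_MASK] += delta;
    wrlog_total += delta;
}

int
main(void)
{
    wrlog_count(600 * 1024, 42);    /* 600K of log writes in txg 42 */
    wrlog_clear(42);                /* txg 42 synced; drop its log bytes */
    printf("total wrlog: %lld\n", (long long)wrlog_total);
    return (0);
}
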
651 dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
661 for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
662 ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
671 #define dsl_early_sync_task_verify(dp, txg) \
672 ((void) sizeof (dp), (void) sizeof (txg), B_TRUE)
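
Source lines 671-672 are the stub that replaces dsl_early_sync_task_verify in non-debug builds: sizeof inspects only the type of its operand, so dp and txg count as used (no unused-variable warnings) without being evaluated, and the comma expression still yields B_TRUE for the enclosing ASSERT at lines 701/704. A minimal sketch of the idiom, with ZFS_DEBUG standing in for the real build flag:

#include <stdio.h>

typedef int boolean_t;
#define B_TRUE  1

#ifdef ZFS_DEBUG
static boolean_t
early_sync_task_verify(void *dp, unsigned long txg)
{
    (void) dp;
    (void) txg;
    return (B_TRUE);    /* real metaslab checks would live here */
}
#else
/*
 * sizeof does not evaluate its operand, so this "uses" both arguments
 * at zero runtime cost while still producing B_TRUE for ASSERT().
 */
#define early_sync_task_verify(dp, txg) \
    ((void) sizeof (dp), (void) sizeof (txg), B_TRUE)
#endif

int
main(void)
{
    int pool;
    printf("%d\n", early_sync_task_verify(&pool, 7UL));
    return (0);
}
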
676 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
688 tx = dmu_tx_create_assigned(dp, txg);
695 if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
700 txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
701 ASSERT(dsl_early_sync_task_verify(dp, txg));
704 ASSERT(dsl_early_sync_task_verify(dp, txg));
712 while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
730 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
731 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
754 while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
767 !os->os_next_write_raw[txg & TXG_MASK]) {
787 !os->os_next_write_raw[txg & TXG_MASK]) {
796 while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
816 if (dmu_objset_is_dirty(mos, txg)) {
833 dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
836 * If we modify a dataset in the same txg that we want to destroy it,
844 if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
851 while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
857 DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
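
dsl_pool_sync (source line 676) drains one per-txg list after another with the same loop shape: early sync tasks, dirty datasets (twice, at lines 712 and 754, since syncing user data can dirty more), dirty dirs, and finally the sync tasks. A standalone sketch of a txg_list-style structure and that while-remove drain, simplified to one singly linked list head per txg slot:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

typedef struct node {
    struct node *next;
    int id;
} node_t;

/* One head per in-flight txg, like the per-txg heads in txg_list_t. */
static node_t *dirty_list[TXG_SIZE];

static void
txg_list_add(node_t *n, uint64_t txg)
{
    n->next = dirty_list[txg & TXG_MASK];
    dirty_list[txg & TXG_MASK] = n;
}

static node_t *
txg_list_remove(uint64_t txg)
{
    node_t *n = dirty_list[txg & TXG_MASK];
    if (n != NULL)
        dirty_list[txg & TXG_MASK] = n->next;
    return (n);
}

int
main(void)
{
    for (int i = 0; i < 3; i++) {
        node_t *n = malloc(sizeof (*n));
        n->id = i;
        txg_list_add(n, 7);     /* dirtied in txg 7 */
    }

    /* The dsl_pool_sync drain pattern: pop until the slot is empty. */
    node_t *n;
    while ((n = txg_list_remove(7)) != NULL) {
        printf("sync node %d\n", n->id);
        free(n);
    }
    return (0);
}

dsl_pool_sync_done (line 861, below) iterates dp_dirty_zilogs with the txg_list_head/txg_list_remove_this variant instead, so each zilog can be cleaned before it is unlinked.
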
861 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
865 while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
873 zil_clean(zilog, txg);
874 (void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
875 ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
879 dsl_pool_wrlog_clear(dp, txg);
881 ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
980 dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
984 uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
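
dsl_pool_need_dirty_sync (source line 980) compares the txg's dirty bytes against zfs_dirty_data_max scaled by zfs_dirty_data_sync_percent, the tunable described at line 1483. A sketch of that check, with made-up tunable values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical tunable values, for illustration only. */
static uint64_t dirty_data_max = 4096ULL << 20;     /* 4 GiB cap */
static uint64_t dirty_data_sync_percent = 20;       /* threshold, in % */

static int
need_dirty_sync(uint64_t dirty)
{
    uint64_t dirty_min_bytes =
        dirty_data_max * dirty_data_sync_percent / 100;
    return (dirty > dirty_min_bytes);
}

int
main(void)
{
    printf("%d\n", need_dirty_sync(512ULL << 20));  /* 512 MiB: no */
    printf("%d\n", need_dirty_sync(1024ULL << 20)); /* 1 GiB: yes */
    return (0);
}
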
1006 dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
1013 if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
1015 space = dp->dp_dirty_pertxg[txg & TXG_MASK];
1017 ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
1018 dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
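
dsl_pool_undirty_space (source line 1006) clamps the decrement at lines 1013-1015 so the per-txg counter can never underflow if more space is written back than this txg was ever charged, then lowers both counters. A sketch of the clamp, with the same counters as the first sketch above, redeclared here to stay self-contained:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

static int64_t dirty_pertxg[TXG_SIZE];
static int64_t dirty_total;

static void
undirty_space(int64_t space, uint64_t txg)
{
    /* Never subtract more than this txg was ever charged. */
    if (dirty_pertxg[txg & TXG_MASK] < space)
        space = dirty_pertxg[txg & TXG_MASK];
    dirty_pertxg[txg & TXG_MASK] -= space;
    dirty_total -= space;
}

int
main(void)
{
    dirty_pertxg[3 & TXG_MASK] = 4096;
    dirty_total = 4096;
    undirty_space(8192, 3);     /* over-report: clamped to 4096 */
    printf("pertxg %lld total %lld\n",
        (long long)dirty_pertxg[3 & TXG_MASK], (long long)dirty_total);
    return (0);
}
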
1483 "Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");