Lines Matching defs:txg

37 #include <sys/txg.h>
192 vdev_passivate(vdev_t *vd, uint64_t *txg)
238 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
248 *txg = spa_vdev_config_enter(spa);
278 uint64_t txg;
284 txg = spa_vdev_enter(spa);
295 error = vdev_passivate(vd, &txg);
298 vdev_dirty_leaves(vd, VDD_DTL, txg);
302 error = spa_vdev_exit(spa, NULL, txg, error);
311 uint64_t txg;
317 txg = spa_vdev_enter(spa);
331 vdev_dirty_leaves(vd, VDD_DTL, txg);
335 (void) spa_vdev_exit(spa, NULL, txg, error);
403 * This is called as a synctask in the txg in which we will mark this vdev
422 uint64_t txg __maybe_unused = dmu_tx_get_txg(tx);
474 * Space which we are freeing this txg does not need to
489 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
518 zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
666 uint64_t txg = spa_syncing_txg(spa);
729 * this txg and iterating forward, we might find that this region
733 int txgoff = (txg + i) & TXG_MASK;
737 * will be synced in txg+i.
746 uint64_t, txg + i);
752 * copied in that txg).
769 * the mapped space later, in the txg where we have
780 * done, because it is being copied in txg+i.
782 * a future txg. If we crash after this txg
783 * syncs but before txg+i syncs, then the space
785 * for the space being done in *this* txg
786 * (when it is freed) rather than the future txg
792 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
795 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
816 svr->svr_bytes_done[txg & TXG_MASK] += size;
893 * Called as a sync task in every txg that the removal thread makes progress.
902 uint64_t txg = dmu_tx_get_txg(tx);
906 ASSERT3U(txg, ==, spa_syncing_txg(spa));
909 &svr->svr_new_segments[txg & TXG_MASK], tx);
918 range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
920 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
922 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
1123 uint64_t maxalloc, uint64_t txg,
1171 int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg,
1175 &dst, 0, NULL, txg, METASLAB_DONT_THROTTLE, zal, 0);
1226 vcsa->vcsa_txg = txg;
1232 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
1247 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
1249 vdev_dirty(vd, 0, NULL, txg);
1256 * synctask in the same txg that we will sync out the new config (to the
1311 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
1342 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1355 uint64_t txg;
1362 txg = spa_vdev_enter(spa);
1375 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
1376 (u_longlong_t)vd->vdev_id, (u_longlong_t)txg);
1400 vdev_remove_replace_with_indirect(vd, txg);
1410 (void) spa_vdev_exit(spa, NULL, txg, 0);
1423 txg = spa_vdev_enter(spa);
1429 (void) spa_vdev_exit(spa, vd, txg, 0);
1440 * this size again this txg.
1446 uint64_t txg = dmu_tx_get_txg(tx);
1505 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
1510 svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
1514 * that we are taking care of each txg.
1516 svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
1525 segs, thismax, txg, vca, &zal);
1530 * segment size again this txg. Note that the
1687 * for a txg to sync while holding a config lock
1715 uint64_t txg = dmu_tx_get_txg(tx);
1725 if (txg != last_txg)
1727 last_txg = txg;
1956 * txg, thus increasing svr_bytes_done; discard that here to
1958 * Note that future txg's can not have any bytes_done, because
2056 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
2077 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
2096 *txg = spa_vdev_config_enter(spa);
2111 vdev_dirty_leaves(vd, VDD_DTL, *txg);
2136 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
2137 *txg = spa_vdev_config_enter(spa);
2298 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
2321 error = vdev_passivate(vd, txg);
2333 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
2339 *txg = spa_vdev_config_enter(spa);
2358 vdev_dirty_leaves(vd, VDD_DTL, *txg);
2360 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
2382 uint64_t txg = 0;
2393 txg = spa_vdev_enter(spa);
2401 return (spa_vdev_exit(spa, NULL, txg, error));
2463 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
2467 txg = spa_vdev_config_enter(spa);
2479 error = spa_vdev_remove_log(vd, &txg);
2482 error = spa_vdev_remove_top(vd, &txg);
2493 error = spa_vdev_exit(spa, NULL, txg, error);
2499 * Doing that would prevent the txg sync from actually happening,
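The matches above repeatedly show the same txg handshake around a config change: obtain a txg with spa_vdev_enter(), mutate vdev state, dirty the affected leaves with vdev_dirty_leaves(vd, VDD_DTL, txg) so the change is written in that txg, and return through spa_vdev_exit(), which syncs the new config and waits for the txg. The sketch below illustrates only that pattern; it is a minimal, hypothetical example, not the real spa_vdev_remove() logic. example_mark_removing() and its simplified checks are assumptions for illustration, while spa_vdev_enter(), spa_lookup_by_guid(), vdev_dirty_leaves(), and spa_vdev_exit() are the existing OpenZFS interfaces that appear in the listing.

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/txg.h>

/*
 * Illustrative only: mark a top-level vdev as being removed and push the
 * state change out in a single txg, following the enter/dirty/exit pattern
 * seen in the listing (e.g. source lines 278-302 and 2382-2493).
 */
static int
example_mark_removing(spa_t *spa, uint64_t guid)
{
	uint64_t txg;
	vdev_t *vd;
	int error = 0;

	/*
	 * Take the namespace/config locks and get the txg in which this
	 * config change will be synced.
	 */
	txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
	if (vd == NULL || vd != vd->vdev_top) {
		error = SET_ERROR(ENODEV);
	} else {
		/* Hypothetical state change; the real code does far more. */
		vd->vdev_removing = B_TRUE;

		/* Make sure the leaves' DTL/label updates go out this txg. */
		vdev_dirty_leaves(vd, VDD_DTL, txg);
	}

	/*
	 * Drop the locks, sync the updated config, wait for the txg, and
	 * propagate any error to the caller.
	 */
	return (spa_vdev_exit(spa, NULL, txg, error));
}

The same shape recurs with spa_vdev_config_enter()/spa_vdev_config_exit() in the log-device and top-level removal paths (source lines 2056-2137 and 2298-2360), where the txg is passed by pointer so the callee can drop and re-acquire the config lock across a txg wait.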