Lines Matching defs:txg (each entry: source line number, then the matching line, truncated at the match)

638 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
663 if (txg >
669 metaslab_evict(msp, txg);
761 * have any space until we finish syncing out this txg.
1945 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1964 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1991 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
2221 * succeed. Between that and the normal unloading processing during txg sync,
2490 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2603 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2605 "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2660 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2668 msp->ms_selected_txg = txg;
2689 uint64_t txg, metaslab_t **msp)
2772 * If we're opening an existing pool (txg == 0) or creating
2773 * a new one (txg == TXG_INITIAL), all space is available now.
2775 * does not become available until after this txg has synced.
2777 * out this txg. This ensures that we don't attempt to allocate
2780 if (txg <= TXG_INITIAL) {
2786 if (txg != 0) {
2787 vdev_dirty(vd, 0, NULL, txg);
2788 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2966 uint64_t txg = spa_syncing_txg(spa);
2970 * If we've reached the final dirty txg, then we must
2978 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2980 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2981 zfs_dbgmsg("txg %llu, requesting force condense: "
2982 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
3251 * allocatable even before they finish their first txg sync.
3583 * that force condensing happens in the next txg.
3659 uint64_t txg = dmu_tx_get_txg(tx);
3674 * in the spacemap (they will be written later this txg).
3711 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3713 (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3734 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3984 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
4019 metaslab_sync(metaslab_t *msp, uint64_t txg)
4025 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
4046 * dirty txg, we need to let it through. Not condensing beyond the
4047 * final dirty txg prevents an issue where metaslabs that need to be
4049 * here. By only checking the txg in that branch of the conditional,
4057 txg <= spa_final_dirty_txg(spa)))
4061 VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4066 * other thread can be modifying this txg's alloc, freeing,
4076 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4257 * For sync pass 1, we avoid traversing this txg's free range tree
4276 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4277 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4298 metaslab_evict(metaslab_t *msp, uint64_t txg)
4305 msp->ms_allocating[(txg + t) & TXG_MASK]));
4319 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4344 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4423 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4440 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4648 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4670 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4671 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4673 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4677 msp->ms_alloc_txg = txg;
4678 metaslab_verify_space(msp, txg);
4801 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4949 metaslab_set_selected_txg(msp, txg);
5020 offset = metaslab_block_alloc(msp, asize, txg);
5107 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5112 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5145 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5275 uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
5284 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5556 * would no longer know what their phys birth txg is.
5593 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5604 if (txg > spa_freeze_txg(spa))
5626 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5709 uint64_t txg)
5723 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5736 if (error || txg == 0) { /* txg == 0 indicates dry run */
5754 msp->ms_selected_txg = txg;
5759 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5760 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5761 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5790 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5802 arg.mcca_txg = txg;
5809 offset, size, txg);
5813 return (metaslab_claim_concrete(vd, offset, size, txg));
5824 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5840 return (metaslab_claim_impl(vd, offset, size, txg));
5845 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5870 txg, flags, zal, allocator);
5873 metaslab_unalloc_dva(spa, &dva[d], txg);
5895 BP_SET_BIRTH(bp, txg, 0);
5901 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5921 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5929 * there is no way it was created in the current txg.
5932 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5940 metaslab_unalloc_dva(spa, &dva[d], txg);
5942 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5951 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5959 if (txg != 0) {
5971 error = metaslab_claim_dva(spa, &dva[d], txg);
5978 ASSERT(error == 0 || txg == 0);
6027 * allocated and freed in the same sync pass within the same txg.
6029 * segment but then we free part of it within the same txg
6190 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6192 ms->ms_unflushed_txg = txg;
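
The recurring pattern in the matches above is per-txg slot indexing: allocations land in ms_allocating[txg & TXG_MASK] (and metaslab_evict checks (txg + t) & TXG_MASK for every in-flight txg), while this txg's frees are parked in ms_defer[txg % TXG_DEFER_SIZE] and only returned to the allocatable space TXG_DEFER_SIZE txgs later, so recently freed space is not reused immediately. The sketch below illustrates just that indexing, assuming the usual TXG_SIZE, TXG_MASK and TXG_DEFER_SIZE values from ZFS's txg.h; toy_metaslab_t, toy_alloc() and toy_sync_done() are hypothetical stand-ins that track plain byte counts instead of range trees, not the real metaslab API.

	/*
	 * Toy sketch of the txg slot indexing seen above.  TXG_SIZE, TXG_MASK
	 * and TXG_DEFER_SIZE follow the usual ZFS txg.h values; everything
	 * else (toy_metaslab_t and the toy_* functions) is hypothetical.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define	TXG_SIZE	4		/* in-flight txg states, rounded to a power of 2 */
	#define	TXG_MASK	(TXG_SIZE - 1)	/* txg -> ms_allocating slot */
	#define	TXG_DEFER_SIZE	2		/* txgs a free is deferred before reuse */

	typedef struct toy_metaslab {
		uint64_t ms_allocating[TXG_SIZE];	/* bytes allocated, per in-flight txg */
		uint64_t ms_defer[TXG_DEFER_SIZE];	/* bytes freed, awaiting deferral expiry */
		uint64_t ms_allocatable;		/* bytes currently free for allocation */
	} toy_metaslab_t;

	/* metaslab_block_alloc() analogue: charge the slot for this txg. */
	static void
	toy_alloc(toy_metaslab_t *ms, uint64_t txg, uint64_t size)
	{
		ms->ms_allocatable -= size;
		ms->ms_allocating[txg & TXG_MASK] += size;
	}

	/* metaslab_sync_done() analogue: retire this txg's slots. */
	static void
	toy_sync_done(toy_metaslab_t *ms, uint64_t txg, uint64_t freed)
	{
		uint64_t *defer = &ms->ms_defer[txg % TXG_DEFER_SIZE];

		/* Frees deferred TXG_DEFER_SIZE txgs ago become allocatable now. */
		ms->ms_allocatable += *defer;
		/* This txg's frees start their deferral period. */
		*defer = freed;
		/* This txg's allocations have been written out; clear the slot. */
		ms->ms_allocating[txg & TXG_MASK] = 0;
	}

	int
	main(void)
	{
		toy_metaslab_t ms = { .ms_allocatable = 1 << 20 };

		for (uint64_t txg = 4; txg < 10; txg++) {
			toy_alloc(&ms, txg, 4096);
			toy_sync_done(&ms, txg, 1024);
			printf("txg %llu: alloc slot %llu, defer slot %llu, allocatable %llu\n",
			    (unsigned long long)txg,
			    (unsigned long long)(txg & TXG_MASK),
			    (unsigned long long)(txg % TXG_DEFER_SIZE),
			    (unsigned long long)ms.ms_allocatable);
		}
		return (0);
	}

Because TXG_SIZE slots exist but only a few txgs are open, quiescing or syncing at once, a slot is always drained (the ASSERT0(range_tree_space(...)) checks above) before the same index comes around again; the defer ring works the same way with its own, smaller period.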