Lines Matching defs:txg
56 * (txg), at which point they can be discarded; or
461 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
501 error = parse_blk_func(zilog, &blk, arg, txg);
555 error = parse_lr_func(zilog, lr, arg, txg);
587 * checkpoint, each ZIL block whose txg is later than the txg
816 uint64_t txg, lwb_state_t state)
848 lwb->lwb_alloc_txg = txg;
891 zilog_dirty(zilog_t *zilog, uint64_t txg)
901 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
905 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
910 * Determine if the zil is dirty in the specified txg. Callers wanting to
912 * the specified txg. Holding the lock will ensure that the zil cannot be
917 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
921 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
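The txg & TXG_MASK indexing above (and in many of the matches below) maps an ever-increasing txg number onto a small ring of per-txg slots. A minimal standalone sketch of that idea, assuming the ZFS-style constants TXG_SIZE = 4 and TXG_MASK = TXG_SIZE - 1; the per_txg array is invented for illustration and is not the real zilog state:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4              /* slots for concurrently tracked txgs */
#define TXG_MASK (TXG_SIZE - 1) /* TXG_SIZE is a power of two */

int
main(void)
{
        /* Hypothetical per-txg counters; stands in for zl_itxg[], zl_lwb_inflight[], ... */
        uint64_t per_txg[TXG_SIZE] = { 0 };

        /* Monotonically increasing txgs reuse the same four slots in turn. */
        for (uint64_t txg = 100; txg < 108; txg++) {
                per_txg[txg & TXG_MASK]++;
                printf("txg %llu -> slot %llu\n",
                    (unsigned long long)txg,
                    (unsigned long long)(txg & TXG_MASK));
        }
        return (0);
}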
952 uint64_t txg = 0;
961 txg = dmu_tx_get_txg(tx);
968 txg_wait_synced(zilog->zl_dmu_pool, txg);
980 uint64_t txg = 0;
1007 txg = dmu_tx_get_txg(tx);
1010 zio_free(zilog->zl_spa, txg, &blk);
1014 error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
1024 lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
1048 txg_wait_synced(zilog->zl_dmu_pool, txg);
1082 uint64_t txg;
1097 txg = dmu_tx_get_txg(tx);
1101 ASSERT3U(zilog->zl_destroy_txg, <, txg);
1102 zilog->zl_destroy_txg = txg;
1112 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
1207 * the min_claim_txg should be equal to the first txg of the pool.
1434 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
1436 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1515 uint64_t txg = lwb->lwb_issued_txg;
1521 ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
1522 zilog->zl_lwb_inflight[txg & TXG_MASK]--;
1523 if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
1529 * Wait for the completion of all issued writes/flushes for the given txg.
1533 zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
1535 ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
1538 while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
1547 if (lwb->lwb_issued_txg <= txg) {
1981 uint64_t txg = dmu_tx_get_txg(tx);
1991 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
1995 ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg);
2007 lwb->lwb_issued_txg = txg;
2008 zilog->zl_lwb_inflight[txg & TXG_MASK]++;
2009 zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
2027 nlwb->lwb_alloc_txg = txg;
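Lines 1515-1547 and 2007-2009 show both halves of a per-txg in-flight counter: zl_lwb_inflight[txg & TXG_MASK] is incremented when an lwb is issued, decremented when its flush completes (waking waiters once it reaches zero, per the check at line 1523), and zil_lwb_flush_wait_all() sleeps until the count for that txg drains. A simplified userland analogue using pthreads; the function names and toy main() are invented, and the real code uses the zilog's own lock and condvar rather than these globals:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static uint64_t inflight[TXG_SIZE];     /* stands in for zl_lwb_inflight[] */

/* Issue side: account for one outstanding write in this txg. */
static void
issue(uint64_t txg)
{
        pthread_mutex_lock(&lock);
        inflight[txg & TXG_MASK]++;
        pthread_mutex_unlock(&lock);
}

/* Completion side: drop the count and wake waiters when it hits zero. */
static void
flush_done(uint64_t txg)
{
        pthread_mutex_lock(&lock);
        inflight[txg & TXG_MASK]--;
        if (inflight[txg & TXG_MASK] == 0)
                pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lock);
}

/* Wait until every write issued for this txg has completed. */
static void
flush_wait_all(uint64_t txg)
{
        pthread_mutex_lock(&lock);
        while (inflight[txg & TXG_MASK] > 0)
                pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);
}

static void *
completer(void *arg)
{
        uint64_t txg = *(uint64_t *)arg;
        usleep(1000);           /* pretend the device takes a moment */
        flush_done(txg);
        return (NULL);
}

int
main(void)
{
        uint64_t txg = 42;
        pthread_t tid;

        issue(txg);
        pthread_create(&tid, NULL, completer, &txg);
        flush_wait_all(txg);    /* blocks until the completer runs */
        pthread_join(tid, NULL);
        printf("all writes for txg %llu flushed\n", (unsigned long long)txg);
        return (0);
}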
2451 * commit itx to a per-txg i_sync_list
2455 * There's nothing to prevent syncing the txg while the
2500 uint64_t otxg, txg;
2515 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2516 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2519 if (itxg->itxg_txg != txg) {
2545 uint64_t txg;
2556 txg = ZILTEST_TXG;
2558 txg = dmu_tx_get_txg(tx);
2560 itxg = &zilog->zl_itxg[txg & TXG_MASK];
2563 if (itxg->itxg_txg != txg) {
2571 "txg %llu", (u_longlong_t)itxg->itxg_txg);
2574 itxg->itxg_txg = txg;
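Because only TXG_SIZE slots exist, the slot selected by txg & TXG_MASK may still describe a txg from a few transaction groups ago; lines 2560-2574 detect that by keeping the full txg in the slot and comparing it on lookup before reusing the slot. A toy illustration of that claim-or-recycle check, with made-up names standing in for itxg_t:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

/* Hypothetical stand-in for itxg_t: the full txg tags the slot's contents. */
typedef struct slot {
        uint64_t s_txg;
        int s_nrecords;
} slot_t;

static slot_t slots[TXG_SIZE];

static slot_t *
slot_claim(uint64_t txg)
{
        slot_t *s = &slots[txg & TXG_MASK];

        if (s->s_txg != txg) {
                /*
                 * The slot still describes an older txg (txg - TXG_SIZE or
                 * earlier); recycle it before reusing it for this txg.
                 */
                memset(s, 0, sizeof (*s));
                s->s_txg = txg;
        }
        return (s);
}

int
main(void)
{
        slot_claim(100)->s_nrecords = 7;        /* txg 100 uses slot 0 */
        slot_t *s = slot_claim(104);            /* txg 104 maps to slot 0 too */
        printf("txg 104 starts with %d records\n", s->s_nrecords);      /* 0 */
        return (0);
}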
2624 * have written out the uberblocks (i.e. txg has been committed) so that
2668 uint64_t otxg, txg, wtxg = 0;
2680 * the last synced txg from changing. That's okay since we'll
2683 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2684 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2687 if (itxg->itxg_txg != txg) {
2694 * then the zil better be dirty in this "txg". We can assert
2698 * if it's unnecessary (i.e. the txg was synced).
2700 ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
2708 * caller to do txg_wait_synced(txg) for any new.
2711 wtxg = MAX(wtxg, txg);
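The loop at lines 2683-2711 visits every txg that can currently be in flight (otxg through otxg + TXG_CONCURRENT_STATES - 1), skips slots that belong to older txgs, and accumulates the highest txg the caller still has to txg_wait_synced() on. A condensed sketch of that scan-and-reduce pattern, assuming TXG_CONCURRENT_STATES = 3 as in the ZFS headers and using an invented slot struct:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)
#define TXG_CONCURRENT_STATES 3         /* open, quiescing, syncing */

typedef struct slot {
        uint64_t s_txg;         /* txg this slot currently describes */
        int s_pending;          /* toy "work still queued here" flag */
} slot_t;

/*
 * Scan every txg that can currently be in flight, skip slots that belong
 * to older (already recycled) txgs, and report the highest txg that still
 * has pending work -- the caller would then wait for that txg to sync.
 */
static uint64_t
max_pending_txg(slot_t *slots, uint64_t open_txg)
{
        uint64_t wtxg = 0;

        for (uint64_t txg = open_txg;
            txg < open_txg + TXG_CONCURRENT_STATES; txg++) {
                slot_t *s = &slots[txg & TXG_MASK];

                if (s->s_txg != txg)
                        continue;       /* stale slot, nothing for this txg */
                if (s->s_pending)
                        wtxg = (txg > wtxg) ? txg : wtxg;
        }
        return (wtxg);
}

int
main(void)
{
        slot_t slots[TXG_SIZE] = { 0 };

        slots[101 & TXG_MASK] = (slot_t){ .s_txg = 101, .s_pending = 1 };
        slots[102 & TXG_MASK] = (slot_t){ .s_txg = 102, .s_pending = 0 };
        printf("wait for txg %llu\n",
            (unsigned long long)max_pending_txg(slots, 100));
        return (0);
}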
2737 uint64_t otxg, txg;
2749 * the last synced txg from changing.
2751 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2752 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2755 if (itxg->itxg_txg != txg) {
2942 uint64_t txg = lrc->lrc_txg;
2944 ASSERT3U(txg, !=, 0);
2954 boolean_t synced = txg <= spa_last_synced_txg(spa);
2955 boolean_t frozen = txg > spa_freeze_txg(spa);
2958 * If the txg of this itx has already been synced out, then
2962 * it's still ok to commit an itx whose txg has already
2967 * to an lwb, regardless of whether or not that itx's txg
2973 * whose txg had already been synced, the following
2978 * itx's txg is 10 and the last synced txg is 9.
2979 * 2. spa_sync finishes syncing out txg 10.
2981 * whose txg is 10, so we skip it rather than committing
2991 * an lwb here, even if that itx's txg has already been
2998 * data; i.e. when the pool is frozen, the last synced txg
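The comment block around lines 2942-2998 reasons about a race: by the time an itx is processed, its txg may already have been synced out, in which case writing it to an lwb is unnecessary, while a txg beyond the pool's freeze point (ztest) can only ever be made durable through the log. A stripped-down sketch of that decision, with placeholder parameters standing in for spa_last_synced_txg() and spa_freeze_txg():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Decide whether an itx with lrc_txg == txg still needs a log record.
 * "last_synced" stands in for spa_last_synced_txg() and "freeze_txg" for
 * spa_freeze_txg(); UINT64_MAX means the pool is not frozen.
 */
static bool
itx_needs_commit(uint64_t txg, uint64_t last_synced, uint64_t freeze_txg)
{
        bool synced = (txg <= last_synced);
        bool frozen = (txg > freeze_txg);

        /*
         * A txg that already reached the main pool (and is not past the
         * freeze point) is durable without the ZIL record, so skip it.
         * A txg past the freeze point will never sync, so its record
         * must be logged regardless.
         */
        return (frozen || !synced);
}

int
main(void)
{
        printf("txg 10, last synced 9:  %s\n",
            itx_needs_commit(10, 9, UINT64_MAX) ? "commit" : "skip");
        printf("txg 10, last synced 10: %s\n",
            itx_needs_commit(10, 10, UINT64_MAX) ? "commit" : "skip");
        return (0);
}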
3701 uint64_t txg = dmu_tx_get_txg(tx);
3703 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
3713 zil_lwb_flush_wait_all(zilog, txg);
3725 if (zilog->zl_destroy_txg == txg) {
3762 lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
3766 zio_free(spa, txg, &lwb->lwb_blk);
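In zil_sync() (lines 3701-3766) the syncing txg walks the lwb list from the head, stops at the first lwb whose alloc or max txg is still in the future (line 3762), and frees the log blocks before that point (line 3766). A toy version of that pruning loop over an array instead of the kernel list_t, with invented field names mirroring lwb_alloc_txg and lwb_max_txg:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for an lwb: once both txgs are <= the synced txg, its
 * on-disk block is no longer needed for replay and can be freed. */
typedef struct toy_lwb {
        uint64_t alloc_txg;     /* txg the block was allocated in */
        uint64_t max_txg;       /* highest txg of records it holds */
} toy_lwb_t;

/*
 * "Free" (here: just print) every leading lwb that syncing txg "txg" has
 * made obsolete, and stop at the first one that is still live -- the list
 * is ordered, so everything after it is still live too.
 */
static size_t
prune_lwbs(const toy_lwb_t *lwbs, size_t n, uint64_t txg)
{
        size_t freed = 0;

        for (size_t i = 0; i < n; i++) {
                if (lwbs[i].alloc_txg > txg || lwbs[i].max_txg > txg)
                        break;
                printf("free lwb %zu (alloc %llu, max %llu)\n", i,
                    (unsigned long long)lwbs[i].alloc_txg,
                    (unsigned long long)lwbs[i].max_txg);
                freed++;
        }
        return (freed);
}

int
main(void)
{
        toy_lwb_t lwbs[] = {
                { .alloc_txg = 7, .max_txg = 8 },
                { .alloc_txg = 8, .max_txg = 9 },
                { .alloc_txg = 9, .max_txg = 11 },      /* still needed */
        };

        printf("freed %zu lwbs\n", prune_lwbs(lwbs, 3, 10));
        return (0);
}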
3921 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
3966 uint64_t txg;
3977 txg = zilog->zl_dirty_max_txg;
3980 txg = MAX(txg, lwb->lwb_alloc_txg);
3981 txg = MAX(txg, lwb->lwb_max_txg);
3991 txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
3995 * We need to use txg_wait_synced() to wait until that txg is synced.
3996 * zil_sync() will guarantee all lwbs up to that txg have been
3999 if (txg != 0)
4000 txg_wait_synced(zilog->zl_dmu_pool, txg);
4003 zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
4004 (u_longlong_t)txg);
4005 if (txg < spa_freeze_txg(zilog->zl_spa))
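The shutdown path at lines 3966-4005 folds everything the ZIL still depends on into a single txg (the max of zl_dirty_max_txg, every lwb's alloc/max txg, and the highest issued txg) and then issues one txg_wait_synced() for it. A minimal sketch of that reduce-then-wait shape, with a printf stub standing in for txg_wait_synced() and invented example values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

typedef struct toy_lwb {
        uint64_t alloc_txg;
        uint64_t max_txg;
} toy_lwb_t;

/* Stub: the real code calls txg_wait_synced(dp, txg) here. */
static void
wait_synced_stub(uint64_t txg)
{
        printf("waiting for txg %llu to sync\n", (unsigned long long)txg);
}

int
main(void)
{
        toy_lwb_t lwbs[] = {
                { .alloc_txg = 11, .max_txg = 12 },
                { .alloc_txg = 12, .max_txg = 15 },
        };
        uint64_t dirty_max_txg = 13;
        uint64_t max_issued_txg = 14;

        /* Fold every outstanding dependency into one txg to wait on. */
        uint64_t txg = dirty_max_txg;
        for (size_t i = 0; i < sizeof (lwbs) / sizeof (lwbs[0]); i++) {
                txg = MAX(txg, lwbs[i].alloc_txg);
                txg = MAX(txg, lwbs[i].max_txg);
        }
        txg = MAX(txg, max_issued_txg);

        if (txg != 0)
                wait_synced_stub(txg);          /* waits for txg 15 here */
        return (0);
}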
4278 * The DMU's dnode layer doesn't see removes until the txg