Lines matching refs:tx: cross-reference hits in the ZFS DMU transaction code (dmu_tx.c). Each entry shows the source line number, the matching line, and its enclosing function; the trailing local/argument tag notes how tx is bound there.

40 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
47 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP); in dmu_tx_create_dd() local
48 tx->tx_dir = dd; in dmu_tx_create_dd()
50 tx->tx_pool = dd->dd_pool; in dmu_tx_create_dd()
51 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t), in dmu_tx_create_dd()
53 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t), in dmu_tx_create_dd()
56 refcount_create(&tx->tx_space_written); in dmu_tx_create_dd()
57 refcount_create(&tx->tx_space_freed); in dmu_tx_create_dd()
59 return (tx); in dmu_tx_create_dd()
65 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir); in dmu_tx_create() local
66 tx->tx_objset = os; in dmu_tx_create()
67 tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset); in dmu_tx_create()
68 return (tx); in dmu_tx_create()
74 dmu_tx_t *tx = dmu_tx_create_dd(NULL); in dmu_tx_create_assigned() local
77 tx->tx_pool = dp; in dmu_tx_create_assigned()
78 tx->tx_txg = txg; in dmu_tx_create_assigned()
79 tx->tx_anyobj = TRUE; in dmu_tx_create_assigned()
81 return (tx); in dmu_tx_create_assigned()
85 dmu_tx_is_syncing(dmu_tx_t *tx) in dmu_tx_is_syncing() argument
87 return (tx->tx_anyobj); in dmu_tx_is_syncing()
91 dmu_tx_private_ok(dmu_tx_t *tx) in dmu_tx_private_ok() argument
93 return (tx->tx_anyobj); in dmu_tx_private_ok()
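The three constructors serve different contexts: dmu_tx_create() opens a transaction against an objset for ordinary open-context callers, dmu_tx_create_dd() is the common allocator beneath it, and dmu_tx_create_assigned() gives the sync context a transaction already bound to a txg, with tx_anyobj set so dmu_tx_is_syncing() and dmu_tx_private_ok() return true. A minimal open-context sketch follows, assuming the standard <sys/dmu.h> interfaces; example_create_abort() and its os argument are illustrative names, not part of the listing:

    #include <sys/dmu.h>

    /* Sketch only: create an unassigned tx and throw it away. */
    static void
    example_create_abort(objset_t *os)
    {
        dmu_tx_t *tx = dmu_tx_create(os);   /* tx_txg == 0: unassigned */

        /* dmu_tx_hold_*() calls would be declared here. */

        dmu_tx_abort(tx);   /* legal only while tx_txg is still 0 */
    }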
97 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object, in dmu_tx_hold_object_impl() argument
105 err = dnode_hold(os, object, tx, &dn); in dmu_tx_hold_object_impl()
107 tx->tx_err = err; in dmu_tx_hold_object_impl()
111 if (err == 0 && tx->tx_txg != 0) { in dmu_tx_hold_object_impl()
119 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_object_impl()
120 (void) refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_object_impl()
126 txh->txh_tx = tx; in dmu_tx_hold_object_impl()
133 list_insert_tail(&tx->tx_holds, txh); in dmu_tx_hold_object_impl()
139 dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object) in dmu_tx_add_new_object() argument
145 if (!dmu_tx_is_syncing(tx)) { in dmu_tx_add_new_object()
146 (void) dmu_tx_hold_object_impl(tx, os, in dmu_tx_add_new_object()
404 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len) in dmu_tx_hold_write() argument
408 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_write()
412 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_write()
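dmu_tx_hold_write() shows the invariant shared by every hold routine in this listing: ASSERT(tx->tx_txg == 0), meaning all holds must be declared before the transaction is assigned to a txg. A minimal write path under that rule, assuming <sys/dmu.h>; example_write() and its parameters are illustrative:

    #include <sys/dmu.h>

    /* Illustrative helper: write len bytes from buf at off in object. */
    static int
    example_write(objset_t *os, uint64_t object, uint64_t off,
        int len, const void *buf)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_write(tx, object, off, len);   /* before assign */

        err = dmu_tx_assign(tx, TXG_WAIT);         /* blocks if needed */
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        dmu_write(os, object, off, len, buf, tx);  /* covered by the hold */
        dmu_tx_commit(tx);
        return (0);
    }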
578 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len) in dmu_tx_hold_free() argument
586 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_free()
588 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_free()
619 zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_hold_free()
628 tx->tx_err = err; in dmu_tx_hold_free()
634 tx->tx_err = err; in dmu_tx_hold_free()
640 tx->tx_err = err; in dmu_tx_hold_free()
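dmu_tx_hold_free() is the heavyweight hold: it may read block pointers through a zio_root() tree (line 619) to size what will be freed, and any failure is parked in tx->tx_err, which dmu_tx_try_assign() reports later (line 901). The caller-side pattern is the same as for writes; a sketch assuming <sys/dmu.h>, with example_free_range() an illustrative name:

    #include <sys/dmu.h>

    /* Illustrative helper: punch a hole of len bytes at off in object. */
    static int
    example_free_range(objset_t *os, uint64_t object,
        uint64_t off, uint64_t len)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_free(tx, object, off, len);  /* may prefetch metadata */

        err = dmu_tx_assign(tx, TXG_WAIT);       /* also reports tx_err */
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = dmu_free_range(os, object, off, len, tx);
        dmu_tx_commit(tx);
        return (err);
    }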
649 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name) in dmu_tx_hold_zap() argument
656 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_zap()
658 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_zap()
685 tx->tx_err = err; in dmu_tx_hold_zap()
713 tx->tx_err = err; in dmu_tx_hold_zap()
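For ZAP objects, the hold carries an add flag and the attribute name so the space estimate can cover a possible fat-zap conversion; errors again flow through tx->tx_err. A sketch of adding one name/value pair, assuming <sys/zap.h>; example_zap_add() is an illustrative name:

    #include <sys/dmu.h>
    #include <sys/zap.h>

    /* Illustrative helper: add one uint64 entry to a ZAP object. */
    static int
    example_zap_add(objset_t *os, uint64_t zapobj,
        const char *name, uint64_t value)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_zap(tx, zapobj, TRUE, name);   /* add == TRUE */

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = zap_add(os, zapobj, name, sizeof (uint64_t), 1, &value, tx);
        dmu_tx_commit(tx);
        return (err);
    }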
734 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_bonus() argument
738 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_bonus()
740 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_bonus()
747 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space) in dmu_tx_hold_space() argument
750 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_space()
752 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_space()
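dmu_tx_hold_bonus() covers a change to an object's dnode or bonus buffer, while dmu_tx_hold_space() reserves raw space without naming an object. A bonus-buffer sketch, assuming <sys/dmu.h>; in DEBUG builds, the dmu_buf_will_dirty() call below ends up in dmu_tx_dirty_buf() (listed further down), which verifies the buffer was covered by a hold. example_touch_bonus() is illustrative:

    #include <sys/dmu.h>

    /* Illustrative helper: dirty an object's bonus buffer. */
    static int
    example_touch_bonus(objset_t *os, uint64_t object)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        dmu_buf_t *db;
        int err;

        dmu_tx_hold_bonus(tx, object);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = dmu_bonus_hold(os, object, FTAG, &db);
        if (err == 0) {
            dmu_buf_will_dirty(db, tx);  /* checked by dmu_tx_dirty_buf() */
            /* ... modify db->db_data here ... */
            dmu_buf_rele(db, FTAG);
        }
        dmu_tx_commit(tx);
        return (err);
    }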
759 dmu_tx_holds(dmu_tx_t *tx, uint64_t object) in dmu_tx_holds() argument
770 ASSERT(tx->tx_txg != 0); in dmu_tx_holds()
775 for (txh = list_head(&tx->tx_holds); txh; in dmu_tx_holds()
776 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_holds()
786 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db) in dmu_tx_dirty_buf() argument
794 ASSERT(tx->tx_txg != 0); in dmu_tx_dirty_buf()
795 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
798 if (tx->tx_anyobj) { in dmu_tx_dirty_buf()
809 for (txh = list_head(&tx->tx_holds); txh; in dmu_tx_dirty_buf()
810 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_dirty_buf()
811 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg); in dmu_tx_dirty_buf()
892 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how) in dmu_tx_try_assign() argument
895 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_try_assign()
899 ASSERT3U(tx->tx_txg, ==, 0); in dmu_tx_try_assign()
901 if (tx->tx_err) in dmu_tx_try_assign()
902 return (tx->tx_err); in dmu_tx_try_assign()
921 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh); in dmu_tx_try_assign()
922 tx->tx_needassign_txh = NULL; in dmu_tx_try_assign()
931 for (txh = list_head(&tx->tx_holds); txh; in dmu_tx_try_assign()
932 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_try_assign()
936 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
938 tx->tx_needassign_txh = txh; in dmu_tx_try_assign()
942 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
943 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
944 (void) refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
959 if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) in dmu_tx_try_assign()
966 if (tx->tx_objset && in dmu_tx_try_assign()
967 dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) > in dmu_tx_try_assign()
968 tx->tx_lastsnap_txg) { in dmu_tx_try_assign()
974 asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite); in dmu_tx_try_assign()
976 fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree; in dmu_tx_try_assign()
978 usize = spa_get_asize(tx->tx_pool->dp_spa, tounref); in dmu_tx_try_assign()
988 tx->tx_space_towrite = asize + in dmu_tx_try_assign()
989 spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge); in dmu_tx_try_assign()
990 tx->tx_space_tofree = tofree; in dmu_tx_try_assign()
991 tx->tx_space_tooverwrite = tooverwrite; in dmu_tx_try_assign()
992 tx->tx_space_tounref = tounref; in dmu_tx_try_assign()
995 if (tx->tx_dir && asize != 0) { in dmu_tx_try_assign()
996 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory, in dmu_tx_try_assign()
997 asize, fsize, usize, &tx->tx_tempreserve_cookie, tx); in dmu_tx_try_assign()
1006 dmu_tx_unassign(dmu_tx_t *tx) in dmu_tx_unassign() argument
1010 if (tx->tx_txg == 0) in dmu_tx_unassign()
1013 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_unassign()
1015 for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh; in dmu_tx_unassign()
1016 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_unassign()
1022 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
1024 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
1031 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_unassign()
1033 tx->tx_lasttried_txg = tx->tx_txg; in dmu_tx_unassign()
1034 tx->tx_txg = 0; in dmu_tx_unassign()
1054 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how) in dmu_tx_assign() argument
1058 ASSERT(tx->tx_txg == 0); in dmu_tx_assign()
1060 ASSERT(!dsl_pool_sync_context(tx->tx_pool)); in dmu_tx_assign()
1062 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) { in dmu_tx_assign()
1063 dmu_tx_unassign(tx); in dmu_tx_assign()
1068 dmu_tx_wait(tx); in dmu_tx_assign()
1071 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_assign()
1077 dmu_tx_wait(dmu_tx_t *tx) in dmu_tx_wait() argument
1079 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_wait()
1081 ASSERT(tx->tx_txg == 0); in dmu_tx_wait()
1088 if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { in dmu_tx_wait()
1089 txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1); in dmu_tx_wait()
1090 } else if (tx->tx_needassign_txh) { in dmu_tx_wait()
1091 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait()
1094 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1097 tx->tx_needassign_txh = NULL; in dmu_tx_wait()
1099 txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1); in dmu_tx_wait()
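dmu_tx_assign() simply loops: each failed dmu_tx_try_assign() is rolled back by dmu_tx_unassign(), and under TXG_WAIT it blocks in dmu_tx_wait() itself. With TXG_NOWAIT, ERESTART comes back to the caller, which must call dmu_tx_wait() on the now-unassigned tx, abort it, and rebuild, typically after dropping its own locks. This retry shape recurs throughout the ZFS callers; a sketch assuming <sys/dmu.h>, with example_assign_retry() illustrative:

    #include <sys/dmu.h>

    /* Illustrative helper: the TXG_NOWAIT retry shape. */
    static int
    example_assign_retry(objset_t *os, uint64_t object,
        uint64_t off, int len)
    {
        dmu_tx_t *tx;
        int err;
    top:
        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, object, off, len);
        err = dmu_tx_assign(tx, TXG_NOWAIT);
        if (err == ERESTART) {
            /* Caller locks would be dropped before blocking here. */
            dmu_tx_wait(tx);     /* tx is unassigned (tx_txg == 0) */
            dmu_tx_abort(tx);
            goto top;            /* rebuild the tx and retry */
        }
        if (err != 0) {
            dmu_tx_abort(tx);    /* hard failure, e.g. ENOSPC */
            return (err);
        }
        /* ... dirty the held range here ... */
        dmu_tx_commit(tx);
        return (0);
    }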
1104 dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta) in dmu_tx_willuse_space() argument
1107 if (tx->tx_dir == NULL || delta == 0) in dmu_tx_willuse_space()
1111 ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=, in dmu_tx_willuse_space()
1112 tx->tx_space_towrite); in dmu_tx_willuse_space()
1113 (void) refcount_add_many(&tx->tx_space_written, delta, NULL); in dmu_tx_willuse_space()
1115 (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL); in dmu_tx_willuse_space()
1121 dmu_tx_commit(dmu_tx_t *tx) in dmu_tx_commit() argument
1125 ASSERT(tx->tx_txg != 0); in dmu_tx_commit()
1127 while (txh = list_head(&tx->tx_holds)) { in dmu_tx_commit()
1130 list_remove(&tx->tx_holds, txh); in dmu_tx_commit()
1135 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1137 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1142 dnode_rele(dn, tx); in dmu_tx_commit()
1145 if (tx->tx_tempreserve_cookie) in dmu_tx_commit()
1146 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_commit()
1148 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_commit()
1149 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks); in dmu_tx_commit()
1151 if (tx->tx_anyobj == FALSE) in dmu_tx_commit()
1152 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_commit()
1154 list_destroy(&tx->tx_callbacks); in dmu_tx_commit()
1155 list_destroy(&tx->tx_holds); in dmu_tx_commit()
1158 tx->tx_space_towrite, refcount_count(&tx->tx_space_written), in dmu_tx_commit()
1159 tx->tx_space_tofree, refcount_count(&tx->tx_space_freed)); in dmu_tx_commit()
1160 refcount_destroy_many(&tx->tx_space_written, in dmu_tx_commit()
1161 refcount_count(&tx->tx_space_written)); in dmu_tx_commit()
1162 refcount_destroy_many(&tx->tx_space_freed, in dmu_tx_commit()
1163 refcount_count(&tx->tx_space_freed)); in dmu_tx_commit()
1165 kmem_free(tx, sizeof (dmu_tx_t)); in dmu_tx_commit()
1169 dmu_tx_abort(dmu_tx_t *tx) in dmu_tx_abort() argument
1173 ASSERT(tx->tx_txg == 0); in dmu_tx_abort()
1175 while (txh = list_head(&tx->tx_holds)) { in dmu_tx_abort()
1178 list_remove(&tx->tx_holds, txh); in dmu_tx_abort()
1181 dnode_rele(dn, tx); in dmu_tx_abort()
1187 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_abort()
1188 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED); in dmu_tx_abort()
1190 list_destroy(&tx->tx_callbacks); in dmu_tx_abort()
1191 list_destroy(&tx->tx_holds); in dmu_tx_abort()
1193 refcount_destroy_many(&tx->tx_space_written, in dmu_tx_abort()
1194 refcount_count(&tx->tx_space_written)); in dmu_tx_abort()
1195 refcount_destroy_many(&tx->tx_space_freed, in dmu_tx_abort()
1196 refcount_count(&tx->tx_space_freed)); in dmu_tx_abort()
1198 kmem_free(tx, sizeof (dmu_tx_t)); in dmu_tx_abort()
1202 dmu_tx_get_txg(dmu_tx_t *tx) in dmu_tx_get_txg() argument
1204 ASSERT(tx->tx_txg != 0); in dmu_tx_get_txg()
1205 return (tx->tx_txg); in dmu_tx_get_txg()
1209 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data) in dmu_tx_callback_register() argument
1218 list_insert_tail(&tx->tx_callbacks, dcb); in dmu_tx_callback_register()
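Callbacks registered here ride with the transaction: on commit they are handed to the txg machinery (line 1149) and fire after the txg syncs, while an abort runs them immediately with ECANCELED (line 1188). A sketch assuming the dmu_tx_callback_func_t signature of (void *data, int error); both example functions are illustrative:

    #include <sys/dmu.h>
    #include <sys/kmem.h>

    /*
     * Illustrative callback: error is 0 once the txg has synced, or
     * ECANCELED if the transaction was aborted instead.
     */
    static void
    example_commit_cb(void *arg, int error)
    {
        kmem_free(arg, sizeof (uint64_t));
    }

    static void
    example_register(dmu_tx_t *tx)   /* any tx that will be committed */
    {
        uint64_t *state = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

        dmu_tx_callback_register(tx, example_commit_cb, state);
        /* example_commit_cb() now runs exactly once, commit or abort. */
    }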
1251 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx) in dmu_tx_sa_registration_hold() argument
1261 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, in dmu_tx_sa_registration_hold()
1264 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, in dmu_tx_sa_registration_hold()
1272 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_spill() argument
1278 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, in dmu_tx_hold_spill()
1303 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) in dmu_tx_hold_sa_create() argument
1305 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa_create()
1307 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); in dmu_tx_hold_sa_create()
1309 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa_create()
1312 if (tx->tx_objset->os_sa->sa_layout_attr_obj) in dmu_tx_hold_sa_create()
1313 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1315 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa_create()
1316 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa_create()
1317 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1318 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1321 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa_create()
1326 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, in dmu_tx_hold_sa_create()
1340 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) in dmu_tx_hold_sa() argument
1343 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa()
1349 dmu_tx_hold_bonus(tx, object); in dmu_tx_hold_sa()
1351 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa()
1354 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || in dmu_tx_hold_sa()
1355 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { in dmu_tx_hold_sa()
1356 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa()
1357 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa()
1358 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1359 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1362 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa()
1364 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) in dmu_tx_hold_sa()
1365 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa()
1368 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1369 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
1377 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1378 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
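The SA (system-attribute) holds bundle everything an attribute update might touch: the bonus buffer, the layout and registry ZAPs under sa_master_obj, and, when the attribute may spill, the spill block via dmu_tx_hold_spill(). A sketch of the caller side, assuming <sys/sa.h>; example_sa_update() is illustrative, and hdl and attr are assumed to be an existing SA handle and a registered attribute number:

    #include <sys/dmu.h>
    #include <sys/sa.h>

    /* Illustrative helper: set one uint64 system attribute. */
    static int
    example_sa_update(objset_t *os, sa_handle_t *hdl,
        sa_attr_type_t attr, uint64_t value)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_sa(tx, hdl, B_TRUE);   /* B_TRUE: layout may grow */

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = sa_update(hdl, attr, &value, sizeof (value), tx);
        dmu_tx_commit(tx);
        return (err);
    }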