/onnv-gate/usr/src/uts/common/fs/zfs/
txg.c
    48  tx_state_t *tx = &dp->dp_tx;   in txg_init() local
    50  bzero(tx, sizeof (tx_state_t));   in txg_init()
    52  tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);   in txg_init()
    57  mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);   in txg_init()
    59  cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,   in txg_init()
    61  list_create(&tx->tx_cpu[c].tc_callbacks[i],   in txg_init()
    67  mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);   in txg_init()
    69  cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);   in txg_init()
    70  cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);   in txg_init()
    71  cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);   in txg_init()
    [all …]
|
dmu_tx.c
    40  typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    47  dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);   in dmu_tx_create_dd() local
    48  tx->tx_dir = dd;   in dmu_tx_create_dd()
    50  tx->tx_pool = dd->dd_pool;   in dmu_tx_create_dd()
    51  list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),   in dmu_tx_create_dd()
    53  list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),   in dmu_tx_create_dd()
    56  refcount_create(&tx->tx_space_written);   in dmu_tx_create_dd()
    57  refcount_create(&tx->tx_space_freed);   in dmu_tx_create_dd()
    59  return (tx);   in dmu_tx_create_dd()
    65  dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);   in dmu_tx_create() local
    [all …]
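Context note: the hits above are inside the transaction allocator itself; every consumer in the files below drives the object it returns through the same create → hold → assign → commit cycle. A minimal sketch of that cycle, assuming only the public interfaces declared in dmu_tx.h and dmu.h further down (the objset, object number, offset and buffer are placeholders, and error handling is abbreviated):

/*
 * Sketch only: the canonical consumer-side DMU transaction cycle.
 */
static int
example_tx_write(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);	/* allocate an open-context tx */
	int error;

	dmu_tx_hold_write(tx, object, off, len); /* declare what will be dirtied */

	error = dmu_tx_assign(tx, TXG_WAIT);	/* bind the tx to an open txg */
	if (error != 0) {
		dmu_tx_abort(tx);		/* an unassigned tx must be aborted */
		return (error);
	}

	dmu_write(os, object, off, len, buf, tx); /* do the work under the tx */
	dmu_tx_commit(tx);			/* drop the txg hold */
	return (0);
}

TXG_WAIT blocks until the transaction can join an open transaction group; the non-blocking variant and its retry loop show up under zfs_vnops.c below.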
|
dsl_pool.c
    228  dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);   in dsl_pool_create() local
    235  NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);   in dsl_pool_create()
    239  DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);   in dsl_pool_create()
    246  dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);   in dsl_pool_create()
    251  (void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);   in dsl_pool_create()
    258  FREE_DIR_NAME, tx);   in dsl_pool_create()
    263  obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);   in dsl_pool_create()
    265  DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);   in dsl_pool_create()
    271  dsl_pool_create_origin(dp, tx);   in dsl_pool_create()
    274  obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);   in dsl_pool_create()
    [all …]
|
dmu_object.c
    32  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)   in dmu_object_alloc() argument
    76  dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);   in dmu_object_alloc()
    81  dmu_tx_add_new_object(tx, os, object);   in dmu_object_alloc()
    87  int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)   in dmu_object_claim() argument
    92  if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))   in dmu_object_claim()
    98  dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);   in dmu_object_claim()
    101  dmu_tx_add_new_object(tx, os, object);   in dmu_object_claim()
    110  dmu_tx_t *tx;   in dmu_object_reclaim() local
    146  tx = dmu_tx_create(os);   in dmu_object_reclaim()
    147  dmu_tx_hold_bonus(tx, object);   in dmu_object_reclaim()
    [all …]
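The allocation paths above run inside a transaction the caller has already assigned. A hedged sketch of that caller side, assuming the common convention of holding DMU_NEW_OBJECT before the object number is known (the object type here is chosen arbitrarily for illustration):

/*
 * Sketch only: allocating a fresh object inside an assigned transaction.
 * DMU_NEW_OBJECT tells the hold code the object number is not yet known;
 * dmu_object_alloc() picks a free dnode and returns its number.
 */
static int
example_object_alloc(objset_t *os, uint64_t *objp)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	*objp = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
	    DMU_OT_NONE, 0, tx);		/* 0 blocksize = default */
	dmu_tx_commit(tx);
	return (0);
}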
|
dsl_deadlist.c
    122  dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)   in dsl_deadlist_alloc() argument
    125  return (bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx));   in dsl_deadlist_alloc()
    127  sizeof (dsl_deadlist_phys_t), tx));   in dsl_deadlist_alloc()
    131  dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)   in dsl_deadlist_free() argument
    139  bpobj_free(os, dlobj, tx);   in dsl_deadlist_free()
    146  bpobj_free(os, za.za_first_integer, tx);   in dsl_deadlist_free()
    148  VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));   in dsl_deadlist_free()
    152  dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)   in dsl_deadlist_insert() argument
    159  bpobj_enqueue(&dl->dl_bpobj, bp, tx);   in dsl_deadlist_insert()
    165  dmu_buf_will_dirty(dl->dl_dbuf, tx);   in dsl_deadlist_insert()
    [all …]
|
dsl_dir.c
    42  static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx);
    411  dmu_tx_t *tx)   in dsl_dir_create_sync() argument
    419  DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);   in dsl_dir_create_sync()
    422  name, sizeof (uint64_t), 1, &ddobj, tx));   in dsl_dir_create_sync()
    426  DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));   in dsl_dir_create_sync()
    429  dmu_buf_will_dirty(dbuf, tx);   in dsl_dir_create_sync()
    436  DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);   in dsl_dir_create_sync()
    438  DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);   in dsl_dir_create_sync()
    448  dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)   in dsl_dir_destroy_check() argument
    478  dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)   in dsl_dir_destroy_sync() argument
    [all …]
|
spa_history.c
    81  spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)   in spa_history_create_obj() argument
    90  sizeof (spa_history_phys_t), tx);   in spa_history_create_obj()
    94  &spa->spa_history, tx) == 0);   in spa_history_create_obj()
    100  dmu_buf_will_dirty(dbp, tx);   in spa_history_create_obj()
    146  dmu_tx_t *tx)   in spa_history_write() argument
    165  dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);   in spa_history_write()
    171  len, (char *)buf + firstwrite, tx);   in spa_history_write()
    192  spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)   in spa_history_log_sync() argument
    212  spa_history_create_obj(spa, tx);   in spa_history_log_sync()
    222  dmu_buf_will_dirty(dbp, tx);   in spa_history_log_sync()
    [all …]
|
zfs_dir.c
    450  zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)   in zfs_unlinked_add() argument
    458  zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));   in zfs_unlinked_add()
    530  dmu_tx_t *tx;   in zfs_purgedir() local
    549  tx = dmu_tx_create(zfsvfs->z_os);   in zfs_purgedir()
    550  dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);   in zfs_purgedir()
    551  dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);   in zfs_purgedir()
    552  dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);   in zfs_purgedir()
    553  dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);   in zfs_purgedir()
    555  zfs_sa_upgrade_txholds(tx, xzp);   in zfs_purgedir()
    556  error = dmu_tx_assign(tx, TXG_WAIT);   in zfs_purgedir()
    [all …]
|
dsl_dataset.c
    86  dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)   in dsl_dataset_block_born() argument
    88  int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);   in dsl_dataset_block_born()
    95  ASSERT(dmu_tx_is_syncing(tx));   in dsl_dataset_block_born()
    107  dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,   in dsl_dataset_block_born()
    108  used, compressed, uncompressed, tx);   in dsl_dataset_block_born()
    109  dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);   in dsl_dataset_block_born()
    112  dmu_buf_will_dirty(ds->ds_dbuf, tx);   in dsl_dataset_block_born()
    123  compressed, uncompressed, tx);   in dsl_dataset_block_born()
    125  DD_USED_REFRSRV, DD_USED_HEAD, tx);   in dsl_dataset_block_born()
    130  dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,   in dsl_dataset_block_kill() argument
    [all …]
|
dsl_scan.c
    57  static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);
    155  dsl_scan_setup_check(void *arg1, void *arg2, dmu_tx_t *tx)   in dsl_scan_setup_check() argument
    167  dsl_scan_setup_sync(void *arg1, void *arg2, dmu_tx_t *tx)   in dsl_scan_setup_sync() argument
    181  scn->scn_phys.scn_max_txg = tx->tx_txg;   in dsl_scan_setup_sync()
    225  ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);   in dsl_scan_setup_sync()
    227  dsl_scan_sync_state(scn, tx);   in dsl_scan_setup_sync()
    229  spa_history_log_internal(LOG_POOL_SCAN, spa, tx,   in dsl_scan_setup_sync()
    236  dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)   in dsl_scan_done() argument
    257  DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);   in dsl_scan_done()
    262  scn->scn_phys.scn_queue_obj, tx));   in dsl_scan_done()
    [all …]
|
dsl_synctask.c
    36  dsl_null_checkfunc(void *arg1, void *arg2, dmu_tx_t *tx)   in dsl_null_checkfunc() argument
    76  dmu_tx_t *tx;   in dsl_sync_task_group_wait() local
    81  tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir);   in dsl_sync_task_group_wait()
    82  VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));   in dsl_sync_task_group_wait()
    84  txg = dmu_tx_get_txg(tx);   in dsl_sync_task_group_wait()
    100  dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);   in dsl_sync_task_group_wait()
    107  dmu_tx_commit(tx);   in dsl_sync_task_group_wait()
    118  dmu_tx_commit(tx);   in dsl_sync_task_group_wait()
    131  dsl_sync_task_group_nowait(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)   in dsl_sync_task_group_nowait() argument
    136  txg = dmu_tx_get_txg(tx);   in dsl_sync_task_group_nowait()
    [all …]
|
bpobj.c
    30  bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)   in bpobj_alloc() argument
    42  DMU_OT_BPOBJ_HDR, size, tx));   in bpobj_alloc()
    46  bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)   in bpobj_free() argument
    82  bpobj_free(os, objarray[blkoff], tx);   in bpobj_free()
    88  VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));   in bpobj_free()
    94  VERIFY3U(0, ==, dmu_object_free(os, obj, tx));   in bpobj_free()
    148  bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,   in bpobj_iterate_impl() argument
    160  dmu_buf_will_dirty(bpo->bpo_dbuf, tx);   in bpobj_iterate_impl()
    184  err = func(arg, bp, tx);   in bpobj_iterate_impl()
    206  i * sizeof (blkptr_t), -1ULL, tx));   in bpobj_iterate_impl()
    [all …]
|
zap.c
    72  fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)   in fzap_upgrade() argument
    115  dmu_buf_will_dirty(db, tx);   in fzap_upgrade()
    128  zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)   in zap_tryupgradedir() argument
    133  dmu_buf_will_dirty(zap->zap_dbuf, tx);   in zap_tryupgradedir()
    146  dmu_tx_t *tx)   in zap_table_grow() argument
    182  dmu_buf_will_dirty(db_new, tx);   in zap_table_grow()
    189  dmu_buf_will_dirty(db_new, tx);   in zap_table_grow()
    203  tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);   in zap_table_grow()
    220  dmu_tx_t *tx)   in zap_table_store() argument
    239  dmu_buf_will_dirty(db, tx);   in zap_table_store()
    [all …]
|
zfs_vnops.c
    594  dmu_tx_t *tx;   in zfs_write() local
    773  tx = dmu_tx_create(zfsvfs->z_os);   in zfs_write()
    774  dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);   in zfs_write()
    775  dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));   in zfs_write()
    776  zfs_sa_upgrade_txholds(tx, zp);   in zfs_write()
    777  error = dmu_tx_assign(tx, TXG_NOWAIT);   in zfs_write()
    780  dmu_tx_wait(tx);   in zfs_write()
    781  dmu_tx_abort(tx);   in zfs_write()
    784  dmu_tx_abort(tx);   in zfs_write()
    805  zfs_grow_blocksize(zp, new_blksz, tx);   in zfs_write()
    [all …]
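Lines 773–784 above are the standard non-blocking assignment idiom: try TXG_NOWAIT, and if the transaction-group throttle pushes back with ERESTART, wait it out, throw the half-built tx away, and rebuild it from scratch. Roughly, as a sketch with placeholder arguments rather than the zfs_write() specifics:

/*
 * Sketch only: the assign/retry idiom visible at lines 773-784 above.
 */
static int
example_assign_retry(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx;
	int error;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error != 0) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);	/* wait for the open txg to drain */
			dmu_tx_abort(tx);	/* discard the failed tx ...      */
			goto top;		/* ... and build a fresh one      */
		}
		dmu_tx_abort(tx);		/* hard failure, e.g. ENOSPC      */
		return (error);
	}
	/* ... dirty the held range here ... */
	dmu_tx_commit(tx);
	return (0);
}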
|
zvol.c
    310  zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)   in zvol_create_cb() argument
    333  DMU_OT_NONE, 0, tx);   in zvol_create_cb()
    337  DMU_OT_NONE, 0, tx);   in zvol_create_cb()
    340  error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);   in zvol_create_cb()
    354  dmu_tx_t *tx;   in zvol_replay_write() local
    372  tx = dmu_tx_create(os);   in zvol_replay_write()
    373  dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);   in zvol_replay_write()
    374  error = dmu_tx_assign(tx, TXG_WAIT);   in zvol_replay_write()
    376  dmu_tx_abort(tx);   in zvol_replay_write()
    378  dmu_write(os, ZVOL_OBJ, offset, length, data, tx);   in zvol_replay_write()
    [all …]
|
/onnv-gate/usr/src/uts/common/fs/zfs/sys/
dmu_tx.h
    111  int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
    112  void dmu_tx_commit(dmu_tx_t *tx);
    113  void dmu_tx_abort(dmu_tx_t *tx);
    114  uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
    115  void dmu_tx_wait(dmu_tx_t *tx);
    117  void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    130  int dmu_tx_is_syncing(dmu_tx_t *tx);
    131  int dmu_tx_private_ok(dmu_tx_t *tx);
    132  void dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object);
    133  void dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta);
    [all …]
|
zap.h
    129  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    131  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    134  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    141  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    144  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    156  int zap_destroy(objset_t *ds, uint64_t zapobj, dmu_tx_t *tx);
    214  const void *val, dmu_tx_t *tx);
    217  const void *val, dmu_tx_t *tx);
    228  int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx);
    231  int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx);
    [all …]
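The ZAP entry points that modify an object all take a dmu_tx_t, as the hits above show; lookups do not. A sketch of a typical read-modify-write of a single 64-bit attribute, assuming an existing ZAP object (the object number and attribute name are placeholders):

/*
 * Sketch only: bump a 64-bit ZAP attribute.  zap_lookup() needs no
 * transaction; zap_update() does, because it dirties the ZAP object.
 */
static int
example_zap_bump(objset_t *os, uint64_t zapobj, const char *name)
{
	uint64_t val = 0;
	dmu_tx_t *tx;
	int error;

	/* ENOENT from the lookup just means "start from zero". */
	error = zap_lookup(os, zapobj, name, sizeof (uint64_t), 1, &val);
	if (error != 0 && error != ENOENT)
		return (error);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);	/* may add an entry */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	val++;
	error = zap_update(os, zapobj, name, sizeof (uint64_t), 1, &val, tx);
	dmu_tx_commit(tx);
	return (error);
}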
|
dnode.h
    258  void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
    259  void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
    260  void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);
    268  void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
    269  void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
    271  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    273  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    274  void dnode_free(dnode_t *dn, dmu_tx_t *tx);
    278  int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
    280  void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
    [all …]
|
dmu.h
    190  void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg);
    249  int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
    251  int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
    271  int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
    299  int ibs, dmu_tx_t *tx);
    306  dmu_tx_t *tx);
    313  dmu_tx_t *tx);
    428  void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
    457  void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
    458  void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    [all …]
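dmu_object_free() at line 271 must run inside a transaction that has declared the free; a common way to do that is to hold the object's full range with dmu_tx_hold_free(). A sketch under that assumption (not taken verbatim from any of these files):

/*
 * Sketch only: free an entire object.  DMU_OBJECT_END covers the whole
 * range so the hold accounts for every block being released.
 */
static int
example_object_free(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}
	error = dmu_object_free(os, object, tx);
	dmu_tx_commit(tx);
	return (error);
}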
|
/onnv-gate/usr/src/cmd/refer/
refer6.c
    33  char *s, *tx;   in putref() local
    48  tx = s+3;   in putref()
    52  tx = s+2;   in putref()
    57  tx = s;   in putref()
    63  tx = caps(tx, buf1);   in putref()
    65  fprintf(stderr, " s %o tx %o %s\n", s, tx, tx);   in putref()
    70  fprintf(fo, "%s%c", tx, sep);   in putref()
    73  lastype, tx, sep);   in putref()
    76  last(tx)) != 0);   in putref()
    78  lauth = last(tx) == '.';   in putref()
    [all …]
|
tick.c
    32  struct tbuffer tx;   in tick() local
    34  times(&tx);   in tick()
    36  user = tx.proc_user_time;   in tick()
    37  systm = tx.proc_system_time;   in tick()
    44  struct tbuffer tx;   in tock() local
    49  times(&tx);   in tock()
    52  use = (tx.proc_user_time - user)/60.;   in tock()
    53  sys = (tx.proc_system_time - systm)/60.;
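tick.c predates the POSIX times() interface: struct tbuffer and the hard-coded 60 ticks per second are the old pre-POSIX names. The same measurement with today's headers looks roughly like this sketch (struct tms replaces struct tbuffer, and sysconf(_SC_CLK_TCK) replaces the constant 60):

/* Sketch: a modern, user-space equivalent of tick()/tock() above. */
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

static struct tms t0;

static void
tick(void)
{
	(void) times(&t0);
}

static void
tock(void)
{
	struct tms t1;
	double hz = (double)sysconf(_SC_CLK_TCK);	/* ticks per second */

	(void) times(&t1);
	printf("user %.2fs  sys %.2fs\n",
	    (t1.tms_utime - t0.tms_utime) / hz,
	    (t1.tms_stime - t0.tms_stime) / hz);
}

int
main(void)
{
	volatile unsigned long spin = 0;

	tick();
	for (unsigned long i = 0; i < 100000000UL; i++)
		spin += i;			/* burn some user time */
	tock();
	return (0);
}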
|
/onnv-gate/usr/src/uts/common/io/nxge/
nxge_hv.c
    53  nxhv_dc_fp_t *tx;   in nxge_hio_hv_init() local
    69  tx = &nhd->hio.tx;   in nxge_hio_hv_init()
    71  tx->assign = &hv_niu_tx_dma_assign;   in nxge_hio_hv_init()
    72  tx->unassign = &hv_niu_tx_dma_unassign;   in nxge_hio_hv_init()
    73  tx->get_map = &hv_niu_vr_get_txmap;   in nxge_hio_hv_init()
    76  tx->lp_conf = &hv_niu_tx_logical_page_conf;   in nxge_hio_hv_init()
    77  tx->lp_info = &hv_niu_tx_logical_page_info;   in nxge_hio_hv_init()
    79  tx->lp_cfgh_conf = &hv_niu_cfgh_tx_logical_page_conf;   in nxge_hio_hv_init()
    80  tx->lp_cfgh_info = &hv_niu_cfgh_tx_logical_page_info;   in nxge_hio_hv_init()
    82  tx->getinfo = &hv_niu_vrtx_getinfo;   in nxge_hio_hv_init()
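nxge_hio_hv_init() above is a dispatch-table fill: the driver binds hypervisor entry points to a struct of function pointers once, so later code can call tx->assign() without caring which ABI variant is underneath. A stripped-down user-space illustration of that shape (all names below are hypothetical, not the driver's):

/*
 * Sketch only: a per-channel ops table populated once at init time,
 * mirroring the pattern in nxge_hio_hv_init().
 */
#include <stdint.h>

typedef int (*dc_assign_fn)(uint64_t cookie, uint64_t chan);
typedef int (*dc_unassign_fn)(uint64_t cookie, uint64_t chan);

typedef struct dc_ops {
	dc_assign_fn	assign;
	dc_unassign_fn	unassign;
} dc_ops_t;

/* Placeholder back-ends standing in for the hv_niu_* hypervisor calls. */
static int v1_assign(uint64_t cookie, uint64_t chan)   { (void)cookie; (void)chan; return 0; }
static int v1_unassign(uint64_t cookie, uint64_t chan) { (void)cookie; (void)chan; return 0; }

static void
dc_ops_init(dc_ops_t *tx)
{
	tx->assign = &v1_assign;	/* choose the back-end once, up front */
	tx->unassign = &v1_unassign;
}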
|
/onnv-gate/usr/src/common/openssl/crypto/rc4/asm/
rc4-586.pl
    33  $tx="ecx";
    70  &add( &LB($y), &LB($tx));
    74  &add( $ty, $tx);
    75  &mov( &DWP(0,$d,$y,4),$tx);
    78  &mov( $tx, &DWP(0,$d,$x,4)) if $p < 1; # NEXT ROUND
    145  &mov( $tx, &DWP(0,$d,$x,4));
    169  &mov( $tx, &swtmp(0));
    171  &xor( $tx, $ty);
    173  &mov( &DWP(-8,$out,"",0), $tx);
    174  &mov( $tx, &swtmp(1));
    [all …]
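rc4-586.pl is a perlasm generator: $x, $y, $tx, $ty are register aliases and $d points at the 256-entry RC4 state array. What the generated loop computes is the ordinary RC4 key-stream step, sketched here in plain C for reference:

/*
 * Sketch: the RC4 stream step that rc4-586.pl unrolls in x86 assembly.
 * x, y, tx, ty below correspond to $x, $y, $tx, $ty in the perlasm,
 * and S[] corresponds to the state addressed through $d.
 */
#include <stddef.h>
#include <stdint.h>

static void
rc4_stream(uint8_t S[256], uint8_t *x_in, uint8_t *y_in,
    const uint8_t *in, uint8_t *out, size_t len)
{
	uint8_t x = *x_in, y = *y_in;

	for (size_t i = 0; i < len; i++) {
		x = (uint8_t)(x + 1);
		uint8_t tx = S[x];			/* tx = S[x]          */
		y = (uint8_t)(y + tx);			/* y += tx            */
		uint8_t ty = S[y];			/* ty = S[y]          */
		S[y] = tx;				/* swap S[x] and S[y] */
		S[x] = ty;
		out[i] = in[i] ^ S[(uint8_t)(tx + ty)];	/* key-stream byte    */
	}
	*x_in = x;
	*y_in = y;
}

The perlasm interleaves several of these steps per iteration and keeps the state in 32-bit slots (the ",4" scale in &DWP), but the data flow is the same.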
|
/onnv-gate/usr/src/cmd/rcap/common/
rcapd_conf.c
    169  scf_transaction_t *tx = NULL;   in modify_config() local
    179  if ((tx = scf_transaction_setup(simple_h)) == NULL) {   in modify_config()
    186  if (scf_set_count_property(tx, PRESSURE,   in modify_config()
    194  if (scf_set_count_property(tx, RECONFIG_INT,   in modify_config()
    201  if (scf_set_count_property(tx, RSS_SAMPLE_INT,   in modify_config()
    208  if (scf_set_count_property(tx, REPORT_INT,   in modify_config()
    215  if (scf_set_count_property(tx, WALK_INT,   in modify_config()
    222  if ((rval = scf_transaction_commit(tx)) == -1)   in modify_config()
    226  if (scf_transaction_restart(simple_h, tx)   in modify_config()
    238  if (tx != NULL) {   in modify_config()
    [all …]
|
/onnv-gate/usr/src/uts/common/io/myri10ge/drv/
myri10ge.c
    769  myri10ge_tx_ring_t *tx = &ss->tx;   in myri10ge_add_tx_handle() local
    788  mutex_enter(&tx->handle_lock);   in myri10ge_add_tx_handle()
    790  handle->next = tx->free_tx_handles;   in myri10ge_add_tx_handle()
    791  tx->free_tx_handles = handle;   in myri10ge_add_tx_handle()
    792  mutex_exit(&tx->handle_lock);   in myri10ge_add_tx_handle()
    799  myri10ge_tx_ring_t *tx = &ss->tx;   in myri10ge_remove_tx_handles() local
    801  mutex_enter(&tx->handle_lock);   in myri10ge_remove_tx_handles()
    803  handle = tx->free_tx_handles;   in myri10ge_remove_tx_handles()
    805  tx->free_tx_handles = handle->next;   in myri10ge_remove_tx_handles()
    808  handle = tx->free_tx_handles;   in myri10ge_remove_tx_handles()
    [all …]
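myri10ge_add_tx_handle() and myri10ge_remove_tx_handles() above manage a singly linked free list of DMA handles guarded by tx->handle_lock. A user-space sketch of the same pattern, with a pthread mutex standing in for the kernel mutex and illustrative types only:

/*
 * Sketch only: push/pop on a mutex-protected free list of reusable
 * handles, mirroring the pattern in the hits above.
 */
#include <pthread.h>
#include <stddef.h>

struct handle {
	struct handle *next;
	/* ... per-handle state (e.g. a DMA mapping) would live here ... */
};

struct tx_ring {
	pthread_mutex_t handle_lock;
	struct handle *free_tx_handles;
};

static void
put_handle(struct tx_ring *tx, struct handle *h)
{
	pthread_mutex_lock(&tx->handle_lock);
	h->next = tx->free_tx_handles;		/* push onto the free list */
	tx->free_tx_handles = h;
	pthread_mutex_unlock(&tx->handle_lock);
}

static struct handle *
get_handle(struct tx_ring *tx)
{
	pthread_mutex_lock(&tx->handle_lock);
	struct handle *h = tx->free_tx_handles;
	if (h != NULL)
		tx->free_tx_handles = h->next;	/* pop the head */
	pthread_mutex_unlock(&tx->handle_lock);
	return (h);		/* NULL means the caller must allocate a new one */
}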
|