Lines Matching defs:itx

51  * calls that change the file system. Each itx has enough information to
147 static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
928 * any pending itx records that have not been cleaned by zil_clean().
1312 * When an itx is "skipped", this function is used to properly mark the
1313 * waiter as "done", and signal any thread(s) waiting on it. An itx can
1315 * one of them being that the itx was committed via spa_sync(), prior to
1331 * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
1332 * At this point, the waiter will no longer be referenced by the itx,
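The fragments above (from the comment block over zil_commit_waiter_skip() and
zil_commit_waiter_link_lwb()) describe the two ways a commit waiter completes:
it is either "skipped" and marked done immediately, or linked to an lwb and
signaled later when that lwb's zio completes. A minimal self-contained sketch
of the first path, using pthreads and a toy zcw_t in place of the real kernel
types (an assumption of this sketch, not the zil.c definitions):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct zcw {
            pthread_mutex_t zcw_lock;
            pthread_cond_t  zcw_cv;
            bool            zcw_done;   /* set exactly once, then signaled */
    } zcw_t;

    /*
     * Path 1: the itx was "skipped" (e.g. it became durable via
     * spa_sync() before reaching an lwb), so the waiter is marked
     * "done" right away and any waiting threads are woken.
     */
    static void
    waiter_skip(zcw_t *zcw)
    {
            pthread_mutex_lock(&zcw->zcw_lock);
            zcw->zcw_done = true;
            pthread_cond_broadcast(&zcw->zcw_cv);
            pthread_mutex_unlock(&zcw->zcw_lock);
    }

Path 2 differs only in timing: zil_commit_waiter_link_lwb() appends the waiter
to the lwb's "lwb_waiter" list, and the same mark-done-and-signal step runs
from the lwb's zio completion instead of immediately.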
1458 itx_t *itx;
1483 while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
1484 zil_itx_destroy(itx);
1907 for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
1908 itx = list_next(&lwb->lwb_itxs, itx))
1909 zil_lwb_commit(zilog, lwb, itx);
2101 zil_itx_record_size(itx_t *itx)
2103 lr_t *lr = &itx->itx_lr;
2112 zil_itx_data_size(itx_t *itx)
2114 lr_t *lr = &itx->itx_lr;
2117 if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
2126 zil_itx_full_size(itx_t *itx)
2128 lr_t *lr = &itx->itx_lr;
2133 return (lr->lrc_reclen + zil_itx_data_size(itx));
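Taken together, the three helpers matched above decompose an itx's space cost:
zil_itx_record_size() is just the log record length, zil_itx_data_size() is the
extra write payload that will be copied into the lwb later (only for TX_WRITE
records in the WR_NEED_COPY state), and zil_itx_full_size() is their sum. A
self-contained sketch of the same arithmetic; the structs are simplified
stand-ins for the zil.h definitions, and TX_WRITE_T is an illustrative tag:

    #include <stdint.h>

    #define TX_WRITE_T 9    /* illustrative stand-in for TX_WRITE */
    enum wr_state { WR_INDIRECT, WR_COPIED, WR_NEED_COPY };

    typedef struct {
            uint64_t lrc_txtype;
            uint64_t lrc_reclen;
            uint64_t lr_length;     /* write payload size (TX_WRITE only) */
    } lr_t;

    typedef struct {
            lr_t          itx_lr;
            enum wr_state itx_wr_state;
    } itx_t;

    static uint64_t
    record_size(const itx_t *itx)
    {
            return (itx->itx_lr.lrc_reclen);
    }

    static uint64_t
    data_size(const itx_t *itx)
    {
            /* only WR_NEED_COPY writes carry payload outside the record;
             * round up to 8 bytes as the real P2ROUNDUP does */
            if (itx->itx_lr.lrc_txtype == TX_WRITE_T &&
                itx->itx_wr_state == WR_NEED_COPY)
                    return ((itx->itx_lr.lr_length + 7) & ~7ULL);
            return (0);
    }

    static uint64_t
    full_size(const itx_t *itx)
    {
            return (record_size(itx) + data_size(itx));
    }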
2137 * Estimate space needed in the lwb for the itx. Allocate more lwbs or
2138 * split the itx as needed, but don't touch the actual transaction data.
2143 zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
2156 lr = &itx->itx_lr;
2160 * A commit itx doesn't represent any on-disk state; instead
2164 * completion of that lwb). Thus, we don't process this itx's
2165 * log record if it's a commit itx (these itx's don't have log
2166 * records), and instead link the itx's waiter onto the lwb's
2172 zil_commit_waiter_link_lwb(itx->itx_private, lwb);
2173 list_insert_tail(&lwb->lwb_itxs, itx);
2180 dlen = zil_itx_data_size(itx);
2214 ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY);
2215 citx = zil_itx_clone(itx);
2223 citx = itx;
2230 * equal to the itx sequence number because not all transactions
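The zil_lwb_assign() fragments above capture two special cases: a TX_COMMIT itx
carries no log record, so only its waiter is linked to the lwb, and a
WR_NEED_COPY write that cannot fit in the current lwb is cloned so one copy can
be placed now and the remainder later, without touching the transaction data
itself. A condensed, self-contained sketch of that control flow; the types,
the list handling, and the split condition are simplified assumptions here,
not the real zil.c logic:

    #include <stddef.h>

    enum wr_state { WR_INDIRECT, WR_COPIED, WR_NEED_COPY };
    enum { TX_COMMIT_T = 1 };       /* stand-in tag for TX_COMMIT */

    typedef struct itx {
            int             txtype;
            enum wr_state   wr_state;
            void           *waiter;         /* itx_private for TX_COMMIT */
            struct itx     *next;
    } itx_t;

    typedef struct lwb {
            itx_t  *itxs;                   /* models lwb_itxs */
            size_t  nused, nmax;
    } lwb_t;

    static void link_waiter(void *w, lwb_t *lwb) { (void)w; (void)lwb; }
    static itx_t *clone_itx(itx_t *itx) { return (itx); /* see zil_itx_clone */ }

    static void
    append_itx(lwb_t *lwb, itx_t *itx)
    {
            itx->next = lwb->itxs;          /* zil.c appends at the tail */
            lwb->itxs = itx;
    }

    static lwb_t *
    lwb_assign(lwb_t *lwb, itx_t *itx, size_t reclen, size_t dlen)
    {
            if (itx->txtype == TX_COMMIT_T) {
                    /* no on-disk state: just tie the waiter to this lwb */
                    link_waiter(itx->waiter, lwb);
                    append_itx(lwb, itx);
                    return (lwb);
            }

            itx_t *citx;
            if (itx->wr_state == WR_NEED_COPY &&
                reclen + dlen > lwb->nmax - lwb->nused) {
                    /* split: the clone covers what fits in this lwb; a
                     * full implementation opens more lwbs for the rest */
                    citx = clone_itx(itx);
            } else {
                    citx = itx;             /* fits whole; no clone needed */
            }
            append_itx(lwb, citx);
            lwb->nused += reclen + dlen;
            return (lwb);
    }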
2258 zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
2265 lr = &itx->itx_lr;
2272 dlen = zil_itx_data_size(itx);
2286 if (itx->itx_wr_state == WR_COPIED) {
2294 if (itx->itx_wr_state == WR_NEED_COPY) {
2301 ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
2321 error = zilog->zl_get_data(itx->itx_private,
2322 itx->itx_gen, lrwb, dbuf, lwb,
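zil_lwb_commit() is where the three TX_WRITE dispositions matched above
diverge: WR_COPIED payloads were embedded in the log record when the itx was
created, WR_NEED_COPY payloads are copied into the lwb's buffer at commit
time, and WR_INDIRECT payloads are fetched through the owner's zl_get_data
callback (which may instead write the data out to its own block). A minimal
stand-alone sketch of that dispatch; the callback signature is a
simplification, not the real zl_get_data prototype:

    #include <string.h>
    #include <stddef.h>

    enum wr_state { WR_INDIRECT, WR_COPIED, WR_NEED_COPY };

    static int
    commit_write(enum wr_state st, char *lwb_dst, const char *payload,
        size_t dlen, int (*get_data)(char *dst, size_t len))
    {
            switch (st) {
            case WR_COPIED:
                    /* payload is already inside the log record */
                    return (0);
            case WR_NEED_COPY:
                    /* copy the payload into the lwb buffer now */
                    memcpy(lwb_dst, payload, dlen);
                    return (0);
            case WR_INDIRECT:
                    /* ask the dataset for the data; the real callback
                     * may write to an indirect block and can fail if
                     * the data has already been freed */
                    return (get_data(lwb_dst, dlen));
            }
            return (-1);
    }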
2378 itx_t *itx;
2385 itx = zio_data_buf_alloc(itxsize);
2386 itx->itx_lr.lrc_txtype = txtype;
2387 itx->itx_lr.lrc_reclen = lrsize;
2388 itx->itx_lr.lrc_seq = 0; /* defensive */
2389 memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
2390 itx->itx_sync = B_TRUE; /* default is synchronous */
2391 itx->itx_callback = NULL;
2392 itx->itx_callback_data = NULL;
2393 itx->itx_size = itxsize;
2395 return (itx);
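The zil_itx_create() lines above show the allocation contract that the later
zil_itx_destroy() assertions check: the itx header and its log record live in
one buffer, lrc_reclen always equals itx_size minus the offset of itx_lr, and
new itxs default to synchronous with no callback. A self-contained model using
malloc in place of zio_data_buf_alloc() (an assumption of this sketch):

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    typedef struct {
            uint64_t lrc_txtype, lrc_reclen, lrc_seq;
    } lr_t;

    typedef struct itx {
            void  (*itx_callback)(void *);
            void   *itx_callback_data;
            size_t  itx_size;
            bool    itx_sync;
            lr_t    itx_lr;     /* the log record lives inline at the tail */
    } itx_t;

    static itx_t *
    itx_create(uint64_t txtype, size_t lrsize)
    {
            size_t itxsize = offsetof(itx_t, itx_lr) + lrsize;
            itx_t *itx = malloc(itxsize);

            /* zil.c zeros only the record extension beyond the common
             * lr_t; zeroing everything here is a simplification */
            memset(itx, 0, itxsize);
            itx->itx_lr.lrc_txtype = txtype;
            itx->itx_lr.lrc_reclen = lrsize;    /* checked at destroy */
            itx->itx_sync = true;               /* default: synchronous */
            itx->itx_size = itxsize;
            return (itx);
    }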
2405 itx_t *itx = zio_data_buf_alloc(oitx->itx_size);
2406 memcpy(itx, oitx, oitx->itx_size);
2407 itx->itx_callback = NULL;
2408 itx->itx_callback_data = NULL;
2409 return (itx);
2413 zil_itx_destroy(itx_t *itx)
2415 ASSERT3U(itx->itx_size, >=, sizeof (itx_t));
2416 ASSERT3U(itx->itx_lr.lrc_reclen, ==,
2417 itx->itx_size - offsetof(itx_t, itx_lr));
2418 IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
2419 IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
2421 if (itx->itx_callback != NULL)
2422 itx->itx_callback(itx->itx_callback_data);
2424 zio_data_buf_free(itx, itx->itx_size);
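A companion sketch for zil_itx_clone() and zil_itx_destroy(), reusing the toy
itx_t and headers from the create sketch above. The key detail in the matched
lines is that a clone is a byte-for-byte copy of the whole buffer except that
its callback fields are cleared, so a callback fires exactly once, when the
original itx is destroyed:

    static itx_t *
    itx_clone(const itx_t *oitx)
    {
            itx_t *itx = malloc(oitx->itx_size);

            memcpy(itx, oitx, oitx->itx_size);
            itx->itx_callback = NULL;       /* the clone never owns it */
            itx->itx_callback_data = NULL;
            return (itx);
    }

    static void
    itx_destroy(itx_t *itx)
    {
            /* fire the owner's callback, if any, then free the buffer */
            if (itx->itx_callback != NULL)
                    itx->itx_callback(itx->itx_callback_data);
            free(itx);
    }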
2434 itx_t *itx;
2442 while ((itx = list_remove_head(list)) != NULL) {
2451 * commit itx to a per-txg i_sync_list
2462 if (itx->itx_lr.lrc_txtype == TX_COMMIT)
2463 zil_commit_waiter_skip(itx->itx_private);
2465 zil_itx_destroy(itx);
2472 while ((itx = list_remove_head(list)) != NULL) {
2474 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
2475 zil_itx_destroy(itx);
2495 * Remove all async itx's with the given oid.
2505 itx_t *itx;
2534 while ((itx = list_remove_head(&clean_list)) != NULL) {
2536 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
2537 zil_itx_destroy(itx);
2543 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
2552 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
2553 zil_async_to_sync(zilog, itx->itx_oid);
2570 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
2584 if (itx->itx_sync) {
2585 list_insert_tail(&itxs->i_sync_list, itx);
2589 LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
2602 list_insert_tail(&ian->ia_list, itx);
2605 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
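The zil_itx_assign() fragments show the routing policy: a TX_RENAME first
forces any async itx's for the affected object to the sync side (via
zil_async_to_sync()), a synchronous itx then goes on the per-txg i_sync_list,
an asynchronous one is filed under a per-object node keyed by the record's
foid, and lrc_txg is stamped from the assigning dmu tx. A toy model of that
routing; the real per-txg itxs_t keeps an AVL tree of itx_async_node_t, which
this sketch replaces with fixed hash buckets, and ordering is simplified to
head insertion (both assumptions for brevity):

    #include <stdint.h>
    #include <stdbool.h>

    #define NBUCKETS 16

    typedef struct itx {
            bool        sync;       /* models itx_sync */
            uint64_t    oid;        /* object id (lr_foid) */
            uint64_t    txg;        /* models lrc_txg */
            struct itx *next;
    } itx_t;

    typedef struct itxs {
            itx_t *sync_list;           /* models i_sync_list */
            itx_t *async[NBUCKETS];     /* stand-in for the AVL tree */
    } itxs_t;

    static void
    push(itx_t **list, itx_t *itx)
    {
            itx->next = *list;      /* zil.c inserts at the tail */
            *list = itx;
    }

    static void
    itx_assign(itxs_t *itxs, itx_t *itx, uint64_t open_txg)
    {
            if (itx->sync)
                    push(&itxs->sync_list, itx);
            else
                    push(&itxs->async[itx->oid % NBUCKETS], itx);
            itx->txg = open_txg;    /* lrc_txg = dmu_tx_get_txg(tx) */
    }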
2693 * If we're adding itx records to the zl_itx_commit_list,
2703 itx_t *itx = NULL;
2713 itx = list_head(sync_list);
2719 while (itx != NULL) {
2720 uint64_t s = zil_itx_full_size(itx);
2723 s = zil_itx_record_size(itx);
2725 itx = list_next(commit_list, itx);
2790 * commit list (it won't prune past the first non-commit itx), and
2800 itx_t *itx;
2804 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
2805 lr_t *lrc = &itx->itx_lr;
2817 * never any itx's for it to wait on), so it's
2820 zil_commit_waiter_skip(itx->itx_private);
2822 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
2827 list_remove(&zilog->zl_itx_commit_list, itx);
2828 zil_itx_destroy(itx);
2831 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
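These lines are the pruning pass over zl_itx_commit_list: leading TX_COMMIT
itx's can be retired on the spot, either by skipping their waiter (no lwb was
ever opened, so there is nothing to wait on) or by linking the waiter to the
last opened lwb, and the pass stops at the first non-commit itx, which the
final IMPLY assertion encodes. A compact sketch; the types are fresh toy
stand-ins and the extern helpers model the zil.c functions of similar names:

    typedef struct citx {
            int          is_commit;     /* lrc_txtype == TX_COMMIT */
            void        *waiter;        /* itx_private */
            struct citx *next;
    } citx_t;
    typedef struct lwb lwb_t;

    extern void waiter_skip(void *zcw);
    extern void waiter_link(void *zcw, lwb_t *lwb);
    extern void citx_destroy(citx_t *itx);

    static void
    prune_commit_list(citx_t **head, lwb_t *last_lwb)
    {
            citx_t *itx;

            while ((itx = *head) != NULL && itx->is_commit) {
                    if (last_lwb == NULL)
                            waiter_skip(itx->waiter); /* nothing to wait on */
                    else
                            waiter_link(itx->waiter, last_lwb);
                    *head = itx->next;
                    citx_destroy(itx);
            }
            /* exits at the first non-commit itx, matching the IMPLY */
    }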
2896 itx_t *itx;
2940 while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
2941 lr_t *lrc = &itx->itx_lr;
2948 zilog_t *, zilog, itx_t *, itx);
2951 zilog_t *, zilog, itx_t *, itx);
2958 * If the txg of this itx has already been synced out, then
2959 * we don't need to commit this itx to an lwb. This is
2960 * because the data of this itx will have already been
2962 * it's still ok to commit an itx whose txg has already
2967 * to an lwb, regardless of whether or not that itx's txg
2972 * As a counter-example, if we skipped TX_COMMIT itx's
2977 * 1. We commit a non-TX_COMMIT itx to an lwb, where the
2978 * itx's txg is 10 and the last synced txg is 9.
2980 * 3. We move to the next itx in the list, it's a TX_COMMIT
2984 * If the itx that is skipped in (3) is the last TX_COMMIT
2985 * itx in the commit list, then it's possible for the lwb
2990 * and eventually DONE, we always commit TX_COMMIT itx's to
2991 * an lwb here, even if that itx's txg has already been
2995 * itx. The point of freezing the pool is to prevent data
3003 lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
3005 list_insert_tail(&nolwb_itxs, itx);
3010 * itx list to somebody else who cares.
3014 zil_itx_full_size(itx);
3020 itx->itx_private, &nolwb_waiters);
3022 list_insert_tail(&nolwb_itxs, itx);
3024 zilog->zl_cur_left -= zil_itx_full_size(itx);
3027 zilog->zl_cur_left -= zil_itx_full_size(itx);
3028 zil_itx_destroy(itx);
3054 * And finally, we have to destroy the itx's that
3056 * the itx's callback if one exists for the itx.
3058 while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
3059 zil_itx_destroy(itx);
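This long run of fragments is the heart of zil_process_commit_list(): each itx
is committed to an lwb unless its txg has already been synced out (its data is
then durable, so the record is simply destroyed), with the exception spelled
out by the comment that TX_COMMIT itx's are always committed to an lwb so
their waiters complete in order, and with itx's parked on "nolwb" lists when
no lwb can be obtained. A condensed sketch of that decision tree; the
frozen-pool test, the zl_cur_left accounting, and tracing are elided, and the
extern helpers are stand-ins for the zil.c functions:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct pitx {
            int          is_commit;
            uint64_t     txg;
            void        *waiter;
            struct pitx *next;
    } pitx_t;
    typedef struct lwb lwb_t;

    extern pitx_t *pop_head(pitx_t **list);
    extern void    push_tail(pitx_t **list, pitx_t *itx);
    extern lwb_t  *lwb_assign(lwb_t *lwb, pitx_t *itx);
    extern void    link_waiter_nolwb(void *zcw, void *nolwb_waiters);
    extern void    pitx_destroy(pitx_t *itx);

    static void
    process_commit_list(pitx_t **commit_list, lwb_t *lwb,
        uint64_t synced_txg, pitx_t **nolwb_itxs, void *nolwb_waiters)
    {
            pitx_t *itx;

            while ((itx = pop_head(commit_list)) != NULL) {
                    bool synced = (itx->txg <= synced_txg);

                    if (!synced || itx->is_commit) {
                            /* must reach an lwb (always, for TX_COMMIT) */
                            if (lwb != NULL) {
                                    lwb = lwb_assign(lwb, itx);
                                    if (lwb == NULL)    /* out of lwbs */
                                            push_tail(nolwb_itxs, itx);
                            } else {
                                    if (itx->is_commit)
                                            link_waiter_nolwb(itx->waiter,
                                                nolwb_waiters);
                                    push_tail(nolwb_itxs, itx);
                            }
                    } else {
                            /* txg already synced: data is durable */
                            pitx_destroy(itx);
                    }
            }
    }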
3107 * of each individual itx.
3124 * (and associated commit itx) is committed to an lwb. If the waiter is
3127 * commit itx will be found in the queue just like the other non-commit
3323 * significantly impacting the latency of each individual itx.
3442 * This function is used to create a TX_COMMIT itx and assign it. This
3460 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
3461 itx->itx_sync = B_TRUE;
3462 itx->itx_private = zcw;
3464 zil_itx_assign(zilog, itx, tx);
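Note why the commit itx is created with just sizeof (lr_t): as the commentary
below reiterates, a commit itx has no on-disk representation, so it needs no
type-specific record body; its only payload is the waiter hung off
itx_private. Assigning it through zil_itx_assign() under an open dmu tx stamps
lrc_txg and places it at the tail of that txg's i_sync_list, which is how the
waiter ends up trailing every record queued ahead of it.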
3476 * been flushed), each itx that was committed to that lwb is also
3479 * When an itx is committed to an lwb, the log record (lr_t) contained
3480 * by the itx is copied into the lwb's zio buffer, and once this buffer
3497 * simply waiting for a specific itx to be committed to disk,
3500 * to create and assign an itx, and then pass a reference to this itx to
3502 * specific itx was committed to disk (instead of waiting for _all_
3505 * When a thread calls zil_commit() a special "commit itx" will be
3506 * generated, along with a corresponding "waiter" for this commit itx.
3510 * This commit itx is inserted into the queue of uncommitted itxs. This
3515 * The commit itx is special; it doesn't have any on-disk representation.
3516 * When a commit itx is "committed" to an lwb, the waiter associated
3541 * itxs to commit is traversed from head to tail, and each itx is
3565 * Thus, we *must not* deem an itx as being committed to stable
3594 * 2. By design, when zil_commit() is called, a commit itx will
3654 * linked to the commit itx using the itx's "itx_private" field.
3655 * Since the commit itx doesn't represent any on-disk state,
3657 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
3662 * We must create the waiter and assign the commit itx prior to
3663 * calling zil_commit_writer(), or else our specific commit itx
3920 * It's possible for an itx to be generated that doesn't dirty
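Putting the commentary above together, the consumer side of the mechanism is a
plain condition-variable wait: once the commit itx is assigned, the calling
thread blocks until some completion path (the lwb's flush completing, or a
skip) marks the waiter done. A minimal self-contained sketch with a toy zcw_t;
the real zil_commit_waiter() additionally uses a timed wait so it can nudge an
lwb that has not yet been issued, which is elided here:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct zcw {
            pthread_mutex_t zcw_lock;
            pthread_cond_t  zcw_cv;
            bool            zcw_done;
    } zcw_t;

    /* Block until a completion path sets zcw_done and signals the cv. */
    static void
    waiter_wait(zcw_t *zcw)
    {
            pthread_mutex_lock(&zcw->zcw_lock);
            while (!zcw->zcw_done)
                    pthread_cond_wait(&zcw->zcw_cv, &zcw->zcw_lock);
            pthread_mutex_unlock(&zcw->zcw_lock);
    }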