Lines matching defs:rwa (all uses of struct receive_writer_arg *rwa in the OpenZFS receive path, module/zfs/dmu_recv.c; each entry gives the source line number followed by the matched line)
1405 do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
1417 if (rwa->raw)
1420 err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
1423 SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
1427 if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
1444 if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
1450 rwa->os->os_complevel);
1464 if (!rwa->raw && BP_USES_CRYPT(bp)) {
1471 dsl_pool_t *dp = dmu_objset_pool(rwa->os);
1478 err = dsl_dataset_hold_flags(dp, rwa->tofs,
1487 err = spa_keystore_lookup_key(rwa->os->os_spa,
1501 spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
1522 io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_LOGICAL_BIRTH(bp), bp,
1543 cb_data->spa = rwa->os->os_spa;
1547 err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
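
The do_corrective_recv() hits above (1405-1547) trace the healing-receive path: hold the dnode, bookmark the damaged block, undo or redo compression so the stream copy matches the on-disk block pointer, look up the encryption key for raw streams, and finally zio_rewrite() the block in place. As a rough standalone illustration of the verify-then-rewrite idea (not the real zio machinery), the hypothetical sketch below only overwrites a block when the replacement checksums to the value the block pointer expects:

/*
 * Minimal, standalone sketch of the verify-then-rewrite idea behind
 * do_corrective_recv(): a block arriving in the send stream may only
 * heal the damaged on-disk copy if it checksums to the value recorded
 * in the block pointer.  All names here (sim_blkptr_t, heal_block,
 * fletcher64) are hypothetical; the real code works on zio/arc buffers.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define	BLKSZ	16

typedef struct sim_blkptr {		/* stands in for blkptr_t */
	uint64_t bp_cksum;		/* checksum the data must match */
	uint8_t bp_data[BLKSZ];		/* the (possibly corrupt) copy */
} sim_blkptr_t;

static uint64_t
fletcher64(const uint8_t *buf, size_t len)
{
	uint64_t a = 0, b = 0;
	for (size_t i = 0; i < len; i++) {
		a += buf[i];
		b += a;
	}
	return ((b << 32) | (a & 0xffffffff));
}

/* Rewrite bp's data in place iff the stream copy matches bp_cksum. */
static int
heal_block(sim_blkptr_t *bp, const uint8_t *stream_data)
{
	if (fletcher64(stream_data, BLKSZ) != bp->bp_cksum)
		return (-1);	/* stream copy would not heal this block */
	memcpy(bp->bp_data, stream_data, BLKSZ);
	return (0);
}

int
main(void)
{
	uint8_t good[BLKSZ] = "healthy block!!";
	sim_blkptr_t bp = { .bp_cksum = fletcher64(good, BLKSZ) };
	memcpy(bp.bp_data, good, BLKSZ);
	bp.bp_data[3] ^= 0xff;				/* corrupt it */
	printf("heal: %s\n", heal_block(&bp, good) == 0 ? "ok" : "mismatch");
	return (0);
}

In the real function the comparison is against the block pointer's recorded checksum after the stream data has been decompressed or recompressed to match the on-disk properties, which is what the hits at 1427-1450 are doing.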
1608 save_resume_state(struct receive_writer_arg *rwa,
1613 if (!rwa->resumable)
1620 ASSERT(rwa->bytes_read != 0);
1633 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1634 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1635 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1636 ASSERT3U(rwa->bytes_read, >=,
1637 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1639 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1640 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1641 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
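
save_resume_state() (1608-1641) keeps a resume bookmark in the dataset, and its ASSERTs insist that (object, offset, bytes_read) only ever move forward. A minimal sketch of that invariant, with hypothetical names in place of the per-txg ds_resume_* arrays:

/*
 * Standalone sketch of the monotonic resume bookmark that
 * save_resume_state() maintains.  Names here are hypothetical;
 * the real state lives in dsl_dataset_t, indexed by txg & TXG_MASK.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct resume_state {
	uint64_t rs_object;
	uint64_t rs_offset;
	uint64_t rs_bytes_read;
} resume_state_t;

static void
save_resume_state(resume_state_t *rs, uint64_t object, uint64_t offset,
    uint64_t bytes_read)
{
	/* Progress may only move forward, mirroring the ASSERTs above. */
	assert(object >= rs->rs_object);
	assert(object != rs->rs_object || offset >= rs->rs_offset);
	assert(bytes_read >= rs->rs_bytes_read);

	rs->rs_object = object;
	rs->rs_offset = offset;
	rs->rs_bytes_read = bytes_read;
}

int
main(void)
{
	resume_state_t rs = { 0 };
	save_resume_state(&rs, 5, 0, 1024);
	save_resume_state(&rs, 5, 131072, 150000);
	printf("resume at object %llu offset %llu\n",
	    (unsigned long long)rs.rs_object,
	    (unsigned long long)rs.rs_offset);
	return (0);
}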
1673 receive_handle_existing_object(const struct receive_writer_arg *rwa,
1690 if (rwa->raw && nblkptr != drro->drr_nblkptr)
1728 if (rwa->raw) {
1739 } else if (rwa->full) {
1778 err = receive_object_is_same_generation(rwa->os,
1823 if (rwa->raw) {
1831 err = dmu_free_long_range(rwa->os, drro->drr_object,
1848 if ((rwa->raw && ((doi->doi_indirection > 1 &&
1852 err = dmu_free_long_object(rwa->os, drro->drr_object);
1856 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1873 if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
1874 err = dmu_free_long_range(rwa->os, drro->drr_object,
1884 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1901 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1903 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
1905 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
1909 if (rwa->raw) {
1912 * containing this block and stored it in rwa.
1914 if (drro->drr_object < rwa->or_firstobj ||
1915 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
1929 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
1939 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1944 if (drro->drr_object > rwa->max_object)
1945 rwa->max_object = drro->drr_object;
1956 err = receive_handle_existing_object(rwa, drro, &doi, data,
1968 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1970 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
1982 if (rwa->or_need_sync == ORNS_YES)
1983 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1990 rwa->or_need_sync = ORNS_NO;
2006 err = dmu_object_info(rwa->os, slot, &slot_doi);
2012 err = dmu_free_long_object(rwa->os, slot);
2020 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
2023 tx = dmu_tx_create(rwa->os);
2034 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
2043 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
2046 dn_slots << DNODE_SHIFT, rwa->spill ?
2048 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
2054 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
2062 if (rwa->or_crypt_params_present) {
2079 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
2081 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
2088 dmu_buf_set_crypt_params(db, rwa->or_byteorder,
2089 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
2093 rwa->or_crypt_params_present = B_FALSE;
2096 dmu_object_set_checksum(rwa->os, drro->drr_object,
2098 dmu_object_set_compress(rwa->os, drro->drr_object,
2102 if (rwa->raw) {
2111 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
2113 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
2120 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
2129 if (rwa->raw)
2132 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
2144 if (rwa->byteswap && !rwa->raw) {
2161 save_resume_state(rwa, drro->drr_object, 0, tx);
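
receive_object() opens (1901-1905) by bounds-checking the record against the receiving pool: block size, bonus length, and dnode slot count must all fit within spa_maxblocksize()/spa_maxdnodesize() before anything is claimed or reclaimed. A small sketch of that validation style, using made-up limit constants in place of the spa_* accessors:

/*
 * Sketch of the up-front record validation in receive_object()
 * (lines 1901-1905).  All SIM_* limits are hypothetical constants
 * standing in for spa_maxblocksize() / spa_maxdnodesize() on the
 * receiving pool.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define	SIM_MAXBLOCKSIZE	(16u << 20)	/* e.g. with large_blocks */
#define	SIM_MAXDNODESIZE	1024u		/* e.g. with large_dnode */
#define	SIM_DNODE_SHIFT		9
#define	SIM_MAX_BONUSLEN	(SIM_MAXDNODESIZE - 64u)  /* crude stand-in */

struct sim_drr_object {
	uint64_t drr_blksz;
	uint64_t drr_bonuslen;
	uint64_t drr_dn_slots;
};

static int
validate_object_record(const struct sim_drr_object *drro)
{
	if (drro->drr_blksz > SIM_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > SIM_MAX_BONUSLEN ||
	    drro->drr_dn_slots > (SIM_MAXDNODESIZE >> SIM_DNODE_SHIFT))
		return (EINVAL);
	return (0);
}

int
main(void)
{
	struct sim_drr_object ok = { 131072, 320, 1 };
	struct sim_drr_object bad = { 1u << 30, 320, 1 };
	printf("ok=%d bad=%d\n", validate_object_record(&ok),
	    validate_object_record(&bad));
	return (0);
}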
2169 receive_freeobjects(struct receive_writer_arg *rwa,
2181 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2185 err = dmu_object_info(rwa->os, obj, &doi);
2191 err = dmu_free_long_object(rwa->os, obj);
2196 if (rwa->or_need_sync == ORNS_MAYBE)
2197 rwa->or_need_sync = ORNS_YES;
2206 * rwa->write_batch list.
2209 flush_write_batch_impl(struct receive_writer_arg *rwa)
2214 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
2217 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
2220 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2223 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
2224 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
2226 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2238 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
2242 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
2282 dmu_write_policy(rwa->os, dn, 0, 0, &zp);
2286 if (rwa->raw) {
2291 rwa->byteswap;
2310 } else if (rwa->byteswap) {
2350 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2352 list_remove(&rwa->write_batch, rrd);
2362 flush_write_batch(struct receive_writer_arg *rwa)
2364 if (list_is_empty(&rwa->write_batch))
2366 int err = rwa->err;
2368 err = flush_write_batch_impl(rwa);
2371 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2376 ASSERT(list_is_empty(&rwa->write_batch));
2381 receive_process_write_record(struct receive_writer_arg *rwa,
2393 if (rwa->heal) {
2398 if (rwa->raw)
2401 if (rwa->byteswap) {
2408 err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
2435 err = do_corrective_recv(rwa, drrw, rrd, bp);
2444 if (drrw->drr_object < rwa->last_object ||
2445 (drrw->drr_object == rwa->last_object &&
2446 drrw->drr_offset < rwa->last_offset)) {
2450 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2457 err = flush_write_batch(rwa);
2462 rwa->last_object = drrw->drr_object;
2463 rwa->last_offset = drrw->drr_offset;
2465 if (rwa->last_object > rwa->max_object)
2466 rwa->max_object = rwa->last_object;
2468 list_insert_tail(&rwa->write_batch, rrd);
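
Taken together, flush_write_batch_impl(), flush_write_batch(), and receive_process_write_record() (2209-2468) implement write batching: consecutive in-order writes to one object queue up on rwa->write_batch, an out-of-order record is rejected, a record for a different object flushes the batch first, and on error flush_write_batch() simply drains and frees what remains. A self-contained sketch of that scheme (hypothetical types, printf standing in for the dmu write):

/*
 * Standalone sketch of the write-batching scheme above.  Records for
 * one object accumulate and are committed together; the stream must
 * present writes in (object, offset) order.  No OOM handling; the
 * list and record types are hypothetical reductions of the real ones.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct wr_record {
	uint64_t object;
	uint64_t offset;
	struct wr_record *next;
} wr_record_t;

typedef struct writer {
	uint64_t last_object;
	uint64_t last_offset;
	wr_record_t *batch_head, *batch_tail;
	int err;			/* sticky first error */
} writer_t;

/* Commit (here: just print) every queued record, then empty the list. */
static void
flush_write_batch(writer_t *w)
{
	wr_record_t *r;
	while ((r = w->batch_head) != NULL) {
		w->batch_head = r->next;
		if (w->err == 0)	/* on error, just drain and free */
			printf("write obj %llu off %llu\n",
			    (unsigned long long)r->object,
			    (unsigned long long)r->offset);
		free(r);
	}
	w->batch_tail = NULL;
}

static int
process_write_record(writer_t *w, uint64_t object, uint64_t offset)
{
	/* Stream must present writes in (object, offset) order. */
	if (object < w->last_object ||
	    (object == w->last_object && offset < w->last_offset))
		return (EINVAL);

	/* A new object closes out the batch for the previous one. */
	if (w->batch_head != NULL && w->batch_head->object != object)
		flush_write_batch(w);

	wr_record_t *r = calloc(1, sizeof (*r));
	r->object = object;
	r->offset = offset;
	if (w->batch_tail == NULL)
		w->batch_head = r;
	else
		w->batch_tail->next = r;
	w->batch_tail = r;
	w->last_object = object;
	w->last_offset = offset;
	return (0);
}

int
main(void)
{
	writer_t w = { 0 };
	process_write_record(&w, 3, 0);
	process_write_record(&w, 3, 131072);
	process_write_record(&w, 7, 0);		/* flushes object 3 */
	flush_write_batch(&w);
	printf("out of order -> %d\n", process_write_record(&w, 3, 0));
	return (0);
}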
2477 receive_write_embedded(struct receive_writer_arg *rwa,
2493 if (rwa->raw)
2496 if (drrwe->drr_object > rwa->max_object)
2497 rwa->max_object = drrwe->drr_object;
2499 tx = dmu_tx_create(rwa->os);
2509 dmu_write_embedded(rwa->os, drrwe->drr_object,
2512 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2515 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2521 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2528 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2537 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
2542 if (rwa->raw) {
2549 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2552 if (drrs->drr_object > rwa->max_object)
2553 rwa->max_object = drrs->drr_object;
2555 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2562 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2586 if (rwa->raw) {
2589 rwa->byteswap;
2591 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
2597 abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
2600 if (rwa->byteswap) {
2620 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2628 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2631 if (drrf->drr_object > rwa->max_object)
2632 rwa->max_object = drrf->drr_object;
2634 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2641 receive_object_range(struct receive_writer_arg *rwa,
2647 * the send stream is byteswapped (rwa->byteswap). Finally,
2651 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2667 !rwa->raw)
2670 if (drror->drr_firstobj > rwa->max_object)
2671 rwa->max_object = drror->drr_firstobj;
2678 rwa->or_crypt_params_present = B_TRUE;
2679 rwa->or_firstobj = drror->drr_firstobj;
2680 rwa->or_numslots = drror->drr_numslots;
2681 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
2682 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
2683 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
2684 rwa->or_byteorder = byteorder;
2686 rwa->or_need_sync = ORNS_MAYBE;
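
receive_object_range() (2641-2686) does not write anything itself: it computes the dnode block's byte order as an XOR of host order, stream byteswap, and the record's own order bit (2651), then stashes the salt/IV/MAC and or_firstobj in rwa for receive_object() to apply once and clear (2062-2093). A sketch of that stash-and-consume handoff, with hypothetical names and lengths:

/*
 * Sketch of the handoff between receive_object_range() (which records
 * the raw dnode block's crypt parameters in rwa) and receive_object()
 * (which applies them once and clears the flag).  Lengths and names
 * are hypothetical stand-ins for ZIO_DATA_*_LEN and the or_* fields.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	SALT_LEN	8
#define	IV_LEN		12
#define	MAC_LEN		16

typedef struct or_state {
	bool or_present;
	bool or_byteorder;
	uint64_t or_firstobj;
	uint8_t or_salt[SALT_LEN];
	uint8_t or_iv[IV_LEN];
	uint8_t or_mac[MAC_LEN];
} or_state_t;

/* receive_object_range() side: record parameters for the range. */
static void
stash_crypt_params(or_state_t *s, bool host_order, bool stream_swapped,
    bool record_swapped, uint64_t firstobj, const uint8_t *salt,
    const uint8_t *iv, const uint8_t *mac)
{
	/* XOR of booleans, as at 2651: each byteswap flips the order. */
	s->or_byteorder = host_order ^ stream_swapped ^ record_swapped;
	s->or_firstobj = firstobj;
	memcpy(s->or_salt, salt, SALT_LEN);
	memcpy(s->or_iv, iv, IV_LEN);
	memcpy(s->or_mac, mac, MAC_LEN);
	s->or_present = true;
}

/* receive_object() side: apply once, then clear (as at 2093). */
static bool
consume_crypt_params(or_state_t *s)
{
	if (!s->or_present)
		return (false);
	printf("apply crypt params at firstobj %llu\n",
	    (unsigned long long)s->or_firstobj);
	s->or_present = false;
	return (true);
}

int
main(void)
{
	or_state_t s = { 0 };
	uint8_t salt[SALT_LEN] = {0}, iv[IV_LEN] = {0}, mac[MAC_LEN] = {0};
	stash_crypt_params(&s, true, false, false, 64, salt, iv, mac);
	printf("first: %d, second: %d\n", consume_crypt_params(&s),
	    consume_crypt_params(&s));
	return (0);
}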
2696 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
2703 return (receive_free(rwa, &drrf));
3087 receive_process_record(struct receive_writer_arg *rwa,
3093 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
3094 rwa->bytes_read = rrd->bytes_read;
3097 if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3108 if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3109 err = flush_write_batch(rwa);
3128 err = receive_object(rwa, drro, rrd->payload);
3137 err = receive_freeobjects(rwa, drrfo);
3142 err = receive_process_write_record(rwa, rrd);
3143 if (rwa->heal) {
3166 err = receive_write_embedded(rwa, drrwe, rrd->payload);
3174 err = receive_free(rwa, drrf);
3180 err = receive_spill(rwa, drrs, rrd->abd);
3191 err = receive_object_range(rwa, drror);
3197 err = receive_redact(rwa, drrr);
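
receive_process_record() (3087-3197) is a dispatcher: because writes are batched, every record type other than DRR_WRITE first flushes the pending batch (3108-3109) so records land on disk in stream order. A toy version of that dispatch, with a reduced record-type enum in place of the real drr_* types:

/*
 * Sketch of the dispatch loop in receive_process_record(): writes
 * are batched, so any record that is not a write must flush the
 * batch before it is applied, keeping on-disk ordering equal to
 * stream ordering.  The types below are hypothetical reductions.
 */
#include <stdio.h>

typedef enum { R_OBJECT, R_WRITE, R_FREE, R_SPILL } rtype_t;

static void
flush_write_batch(void)
{
	printf("  (flush pending writes)\n");
}

static void
process_record(rtype_t type)
{
	if (type != R_WRITE)
		flush_write_batch();

	switch (type) {
	case R_OBJECT:
		printf("receive_object\n");
		break;
	case R_WRITE:
		printf("queue write\n");
		break;
	case R_FREE:
		printf("receive_free\n");
		break;
	case R_SPILL:
		printf("receive_spill\n");
		break;
	}
}

int
main(void)
{
	rtype_t stream[] = { R_OBJECT, R_WRITE, R_WRITE, R_FREE, R_WRITE };
	for (unsigned i = 0; i < sizeof (stream) / sizeof (stream[0]); i++)
		process_record(stream[i]);
	return (0);
}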
3217 struct receive_writer_arg *rwa = arg;
3221 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
3222 rrd = bqueue_dequeue(&rwa->q)) {
3229 if (rwa->err == 0) {
3230 err = receive_process_record(rwa, rrd);
3245 if (err != EAGAIN || rwa->heal) {
3246 if (rwa->err == 0)
3247 rwa->err = err;
3253 if (rwa->heal) {
3254 zio_wait(rwa->heal_pio);
3256 int err = flush_write_batch(rwa);
3257 if (rwa->err == 0)
3258 rwa->err = err;
3260 mutex_enter(&rwa->mutex);
3261 rwa->done = B_TRUE;
3262 cv_signal(&rwa->cv);
3263 mutex_exit(&rwa->mutex);
3310 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
3390 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
3393 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
3394 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
3395 rwa->os = drc->drc_os;
3396 rwa->byteswap = drc->drc_byteswap;
3397 rwa->heal = drc->drc_heal;
3398 rwa->tofs = drc->drc_tofs;
3399 rwa->resumable = drc->drc_resumable;
3400 rwa->raw = drc->drc_raw;
3401 rwa->spill = drc->drc_spill;
3402 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
3403 rwa->os->os_raw_receive = drc->drc_raw;
3405 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
3408 list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
3411 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
3414 * We're reading rwa->err without locks, which is safe since we are the
3420 * We can leave this loop in 3 ways: First, if rwa->err is
3428 while (rwa->err == 0) {
3446 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3455 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3457 mutex_enter(&rwa->mutex);
3458 while (!rwa->done) {
3463 (void) cv_wait_sig(&rwa->cv, &rwa->mutex);
3465 mutex_exit(&rwa->mutex);
3473 uint64_t obj = rwa->max_object + 1;
3478 free_err = dmu_free_long_object(rwa->os, obj);
3482 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
3493 cv_destroy(&rwa->cv);
3494 mutex_destroy(&rwa->mutex);
3495 bqueue_destroy(&rwa->q);
3496 list_destroy(&rwa->write_batch);
3498 err = rwa->err;
3515 kmem_free(rwa, sizeof (*rwa));
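
The remaining hits show the lifecycle around the writer: the stream-reading side zallocs rwa, initializes the queue, cv, and mutex (3390-3408), spawns receive_writer_thread(), enqueues records while rwa->err stays clear, terminates the stream with an eos marker (3455), sleeps on rwa->cv until the writer sets rwa->done, then harvests rwa->err and tears everything down. A compact pthread sketch of that handshake, using a plain mutex+cv list in place of ZFS's bqueue and hypothetical record contents:

/*
 * Standalone pthread sketch of the producer/consumer lifecycle above:
 * records flow through a queue, an eos marker ends the stream, and the
 * writer signals done through the shared cv.  All names hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct record {
	int data;
	bool eos_marker;
	struct record *next;
} record_t;

typedef struct rwa {
	pthread_mutex_t mutex;
	pthread_cond_t cv;
	record_t *q_head, *q_tail;
	bool done;
	int err;
} rwa_t;

static void
enqueue(rwa_t *rwa, record_t *r)
{
	pthread_mutex_lock(&rwa->mutex);
	if (rwa->q_tail == NULL)
		rwa->q_head = r;
	else
		rwa->q_tail->next = r;
	rwa->q_tail = r;
	pthread_cond_signal(&rwa->cv);
	pthread_mutex_unlock(&rwa->mutex);
}

static record_t *
dequeue(rwa_t *rwa)
{
	pthread_mutex_lock(&rwa->mutex);
	while (rwa->q_head == NULL)
		pthread_cond_wait(&rwa->cv, &rwa->mutex);
	record_t *r = rwa->q_head;
	if ((rwa->q_head = r->next) == NULL)
		rwa->q_tail = NULL;
	pthread_mutex_unlock(&rwa->mutex);
	return (r);
}

static void *
receive_writer_thread(void *arg)
{
	rwa_t *rwa = arg;
	record_t *r;

	/* Drain records until the eos marker; keep only the first error. */
	while (!(r = dequeue(rwa))->eos_marker) {
		if (rwa->err == 0 && r->data < 0)
			rwa->err = 22;
		free(r);
	}
	free(r);

	pthread_mutex_lock(&rwa->mutex);
	rwa->done = true;		/* as at 3260-3263 */
	pthread_cond_signal(&rwa->cv);
	pthread_mutex_unlock(&rwa->mutex);
	return (NULL);
}

int
main(void)
{
	rwa_t rwa = { .mutex = PTHREAD_MUTEX_INITIALIZER,
	    .cv = PTHREAD_COND_INITIALIZER };
	pthread_t tid;
	pthread_create(&tid, NULL, receive_writer_thread, &rwa);

	for (int i = 1; i <= 3; i++) {		/* feed a few records */
		record_t *r = calloc(1, sizeof (*r));
		r->data = i;
		enqueue(&rwa, r);
	}
	record_t *eos = calloc(1, sizeof (*eos));
	eos->eos_marker = true;
	enqueue(&rwa, eos);			/* as at 3455 */

	pthread_mutex_lock(&rwa.mutex);
	while (!rwa.done)			/* as at 3457-3465 */
		pthread_cond_wait(&rwa.cv, &rwa.mutex);
	pthread_mutex_unlock(&rwa.mutex);
	pthread_join(tid, NULL);
	printf("writer err = %d\n", rwa.err);	/* as at 3498 */
	return (0);
}

As in the real code, err is written only by the writer thread while it runs, so reading it after the done handshake needs no extra locking (compare the comment referenced at 3414).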