Lines matching defs:drc (references to the dmu_recv_cookie_t receive cookie, drc, in dmu_recv.c)

284 dmu_recv_cookie_t *drc = drba->drba_cookie;
285 struct drr_begin *drrb = drc->drc_drrb;
301 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
321 err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
843 dmu_recv_cookie_t *drc = drba->drba_cookie;
844 struct drr_begin *drrb = drc->drc_drrb;
845 const char *tofs = drc->drc_tofs;
846 uint64_t featureflags = drc->drc_featureflags;
886 if (drc->drc_heal) {
888 VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
917 drc->drc_newfs = B_TRUE;
934 if (drc->drc_resumable) {
971 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
994 VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
1040 !drc->drc_heal) {
1056 dmu_recv_cookie_t *drc = drba->drba_cookie;
1058 struct drr_begin *drrb = drc->drc_drrb;
1062 const char *tofs = drc->drc_tofs;
1066 ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
1077 error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
1088 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
1109 if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
1164 drc->drc_fromsnapobj = ds->ds_prev->ds_object;
1171 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
1178 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
1201 error = recv_check_large_blocks(ds, drc->drc_featureflags);
1261 const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
1267 memset(drc, 0, sizeof (dmu_recv_cookie_t));
1268 drc->drc_drr_begin = drr_begin;
1269 drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1270 drc->drc_tosnap = tosnap;
1271 drc->drc_tofs = tofs;
1272 drc->drc_force = force;
1273 drc->drc_heal = heal;
1274 drc->drc_resumable = resumable;
1275 drc->drc_cred = CRED();
1276 drc->drc_proc = curproc;
1277 drc->drc_clone = (origin != NULL);
1279 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
1280 drc->drc_byteswap = B_TRUE;
1282 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1284 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1286 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1291 drc->drc_fp = fp;
1292 drc->drc_voff = *voffp;
1293 drc->drc_featureflags =
1294 DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1296 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
1318 err = receive_read_payload_and_next_header(drc, payloadlen,
1324 err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
1328 kmem_free(drc->drc_next_rrd,
1329 sizeof (*drc->drc_next_rrd));
1334 if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
1335 drc->drc_spill = B_TRUE;
1338 drba.drba_cookie = drc;
1342 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
1357 if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1359 origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
1373 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
1374 nvlist_free(drc->drc_begin_nvl);
1559 receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
1568 (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
1572 zfs_file_t *fp = drc->drc_fp;
1583 drc->drc_voff += len - done - resid;
1589 drc->drc_bytes_read += len;
2708 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2710 dsl_dataset_t *ds = drc->drc_ds;
2713 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2725 if (drc->drc_resumable && drc->drc_should_save &&
2734 if (!drc->drc_heal)
2740 receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
2742 if (drc->drc_byteswap) {
2744 &drc->drc_cksum);
2746 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
2753 * Allocate drc->drc_next_rrd and read the next record's header into
2754 * drc->drc_next_rrd->header.
2758 receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
2764 err = receive_read(drc, len, buf);
2767 receive_cksum(drc, len, buf);
2770 if (drc->drc_rrd != NULL) {
2771 drc->drc_rrd->payload = buf;
2772 drc->drc_rrd->payload_size = len;
2773 drc->drc_rrd->bytes_read = drc->drc_bytes_read;
2779 drc->drc_prev_cksum = drc->drc_cksum;
2781 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
2782 err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
2783 &drc->drc_next_rrd->header);
2784 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
2787 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2788 drc->drc_next_rrd = NULL;
2791 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
2792 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2793 drc->drc_next_rrd = NULL;
2803 receive_cksum(drc,
2805 &drc->drc_next_rrd->header);
2808 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2810 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2812 if (drc->drc_byteswap)
2813 byteswap_record(&drc->drc_next_rrd->header);
2816 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
2817 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2818 drc->drc_next_rrd = NULL;
2822 receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
2845 receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
2848 if (!objlist_exists(drc->drc_ignore_objlist, object)) {
2849 dmu_prefetch(drc->drc_os, object, 1, offset, length,
2858 receive_read_record(dmu_recv_cookie_t *drc)
2862 switch (drc->drc_rrd->header.drr_type) {
2866 &drc->drc_rrd->header.drr_u.drr_object;
2874 err = receive_read_payload_and_next_header(drc, size, buf);
2879 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
2886 objlist_insert(drc->drc_ignore_objlist,
2894 err = receive_read_payload_and_next_header(drc, 0, NULL);
2899 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
2902 err = receive_read_payload_and_next_header(drc, size,
2908 drc->drc_rrd->abd = abd;
2909 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
2916 &drc->drc_rrd->header.drr_u.drr_write_embedded;
2920 err = receive_read_payload_and_next_header(drc, size, buf);
2926 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
2937 err = receive_read_payload_and_next_header(drc, 0, NULL);
2942 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
2943 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
2950 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
2953 err = receive_read_payload_and_next_header(drc, size,
2958 drc->drc_rrd->abd = abd;
2963 err = receive_read_payload_and_next_header(drc, 0, NULL);
3269 resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
3272 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
3273 uint64_t dsobj = dmu_objset_id(drc->drc_os);
3307 dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
3312 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
3314 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
3315 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
3317 drc->drc_bytes_read += bytes;
3320 drc->drc_ignore_objlist = objlist_create();
3323 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
3325 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
3327 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
3328 ASSERT0(drc->drc_os->os_encrypted &&
3329 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
3332 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
3335 ASSERT(drc->drc_os->os_encrypted);
3336 ASSERT(drc->drc_raw);
3338 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
3343 if (!drc->drc_heal) {
3350 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
3351 drc->drc_ds->ds_object, drc->drc_fromsnapobj,
3352 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
3358 drc->drc_ivset_guid = 0;
3360 &drc->drc_ivset_guid);
3362 if (!drc->drc_newfs)
3363 drc->drc_keynvl = fnvlist_dup(keynvl);
3366 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
3367 err = resume_check(drc, drc->drc_begin_nvl);
3377 if (drc->drc_drr_begin->drr_payloadlen == 0) {
3378 err = receive_read_payload_and_next_header(drc, 0, NULL);
3388 drc->drc_should_save = B_TRUE;
3395 rwa->os = drc->drc_os;
3396 rwa->byteswap = drc->drc_byteswap;
3397 rwa->heal = drc->drc_heal;
3398 rwa->tofs = drc->drc_tofs;
3399 rwa->resumable = drc->drc_resumable;
3400 rwa->raw = drc->drc_raw;
3401 rwa->spill = drc->drc_spill;
3402 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
3403 rwa->os->os_raw_receive = drc->drc_raw;
3404 if (drc->drc_heal) {
3405 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
3423 * first loop and drc->drc_rrd was never allocated, or it's later, and
3424 * drc->drc_rrd has been handed off to the writer thread who will free
3426 * stream, then we free drc->drc_rrd and exit.
3434 ASSERT3P(drc->drc_rrd, ==, NULL);
3435 drc->drc_rrd = drc->drc_next_rrd;
3436 drc->drc_next_rrd = NULL;
3437 /* Allocates and loads header into drc->drc_next_rrd */
3438 err = receive_read_record(drc);
3440 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
3441 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
3442 drc->drc_rrd = NULL;
3446 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3448 drc->drc_rrd->payload_size);
3449 drc->drc_rrd = NULL;
3452 ASSERT3P(drc->drc_rrd, ==, NULL);
3453 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
3454 drc->drc_rrd->eos_marker = B_TRUE;
3455 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3472 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
3506 if (drc->drc_next_rrd != NULL)
3507 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
3513 drc->drc_os = NULL;
3516 nvlist_free(drc->drc_begin_nvl);
3524 dmu_recv_cleanup_ds(drc);
3525 nvlist_free(drc->drc_keynvl);
3528 objlist_destroy(drc->drc_ignore_objlist);
3529 drc->drc_ignore_objlist = NULL;
3530 *voffp = drc->drc_voff;
3537 dmu_recv_cookie_t *drc = arg;
3541 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3543 if (drc->drc_heal) {
3545 } else if (!drc->drc_newfs) {
3548 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
3551 if (drc->drc_force) {
3562 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3584 if (drc->drc_keynvl != NULL) {
3585 error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
3586 drc->drc_keynvl, tx);
3593 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3594 origin_head, drc->drc_force, drc->drc_owner, tx);
3600 drc->drc_tosnap, tx, B_TRUE, 1,
3601 drc->drc_cred, drc->drc_proc);
3606 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3608 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
3609 drc->drc_tosnap, tx, B_TRUE, 1,
3610 drc->drc_cred, drc->drc_proc);
3618 dmu_recv_cookie_t *drc = arg;
3620 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
3623 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3624 tx, "snap=%s", drc->drc_tosnap);
3625 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
3627 if (drc->drc_heal) {
3628 if (drc->drc_keynvl != NULL) {
3629 nvlist_free(drc->drc_keynvl);
3630 drc->drc_keynvl = NULL;
3632 } else if (!drc->drc_newfs) {
3635 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3638 if (drc->drc_force) {
3647 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3658 if (drc->drc_keynvl != NULL) {
3659 dsl_crypto_recv_raw_key_sync(drc->drc_ds,
3660 drc->drc_keynvl, tx);
3661 nvlist_free(drc->drc_keynvl);
3662 drc->drc_keynvl = NULL;
3665 VERIFY3P(drc->drc_ds->ds_prev, ==,
3668 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3674 drc->drc_os = NULL;
3677 drc->drc_tosnap, tx);
3682 drc->drc_drrb->drr_creation_time;
3684 drc->drc_drrb->drr_toguid;
3696 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3698 if (drc->drc_owner != NULL)
3699 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3701 dsl_dataset_t *ds = drc->drc_ds;
3703 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3708 drc->drc_drrb->drr_creation_time;
3710 drc->drc_drrb->drr_toguid;
3733 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3746 if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
3751 &drc->drc_ivset_guid, tx));
3762 if (!drc->drc_raw && encrypted) {
3764 drc->drc_ds->ds_object, drc->drc_ds);
3766 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
3767 drc->drc_ds = NULL;
3773 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3781 dsl_dataset_name(drc->drc_ds, name);
3785 return (dsl_sync_task(drc->drc_tofs,
3786 dmu_recv_end_check, dmu_recv_end_sync, drc,
3791 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3793 return (dsl_sync_task(drc->drc_tofs,
3794 dmu_recv_end_check, dmu_recv_end_sync, drc,
3799 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3803 drc->drc_owner = owner;
3805 if (drc->drc_newfs)
3806 error = dmu_recv_new_end(drc);
3808 error = dmu_recv_existing_end(drc);
3811 dmu_recv_cleanup_ds(drc);
3812 nvlist_free(drc->drc_keynvl);
3813 } else if (!drc->drc_heal) {
3814 if (drc->drc_newfs) {
3815 zvol_create_minor(drc->drc_tofs);
3818 drc->drc_tofs, drc->drc_tosnap);
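
The matches above trace the drc cookie through the receive lifecycle: dmu_recv_begin() fills it in from the BEGIN record and takes the dataset holds, dmu_recv_stream() consumes the record stream (advancing drc_voff/drc_bytes_read and cleaning up via dmu_recv_cleanup_ds() on failure), and dmu_recv_end() commits the result through dmu_recv_new_end() or dmu_recv_existing_end(). The following is a minimal caller-side sketch of that sequence, not the actual ioctl path: the recv_sketch() wrapper, its argument choices, and the error handling are illustrative assumptions, and the dmu_recv_begin() parameter list follows a heal-capable OpenZFS tree and may differ between versions.

/*
 * Illustrative sketch only: driving the drc cookie through the entry
 * points matched above (begin -> stream -> end).
 */
#include <sys/dmu_recv.h>

static int
recv_sketch(const char *tofs, const char *tosnap, const char *origin,
    dmu_replay_record_t *drr_begin, nvlist_t *localprops,
    nvlist_t *hidden_args, zfs_file_t *fp, offset_t *voffp)
{
	dmu_recv_cookie_t drc;
	int err;

	/* Validate the BEGIN record, take holds, and fill in the cookie. */
	err = dmu_recv_begin(tofs, tosnap, drr_begin, B_FALSE /* force */,
	    B_FALSE /* heal */, B_FALSE /* resumable */, localprops,
	    hidden_args, origin, &drc, fp, voffp);
	if (err != 0)
		return (err);

	/*
	 * Consume the record stream.  On failure the stream code tears the
	 * receiving dataset down itself (dmu_recv_cleanup_ds()), so the
	 * caller only propagates the error.
	 */
	err = dmu_recv_stream(&drc, voffp);
	if (err != 0)
		return (err);

	/* Snapshot (new fs) or clone-swap (existing fs) the received data. */
	return (dmu_recv_end(&drc, NULL /* owner */));
}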