xref: /onnv-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 7294:c9c31ef4c960)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51544Seschrock  * Common Development and Distribution License (the "License").
61544Seschrock  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
226423Sgw25295  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29789Sahrens  * ZFS volume emulation driver.
30789Sahrens  *
31789Sahrens  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
32789Sahrens  * Volumes are accessed through the symbolic links named:
33789Sahrens  *
34789Sahrens  * /dev/zvol/dsk/<pool_name>/<dataset_name>
35789Sahrens  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
36789Sahrens  *
37789Sahrens  * These links are created by the ZFS-specific devfsadm link generator.
38789Sahrens  * Volumes are persistent through reboot.  No user command needs to be
39789Sahrens  * run before opening and using a device.
40789Sahrens  */
41789Sahrens 
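/*
 * Illustrative example (not part of this driver): no special setup is
 * required before using a volume; a consumer can simply open the device
 * node, e.g.
 *
 *	int fd = open("/dev/zvol/rdsk/tank/vol1", O_RDWR);
 *
 * where "tank/vol1" stands for a hypothetical <pool_name>/<dataset_name>.
 */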
42789Sahrens #include <sys/types.h>
43789Sahrens #include <sys/param.h>
44789Sahrens #include <sys/errno.h>
45789Sahrens #include <sys/uio.h>
46789Sahrens #include <sys/buf.h>
47789Sahrens #include <sys/modctl.h>
48789Sahrens #include <sys/open.h>
49789Sahrens #include <sys/kmem.h>
50789Sahrens #include <sys/conf.h>
51789Sahrens #include <sys/cmn_err.h>
52789Sahrens #include <sys/stat.h>
53789Sahrens #include <sys/zap.h>
54789Sahrens #include <sys/spa.h>
55789Sahrens #include <sys/zio.h>
566423Sgw25295 #include <sys/dmu_traverse.h>
576423Sgw25295 #include <sys/dnode.h>
586423Sgw25295 #include <sys/dsl_dataset.h>
59789Sahrens #include <sys/dsl_prop.h>
60789Sahrens #include <sys/dkio.h>
61789Sahrens #include <sys/efi_partition.h>
62789Sahrens #include <sys/byteorder.h>
63789Sahrens #include <sys/pathname.h>
64789Sahrens #include <sys/ddi.h>
65789Sahrens #include <sys/sunddi.h>
66789Sahrens #include <sys/crc32.h>
67789Sahrens #include <sys/dirent.h>
68789Sahrens #include <sys/policy.h>
69789Sahrens #include <sys/fs/zfs.h>
70789Sahrens #include <sys/zfs_ioctl.h>
71789Sahrens #include <sys/mkdev.h>
721141Sperrin #include <sys/zil.h>
732237Smaybee #include <sys/refcount.h>
743755Sperrin #include <sys/zfs_znode.h>
753755Sperrin #include <sys/zfs_rlock.h>
766423Sgw25295 #include <sys/vdev_disk.h>
776423Sgw25295 #include <sys/vdev_impl.h>
786423Sgw25295 #include <sys/zvol.h>
796423Sgw25295 #include <sys/dumphdr.h>
80789Sahrens 
81789Sahrens #include "zfs_namecheck.h"
82789Sahrens 
836423Sgw25295 static void *zvol_state;
84789Sahrens 
856423Sgw25295 #define	ZVOL_DUMPSIZE		"dumpsize"
86789Sahrens 
87789Sahrens /*
88789Sahrens  * This lock protects the zvol_state structure from being modified
89789Sahrens  * while it's being used, e.g. an open that comes in before a create
90789Sahrens  * finishes.  It also protects temporary opens of the dataset so that,
91789Sahrens  * e.g., an open doesn't get a spurious EBUSY.
92789Sahrens  */
93789Sahrens static kmutex_t zvol_state_lock;
94789Sahrens static uint32_t zvol_minors;
95789Sahrens 
966423Sgw25295 #define	NUM_EXTENTS	((SPA_MAXBLOCKSIZE) / sizeof (zvol_extent_t))
976423Sgw25295 
986423Sgw25295 typedef struct zvol_extent {
996423Sgw25295 	dva_t		ze_dva;		/* dva associated with this extent */
1006423Sgw25295 	uint64_t	ze_stride;	/* extent stride */
1016423Sgw25295 	uint64_t	ze_size;	/* number of blocks in extent */
1026423Sgw25295 } zvol_extent_t;
1036423Sgw25295 
1046423Sgw25295 /*
1056423Sgw25295  * The list of extents associated with the dump device
1066423Sgw25295  */
1076423Sgw25295 typedef struct zvol_ext_list {
1086423Sgw25295 	zvol_extent_t		zl_extents[NUM_EXTENTS];
1096423Sgw25295 	struct zvol_ext_list	*zl_next;
1106423Sgw25295 } zvol_ext_list_t;
1116423Sgw25295 
112789Sahrens /*
113789Sahrens  * The in-core state of each volume.
114789Sahrens  */
115789Sahrens typedef struct zvol_state {
116789Sahrens 	char		zv_name[MAXPATHLEN]; /* pool/dataset name */
117789Sahrens 	uint64_t	zv_volsize;	/* amount of space we advertise */
1183063Sperrin 	uint64_t	zv_volblocksize; /* volume block size */
119789Sahrens 	minor_t		zv_minor;	/* minor number */
120789Sahrens 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
1216423Sgw25295 	uint8_t		zv_flags;	/* readonly; dumpified */
122789Sahrens 	objset_t	*zv_objset;	/* objset handle */
123789Sahrens 	uint32_t	zv_mode;	/* DS_MODE_* flags at open time */
124789Sahrens 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
125789Sahrens 	uint32_t	zv_total_opens;	/* total open count */
1261141Sperrin 	zilog_t		*zv_zilog;	/* ZIL handle */
1276423Sgw25295 	zvol_ext_list_t	*zv_list;	/* List of extents for dump */
1281141Sperrin 	uint64_t	zv_txg_assign;	/* txg to assign during ZIL replay */
1293755Sperrin 	znode_t		zv_znode;	/* for range locking */
130789Sahrens } zvol_state_t;
131789Sahrens 
1323063Sperrin /*
1336423Sgw25295  * zvol specific flags
1346423Sgw25295  */
1356423Sgw25295 #define	ZVOL_RDONLY	0x1
1366423Sgw25295 #define	ZVOL_DUMPIFIED	0x2
1376423Sgw25295 
1386423Sgw25295 /*
1393063Sperrin  * zvol maximum transfer in one DMU tx.
1403063Sperrin  */
1413063Sperrin int zvol_maxphys = DMU_MAX_ACCESS/2;
1423063Sperrin 
1436423Sgw25295 extern int zfs_set_prop_nvlist(const char *, nvlist_t *);
1443638Sbillm static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
1456423Sgw25295 static int zvol_dumpify(zvol_state_t *zv);
1466423Sgw25295 static int zvol_dump_fini(zvol_state_t *zv);
1476423Sgw25295 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
1483063Sperrin 
149789Sahrens static void
1504787Sahrens zvol_size_changed(zvol_state_t *zv, major_t maj)
151789Sahrens {
1524787Sahrens 	dev_t dev = makedevice(maj, zv->zv_minor);
153789Sahrens 
154789Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
155789Sahrens 	    "Size", zv->zv_volsize) == DDI_SUCCESS);
156789Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
157789Sahrens 	    "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS);
1586423Sgw25295 
1596423Sgw25295 	/* Notify specfs to invalidate the cached size */
1606423Sgw25295 	spec_size_invalidate(dev, VBLK);
1616423Sgw25295 	spec_size_invalidate(dev, VCHR);
162789Sahrens }
163789Sahrens 
164789Sahrens int
1652676Seschrock zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
166789Sahrens {
1672676Seschrock 	if (volsize == 0)
168789Sahrens 		return (EINVAL);
169789Sahrens 
1702676Seschrock 	if (volsize % blocksize != 0)
1711133Seschrock 		return (EINVAL);
1721133Seschrock 
173789Sahrens #ifdef _ILP32
1742676Seschrock 	if (volsize - 1 > SPEC_MAXOFFSET_T)
175789Sahrens 		return (EOVERFLOW);
176789Sahrens #endif
177789Sahrens 	return (0);
178789Sahrens }
179789Sahrens 
180789Sahrens int
1812676Seschrock zvol_check_volblocksize(uint64_t volblocksize)
182789Sahrens {
1832676Seschrock 	if (volblocksize < SPA_MINBLOCKSIZE ||
1842676Seschrock 	    volblocksize > SPA_MAXBLOCKSIZE ||
1852676Seschrock 	    !ISP2(volblocksize))
186789Sahrens 		return (EDOM);
187789Sahrens 
188789Sahrens 	return (0);
189789Sahrens }
190789Sahrens 
191789Sahrens static void
192789Sahrens zvol_readonly_changed_cb(void *arg, uint64_t newval)
193789Sahrens {
194789Sahrens 	zvol_state_t *zv = arg;
195789Sahrens 
1966423Sgw25295 	if (newval)
1976423Sgw25295 		zv->zv_flags |= ZVOL_RDONLY;
1986423Sgw25295 	else
1996423Sgw25295 		zv->zv_flags &= ~ZVOL_RDONLY;
200789Sahrens }
201789Sahrens 
202789Sahrens int
2032885Sahrens zvol_get_stats(objset_t *os, nvlist_t *nv)
204789Sahrens {
205789Sahrens 	int error;
206789Sahrens 	dmu_object_info_t doi;
2072885Sahrens 	uint64_t val;
208789Sahrens 
209789Sahrens 
2102885Sahrens 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
211789Sahrens 	if (error)
212789Sahrens 		return (error);
213789Sahrens 
2142885Sahrens 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
2152885Sahrens 
216789Sahrens 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
217789Sahrens 
2182885Sahrens 	if (error == 0) {
2192885Sahrens 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
2202885Sahrens 		    doi.doi_data_block_size);
2212885Sahrens 	}
222789Sahrens 
223789Sahrens 	return (error);
224789Sahrens }
225789Sahrens 
226789Sahrens /*
227789Sahrens  * Find a free minor number.
228789Sahrens  */
229789Sahrens static minor_t
230789Sahrens zvol_minor_alloc(void)
231789Sahrens {
232789Sahrens 	minor_t minor;
233789Sahrens 
234789Sahrens 	ASSERT(MUTEX_HELD(&zvol_state_lock));
235789Sahrens 
236789Sahrens 	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++)
237789Sahrens 		if (ddi_get_soft_state(zvol_state, minor) == NULL)
238789Sahrens 			return (minor);
239789Sahrens 
240789Sahrens 	return (0);
241789Sahrens }
242789Sahrens 
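/*
 * Look up the in-core state for the named volume.  The caller must hold
 * zvol_state_lock.
 */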
243789Sahrens static zvol_state_t *
2442676Seschrock zvol_minor_lookup(const char *name)
245789Sahrens {
246789Sahrens 	minor_t minor;
247789Sahrens 	zvol_state_t *zv;
248789Sahrens 
249789Sahrens 	ASSERT(MUTEX_HELD(&zvol_state_lock));
250789Sahrens 
251789Sahrens 	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {
252789Sahrens 		zv = ddi_get_soft_state(zvol_state, minor);
253789Sahrens 		if (zv == NULL)
254789Sahrens 			continue;
255789Sahrens 		if (strcmp(zv->zv_name, name) == 0)
256789Sahrens 			break;
257789Sahrens 	}
258789Sahrens 
259789Sahrens 	return (zv);
260789Sahrens }
261789Sahrens 
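/*
 * Start a new extent at the first DVA of this block pointer.  The stride
 * is unknown until a second block is seen; the size starts at one block.
 */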
2626423Sgw25295 void
2636423Sgw25295 zvol_init_extent(zvol_extent_t *ze, blkptr_t *bp)
2646423Sgw25295 {
2656423Sgw25295 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
2666423Sgw25295 	ze->ze_stride = 0;
2676423Sgw25295 	ze->ze_size = 1;
2686423Sgw25295 }
2696423Sgw25295 
2706423Sgw25295 /* extent mapping arg */
2716423Sgw25295 struct maparg {
2726423Sgw25295 	zvol_ext_list_t	*ma_list;
2736423Sgw25295 	zvol_extent_t	*ma_extent;
2746423Sgw25295 	int		ma_gang;
2756423Sgw25295 };
2766423Sgw25295 
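/*
 * traverse_zvol() callback that coalesces the volume's data blocks into
 * (dva, stride, size) extents.  A block on the same vdev whose offset
 * advances by a constant stride extends the current extent; anything
 * else starts a new extent.  Gang blocks abort the traversal.
 */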
2776423Sgw25295 /*ARGSUSED*/
2786423Sgw25295 static int
2796423Sgw25295 zvol_map_block(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
2806423Sgw25295 {
2816423Sgw25295 	zbookmark_t *zb = &bc->bc_bookmark;
2826423Sgw25295 	blkptr_t *bp = &bc->bc_blkptr;
2836423Sgw25295 	void *data = bc->bc_data;
2846423Sgw25295 	dnode_phys_t *dnp = bc->bc_dnode;
2856423Sgw25295 	struct maparg *ma = (struct maparg *)arg;
2866423Sgw25295 	uint64_t stride;
2876423Sgw25295 
2886423Sgw25295 	/* If there is an error, then keep trying to make progress */
2896423Sgw25295 	if (bc->bc_errno)
2906423Sgw25295 		return (ERESTART);
2916423Sgw25295 
2926423Sgw25295 #ifdef ZFS_DEBUG
2936423Sgw25295 	if (zb->zb_level == -1) {
2946423Sgw25295 		ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
2956423Sgw25295 		ASSERT3U(BP_GET_LEVEL(bp), ==, 0);
2966423Sgw25295 	} else {
2976423Sgw25295 		ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
2986423Sgw25295 		ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
2996423Sgw25295 	}
3006423Sgw25295 
3016423Sgw25295 	if (zb->zb_level > 0) {
3026423Sgw25295 		uint64_t fill = 0;
3036423Sgw25295 		blkptr_t *bpx, *bpend;
3046423Sgw25295 
3056423Sgw25295 		for (bpx = data, bpend = bpx + BP_GET_LSIZE(bp) / sizeof (*bpx);
3066423Sgw25295 		    bpx < bpend; bpx++) {
3076423Sgw25295 			if (bpx->blk_birth != 0) {
3086423Sgw25295 				fill += bpx->blk_fill;
3096423Sgw25295 			} else {
3106423Sgw25295 				ASSERT(bpx->blk_fill == 0);
3116423Sgw25295 			}
3126423Sgw25295 		}
3136423Sgw25295 		ASSERT3U(fill, ==, bp->blk_fill);
3146423Sgw25295 	}
3156423Sgw25295 
3166423Sgw25295 	if (zb->zb_level == 0 && dnp->dn_type == DMU_OT_DNODE) {
3176423Sgw25295 		uint64_t fill = 0;
3186423Sgw25295 		dnode_phys_t *dnx, *dnend;
3196423Sgw25295 
3206423Sgw25295 		for (dnx = data, dnend = dnx + (BP_GET_LSIZE(bp)>>DNODE_SHIFT);
3216423Sgw25295 		    dnx < dnend; dnx++) {
3226423Sgw25295 			if (dnx->dn_type != DMU_OT_NONE)
3236423Sgw25295 				fill++;
3246423Sgw25295 		}
3256423Sgw25295 		ASSERT3U(fill, ==, bp->blk_fill);
3266423Sgw25295 	}
3276423Sgw25295 #endif
3286423Sgw25295 
3296423Sgw25295 	if (zb->zb_level || dnp->dn_type == DMU_OT_DNODE)
3306423Sgw25295 		return (0);
3316423Sgw25295 
3326423Sgw25295 	/* Abort immediately if we have encountered gang blocks */
3336423Sgw25295 	if (BP_IS_GANG(bp)) {
3346423Sgw25295 		ma->ma_gang++;
3356423Sgw25295 		return (EINTR);
3366423Sgw25295 	}
3376423Sgw25295 
3386423Sgw25295 	/* first time? */
3396423Sgw25295 	if (ma->ma_extent->ze_size == 0) {
3406423Sgw25295 		zvol_init_extent(ma->ma_extent, bp);
3416423Sgw25295 		return (0);
3426423Sgw25295 	}
3436423Sgw25295 
3446423Sgw25295 	stride = (DVA_GET_OFFSET(&bp->blk_dva[0])) -
3456423Sgw25295 	    ((DVA_GET_OFFSET(&ma->ma_extent->ze_dva)) +
3466423Sgw25295 	    (ma->ma_extent->ze_size - 1) * (ma->ma_extent->ze_stride));
3476423Sgw25295 	if (DVA_GET_VDEV(BP_IDENTITY(bp)) ==
3486423Sgw25295 	    DVA_GET_VDEV(&ma->ma_extent->ze_dva)) {
3496423Sgw25295 		if (ma->ma_extent->ze_stride == 0) {
3506423Sgw25295 			/* second block in this extent */
3516423Sgw25295 			ma->ma_extent->ze_stride = stride;
3526423Sgw25295 			ma->ma_extent->ze_size++;
3536423Sgw25295 			return (0);
3546423Sgw25295 		} else if (ma->ma_extent->ze_stride == stride) {
3556423Sgw25295 			/*
3566423Sgw25295 			 * the block we allocated has the same
3576423Sgw25295 			 * stride
3586423Sgw25295 			 */
3596423Sgw25295 			ma->ma_extent->ze_size++;
3606423Sgw25295 			return (0);
3616423Sgw25295 		}
3626423Sgw25295 	}
3636423Sgw25295 
3646423Sgw25295 	/*
3656423Sgw25295 	 * dtrace -n 'zfs-dprintf
3666423Sgw25295 	 * /stringof(arg0) == "zvol.c"/
3676423Sgw25295 	 * {
3686423Sgw25295 	 *	printf("%s: %s", stringof(arg1), stringof(arg3))
3696423Sgw25295 	 * } '
3706423Sgw25295 	 */
3716423Sgw25295 	dprintf("ma_extent 0x%lx mrstride 0x%lx stride %lx\n",
3726423Sgw25295 	    ma->ma_extent->ze_size, ma->ma_extent->ze_stride, stride);
3736423Sgw25295 	dprintf_bp(bp, "%s", "next blkptr:");
3746423Sgw25295 	/* start a new extent */
3756423Sgw25295 	if (ma->ma_extent == &ma->ma_list->zl_extents[NUM_EXTENTS - 1]) {
3766423Sgw25295 		ma->ma_list->zl_next = kmem_zalloc(sizeof (zvol_ext_list_t),
3776423Sgw25295 		    KM_SLEEP);
3786423Sgw25295 		ma->ma_list = ma->ma_list->zl_next;
3796423Sgw25295 		ma->ma_extent = &ma->ma_list->zl_extents[0];
3806423Sgw25295 	} else {
3816423Sgw25295 		ma->ma_extent++;
3826423Sgw25295 	}
3836423Sgw25295 	zvol_init_extent(ma->ma_extent, bp);
3846423Sgw25295 	return (0);
3856423Sgw25295 }
3866423Sgw25295 
3874543Smarks /* ARGSUSED */
388789Sahrens void
3894543Smarks zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
390789Sahrens {
3915331Samw 	zfs_creat_t *zct = arg;
3925331Samw 	nvlist_t *nvprops = zct->zct_props;
393789Sahrens 	int error;
3942676Seschrock 	uint64_t volblocksize, volsize;
395789Sahrens 
3964543Smarks 	VERIFY(nvlist_lookup_uint64(nvprops,
3972676Seschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
3984543Smarks 	if (nvlist_lookup_uint64(nvprops,
3992676Seschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
4002676Seschrock 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
4012676Seschrock 
4022676Seschrock 	/*
4036423Sgw25295 	 * These properties must be removed from the list so the generic
4042676Seschrock 	 * property setting step won't apply to them.
4052676Seschrock 	 */
4064543Smarks 	VERIFY(nvlist_remove_all(nvprops,
4072676Seschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
4084543Smarks 	(void) nvlist_remove_all(nvprops,
4092676Seschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
4102676Seschrock 
4112676Seschrock 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
412789Sahrens 	    DMU_OT_NONE, 0, tx);
413789Sahrens 	ASSERT(error == 0);
414789Sahrens 
415789Sahrens 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
416789Sahrens 	    DMU_OT_NONE, 0, tx);
417789Sahrens 	ASSERT(error == 0);
418789Sahrens 
4192676Seschrock 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
420789Sahrens 	ASSERT(error == 0);
421789Sahrens }
422789Sahrens 
423789Sahrens /*
4241141Sperrin  * Replay a TX_WRITE ZIL transaction that didn't get committed
4251141Sperrin  * after a system failure
4261141Sperrin  */
4271141Sperrin static int
4281141Sperrin zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
4291141Sperrin {
4301141Sperrin 	objset_t *os = zv->zv_objset;
4311141Sperrin 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
4321141Sperrin 	uint64_t off = lr->lr_offset;
4331141Sperrin 	uint64_t len = lr->lr_length;
4341141Sperrin 	dmu_tx_t *tx;
4351141Sperrin 	int error;
4361141Sperrin 
4371141Sperrin 	if (byteswap)
4381141Sperrin 		byteswap_uint64_array(lr, sizeof (*lr));
4391141Sperrin 
4401141Sperrin 	tx = dmu_tx_create(os);
4411141Sperrin 	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
4421141Sperrin 	error = dmu_tx_assign(tx, zv->zv_txg_assign);
4431141Sperrin 	if (error) {
4441141Sperrin 		dmu_tx_abort(tx);
4451141Sperrin 	} else {
4461141Sperrin 		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
4471141Sperrin 		dmu_tx_commit(tx);
4481141Sperrin 	}
4491141Sperrin 
4501141Sperrin 	return (error);
4511141Sperrin }
4521141Sperrin 
4531141Sperrin /* ARGSUSED */
4541141Sperrin static int
4551141Sperrin zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
4561141Sperrin {
4571141Sperrin 	return (ENOTSUP);
4581141Sperrin }
4591141Sperrin 
4601141Sperrin /*
4611141Sperrin  * Callback vectors for replaying records.
4621141Sperrin  * Only TX_WRITE is needed for zvol.
4631141Sperrin  */
4641141Sperrin zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
4651141Sperrin 	zvol_replay_err,	/* 0 no such transaction type */
4661141Sperrin 	zvol_replay_err,	/* TX_CREATE */
4671141Sperrin 	zvol_replay_err,	/* TX_MKDIR */
4681141Sperrin 	zvol_replay_err,	/* TX_MKXATTR */
4691141Sperrin 	zvol_replay_err,	/* TX_SYMLINK */
4701141Sperrin 	zvol_replay_err,	/* TX_REMOVE */
4711141Sperrin 	zvol_replay_err,	/* TX_RMDIR */
4721141Sperrin 	zvol_replay_err,	/* TX_LINK */
4731141Sperrin 	zvol_replay_err,	/* TX_RENAME */
4741141Sperrin 	zvol_replay_write,	/* TX_WRITE */
4751141Sperrin 	zvol_replay_err,	/* TX_TRUNCATE */
4761141Sperrin 	zvol_replay_err,	/* TX_SETATTR */
4771141Sperrin 	zvol_replay_err,	/* TX_ACL */
4781141Sperrin };
4791141Sperrin 
4801141Sperrin /*
4816423Sgw25295  * Reconstruct the dva that gets us to the desired offset (offset
4826423Sgw25295  * is in bytes).
4836423Sgw25295  */
4846423Sgw25295 int
4856423Sgw25295 zvol_get_dva(zvol_state_t *zv, uint64_t offset, dva_t *dva)
4866423Sgw25295 {
4876423Sgw25295 	zvol_ext_list_t	*zl;
4886423Sgw25295 	zvol_extent_t	*ze;
4896423Sgw25295 	int		idx;
4906423Sgw25295 	uint64_t	tmp;
4916423Sgw25295 
4926423Sgw25295 	if ((zl = zv->zv_list) == NULL)
4936423Sgw25295 		return (EIO);
4946423Sgw25295 	idx = 0;
4956423Sgw25295 	ze =  &zl->zl_extents[0];
4966423Sgw25295 	while (offset >= ze->ze_size * zv->zv_volblocksize) {
4976423Sgw25295 		offset -= ze->ze_size * zv->zv_volblocksize;
4986423Sgw25295 
4996423Sgw25295 		if (idx == NUM_EXTENTS - 1) {
5006423Sgw25295 			/* we've reached the end of this array */
5016423Sgw25295 			ASSERT(zl->zl_next != NULL);
5026423Sgw25295 			if (zl->zl_next == NULL)
5036423Sgw25295 				return (-1);
5046423Sgw25295 			zl = zl->zl_next;
5056423Sgw25295 			ze = &zl->zl_extents[0];
5066423Sgw25295 			idx = 0;
5076423Sgw25295 		} else {
5086423Sgw25295 			ze++;
5096423Sgw25295 			idx++;
5106423Sgw25295 		}
5116423Sgw25295 	}
5126423Sgw25295 	DVA_SET_VDEV(dva, DVA_GET_VDEV(&ze->ze_dva));
5136423Sgw25295 	tmp = DVA_GET_OFFSET((&ze->ze_dva));
5146423Sgw25295 	tmp += (ze->ze_stride * (offset / zv->zv_volblocksize));
5156423Sgw25295 	DVA_SET_OFFSET(dva, tmp);
5166423Sgw25295 	return (0);
5176423Sgw25295 }
5186423Sgw25295 
5196423Sgw25295 static void
5206423Sgw25295 zvol_free_extents(zvol_state_t *zv)
5216423Sgw25295 {
5226423Sgw25295 	zvol_ext_list_t *zl;
5236423Sgw25295 	zvol_ext_list_t *tmp;
5246423Sgw25295 
5256423Sgw25295 	if (zv->zv_list != NULL) {
5266423Sgw25295 		zl = zv->zv_list;
5276423Sgw25295 		while (zl != NULL) {
5286423Sgw25295 			tmp = zl->zl_next;
5296423Sgw25295 			kmem_free(zl, sizeof (zvol_ext_list_t));
5306423Sgw25295 			zl = tmp;
5316423Sgw25295 		}
5326423Sgw25295 		zv->zv_list = NULL;
5336423Sgw25295 	}
5346423Sgw25295 }
5356423Sgw25295 
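/*
 * Build the list of extents backing a dump volume by walking every block
 * of the zvol.  Returns EFRAGS if the allocation resulted in gang blocks
 * and EIO if the extents do not account for every block of the volume.
 */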
5366423Sgw25295 int
5376423Sgw25295 zvol_get_lbas(zvol_state_t *zv)
5386423Sgw25295 {
5396423Sgw25295 	struct maparg	ma;
5406423Sgw25295 	zvol_ext_list_t	*zl;
5416423Sgw25295 	zvol_extent_t	*ze;
5426423Sgw25295 	uint64_t	blocks = 0;
5436423Sgw25295 	int		err;
5446423Sgw25295 
5456423Sgw25295 	ma.ma_list = zl = kmem_zalloc(sizeof (zvol_ext_list_t), KM_SLEEP);
5466423Sgw25295 	ma.ma_extent = &ma.ma_list->zl_extents[0];
5476423Sgw25295 	ma.ma_gang = 0;
5486423Sgw25295 	zv->zv_list = ma.ma_list;
5496423Sgw25295 
5506423Sgw25295 	err = traverse_zvol(zv->zv_objset, ADVANCE_PRE, zvol_map_block, &ma);
5516423Sgw25295 	if (err == EINTR && ma.ma_gang) {
5526423Sgw25295 		/*
5536423Sgw25295 		 * We currently don't support dump devices when the pool
5546423Sgw25295 		 * is so fragmented that our allocation has resulted in
5556423Sgw25295 		 * gang blocks.
5566423Sgw25295 		 */
5576423Sgw25295 		zvol_free_extents(zv);
5586423Sgw25295 		return (EFRAGS);
5596423Sgw25295 	}
5606423Sgw25295 	ASSERT3U(err, ==, 0);
5616423Sgw25295 
5626423Sgw25295 	ze = &zl->zl_extents[0];
5636423Sgw25295 	while (ze) {
5646423Sgw25295 		blocks += ze->ze_size;
5656423Sgw25295 		if (ze == &zl->zl_extents[NUM_EXTENTS - 1]) {
5666423Sgw25295 			zl = zl->zl_next;
5676423Sgw25295 			ze = &zl->zl_extents[0];
5686423Sgw25295 		} else {
5696423Sgw25295 			ze++;
5706423Sgw25295 		}
5716423Sgw25295 	}
5726423Sgw25295 	if (blocks != (zv->zv_volsize / zv->zv_volblocksize)) {
5736423Sgw25295 		zvol_free_extents(zv);
5746423Sgw25295 		return (EIO);
5756423Sgw25295 	}
5766423Sgw25295 
5776423Sgw25295 	return (0);
5786423Sgw25295 }
5796423Sgw25295 
5806423Sgw25295 /*
5816423Sgw25295  * Create a minor node (plus a whole lot more) for the specified volume.
582789Sahrens  */
583789Sahrens int
5844787Sahrens zvol_create_minor(const char *name, major_t maj)
585789Sahrens {
586789Sahrens 	zvol_state_t *zv;
587789Sahrens 	objset_t *os;
5883063Sperrin 	dmu_object_info_t doi;
589789Sahrens 	uint64_t volsize;
590789Sahrens 	minor_t minor = 0;
591789Sahrens 	struct pathname linkpath;
5926689Smaybee 	int ds_mode = DS_MODE_OWNER;
593789Sahrens 	vnode_t *vp = NULL;
594789Sahrens 	char *devpath;
5956423Sgw25295 	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(name) + 1;
596789Sahrens 	char chrbuf[30], blkbuf[30];
597789Sahrens 	int error;
598789Sahrens 
599789Sahrens 	mutex_enter(&zvol_state_lock);
600789Sahrens 
601789Sahrens 	if ((zv = zvol_minor_lookup(name)) != NULL) {
602789Sahrens 		mutex_exit(&zvol_state_lock);
603789Sahrens 		return (EEXIST);
604789Sahrens 	}
605789Sahrens 
606789Sahrens 	if (strchr(name, '@') != 0)
607789Sahrens 		ds_mode |= DS_MODE_READONLY;
608789Sahrens 
609789Sahrens 	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);
610789Sahrens 
611789Sahrens 	if (error) {
612789Sahrens 		mutex_exit(&zvol_state_lock);
613789Sahrens 		return (error);
614789Sahrens 	}
615789Sahrens 
616789Sahrens 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
617789Sahrens 
618789Sahrens 	if (error) {
619789Sahrens 		dmu_objset_close(os);
620789Sahrens 		mutex_exit(&zvol_state_lock);
621789Sahrens 		return (error);
622789Sahrens 	}
623789Sahrens 
624789Sahrens 	/*
625789Sahrens 	 * If there's an existing /dev/zvol symlink, try to use the
626789Sahrens 	 * same minor number we used last time.
627789Sahrens 	 */
628789Sahrens 	devpath = kmem_alloc(devpathlen, KM_SLEEP);
629789Sahrens 
6306423Sgw25295 	(void) sprintf(devpath, "%s%s", ZVOL_FULL_DEV_DIR, name);
631789Sahrens 
632789Sahrens 	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);
633789Sahrens 
634789Sahrens 	kmem_free(devpath, devpathlen);
635789Sahrens 
636789Sahrens 	if (error == 0 && vp->v_type != VLNK)
637789Sahrens 		error = EINVAL;
638789Sahrens 
639789Sahrens 	if (error == 0) {
640789Sahrens 		pn_alloc(&linkpath);
641789Sahrens 		error = pn_getsymlink(vp, &linkpath, kcred);
642789Sahrens 		if (error == 0) {
643789Sahrens 			char *ms = strstr(linkpath.pn_path, ZVOL_PSEUDO_DEV);
644789Sahrens 			if (ms != NULL) {
645789Sahrens 				ms += strlen(ZVOL_PSEUDO_DEV);
646789Sahrens 				minor = stoi(&ms);
647789Sahrens 			}
648789Sahrens 		}
649789Sahrens 		pn_free(&linkpath);
650789Sahrens 	}
651789Sahrens 
652789Sahrens 	if (vp != NULL)
653789Sahrens 		VN_RELE(vp);
654789Sahrens 
655789Sahrens 	/*
656789Sahrens 	 * If we found a minor but it's already in use, we must pick a new one.
657789Sahrens 	 */
658789Sahrens 	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
659789Sahrens 		minor = 0;
660789Sahrens 
661789Sahrens 	if (minor == 0)
662789Sahrens 		minor = zvol_minor_alloc();
663789Sahrens 
664789Sahrens 	if (minor == 0) {
665789Sahrens 		dmu_objset_close(os);
666789Sahrens 		mutex_exit(&zvol_state_lock);
667789Sahrens 		return (ENXIO);
668789Sahrens 	}
669789Sahrens 
670789Sahrens 	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
671789Sahrens 		dmu_objset_close(os);
672789Sahrens 		mutex_exit(&zvol_state_lock);
673789Sahrens 		return (EAGAIN);
674789Sahrens 	}
675789Sahrens 
6762676Seschrock 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
6772676Seschrock 	    (char *)name);
678789Sahrens 
679789Sahrens 	(void) sprintf(chrbuf, "%uc,raw", minor);
680789Sahrens 
681789Sahrens 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
682789Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
683789Sahrens 		ddi_soft_state_free(zvol_state, minor);
684789Sahrens 		dmu_objset_close(os);
685789Sahrens 		mutex_exit(&zvol_state_lock);
686789Sahrens 		return (EAGAIN);
687789Sahrens 	}
688789Sahrens 
689789Sahrens 	(void) sprintf(blkbuf, "%uc", minor);
690789Sahrens 
691789Sahrens 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
692789Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
693789Sahrens 		ddi_remove_minor_node(zfs_dip, chrbuf);
694789Sahrens 		ddi_soft_state_free(zvol_state, minor);
695789Sahrens 		dmu_objset_close(os);
696789Sahrens 		mutex_exit(&zvol_state_lock);
697789Sahrens 		return (EAGAIN);
698789Sahrens 	}
699789Sahrens 
700789Sahrens 	zv = ddi_get_soft_state(zvol_state, minor);
701789Sahrens 
702789Sahrens 	(void) strcpy(zv->zv_name, name);
703789Sahrens 	zv->zv_min_bs = DEV_BSHIFT;
704789Sahrens 	zv->zv_minor = minor;
705789Sahrens 	zv->zv_volsize = volsize;
706789Sahrens 	zv->zv_objset = os;
707789Sahrens 	zv->zv_mode = ds_mode;
7083063Sperrin 	zv->zv_zilog = zil_open(os, zvol_get_data);
7093755Sperrin 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
7103755Sperrin 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
7113755Sperrin 	    sizeof (rl_t), offsetof(rl_t, r_node));
7123063Sperrin 	/* get and cache the blocksize */
7133063Sperrin 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
7143063Sperrin 	ASSERT(error == 0);
7153063Sperrin 	zv->zv_volblocksize = doi.doi_data_block_size;
7161141Sperrin 
7173461Sahrens 	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector);
7184787Sahrens 	zvol_size_changed(zv, maj);
719789Sahrens 
7201544Seschrock 	/* XXX this should handle the possible i/o error */
721789Sahrens 	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
722789Sahrens 	    "readonly", zvol_readonly_changed_cb, zv) == 0);
723789Sahrens 
724789Sahrens 	zvol_minors++;
725789Sahrens 
726789Sahrens 	mutex_exit(&zvol_state_lock);
727789Sahrens 
728789Sahrens 	return (0);
729789Sahrens }
730789Sahrens 
731789Sahrens /*
732789Sahrens  * Remove minor node for the specified volume.
733789Sahrens  */
734789Sahrens int
7352676Seschrock zvol_remove_minor(const char *name)
736789Sahrens {
737789Sahrens 	zvol_state_t *zv;
738789Sahrens 	char namebuf[30];
739789Sahrens 
740789Sahrens 	mutex_enter(&zvol_state_lock);
741789Sahrens 
7422676Seschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
743789Sahrens 		mutex_exit(&zvol_state_lock);
744789Sahrens 		return (ENXIO);
745789Sahrens 	}
746789Sahrens 
747789Sahrens 	if (zv->zv_total_opens != 0) {
748789Sahrens 		mutex_exit(&zvol_state_lock);
749789Sahrens 		return (EBUSY);
750789Sahrens 	}
751789Sahrens 
752789Sahrens 	(void) sprintf(namebuf, "%uc,raw", zv->zv_minor);
753789Sahrens 	ddi_remove_minor_node(zfs_dip, namebuf);
754789Sahrens 
755789Sahrens 	(void) sprintf(namebuf, "%uc", zv->zv_minor);
756789Sahrens 	ddi_remove_minor_node(zfs_dip, namebuf);
757789Sahrens 
758789Sahrens 	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
759789Sahrens 	    "readonly", zvol_readonly_changed_cb, zv) == 0);
760789Sahrens 
7611141Sperrin 	zil_close(zv->zv_zilog);
7621141Sperrin 	zv->zv_zilog = NULL;
763789Sahrens 	dmu_objset_close(zv->zv_objset);
764789Sahrens 	zv->zv_objset = NULL;
7653755Sperrin 	avl_destroy(&zv->zv_znode.z_range_avl);
7663755Sperrin 	mutex_destroy(&zv->zv_znode.z_range_lock);
767789Sahrens 
768789Sahrens 	ddi_soft_state_free(zvol_state, zv->zv_minor);
769789Sahrens 
770789Sahrens 	zvol_minors--;
771789Sahrens 
772789Sahrens 	mutex_exit(&zvol_state_lock);
773789Sahrens 
774789Sahrens 	return (0);
775789Sahrens }
776789Sahrens 
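/*
 * Preallocate every block of the volume by writing zeroes, so that a dump
 * device gets a fixed on-disk layout.  On failure the range written so far
 * is freed again.
 */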
7776423Sgw25295 int
7786423Sgw25295 zvol_prealloc(zvol_state_t *zv)
7796423Sgw25295 {
7806423Sgw25295 	objset_t *os = zv->zv_objset;
7816423Sgw25295 	dmu_tx_t *tx;
7826423Sgw25295 	void *data;
7836423Sgw25295 	uint64_t refd, avail, usedobjs, availobjs;
7846423Sgw25295 	uint64_t resid = zv->zv_volsize;
7856423Sgw25295 	uint64_t off = 0;
7866423Sgw25295 
7876423Sgw25295 	/* Check the space usage before attempting to allocate the space */
7886423Sgw25295 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
7896423Sgw25295 	if (avail < zv->zv_volsize)
7906423Sgw25295 		return (ENOSPC);
7916423Sgw25295 
7926423Sgw25295 	/* Free old extents if they exist */
7936423Sgw25295 	zvol_free_extents(zv);
7946423Sgw25295 
7956423Sgw25295 	/* allocate the blocks by writing each one */
7966423Sgw25295 	data = kmem_zalloc(SPA_MAXBLOCKSIZE, KM_SLEEP);
7976423Sgw25295 
7986423Sgw25295 	while (resid != 0) {
7996423Sgw25295 		int error;
8006423Sgw25295 		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);
8016423Sgw25295 
8026423Sgw25295 		tx = dmu_tx_create(os);
8036423Sgw25295 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
8046423Sgw25295 		error = dmu_tx_assign(tx, TXG_WAIT);
8056423Sgw25295 		if (error) {
8066423Sgw25295 			dmu_tx_abort(tx);
8076423Sgw25295 			kmem_free(data, SPA_MAXBLOCKSIZE);
8086992Smaybee 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
8096423Sgw25295 			return (error);
8106423Sgw25295 		}
8116423Sgw25295 		dmu_write(os, ZVOL_OBJ, off, bytes, data, tx);
8126423Sgw25295 		dmu_tx_commit(tx);
8136423Sgw25295 		off += bytes;
8146423Sgw25295 		resid -= bytes;
8156423Sgw25295 	}
8166423Sgw25295 	kmem_free(data, SPA_MAXBLOCKSIZE);
8176423Sgw25295 	txg_wait_synced(dmu_objset_pool(os), 0);
8186423Sgw25295 
8196423Sgw25295 	return (0);
8206423Sgw25295 }
8216423Sgw25295 
8226423Sgw25295 int
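/*
 * Record the new volume size in the ZAP, free any blocks beyond it, and,
 * unless this is a faked-up state (zv_minor == 0), update the in-core
 * size and the device properties.
 */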
8236423Sgw25295 zvol_update_volsize(zvol_state_t *zv, major_t maj, uint64_t volsize)
8246423Sgw25295 {
8256423Sgw25295 	dmu_tx_t *tx;
8266423Sgw25295 	int error;
8276423Sgw25295 
8286423Sgw25295 	ASSERT(MUTEX_HELD(&zvol_state_lock));
8296423Sgw25295 
8306423Sgw25295 	tx = dmu_tx_create(zv->zv_objset);
8316423Sgw25295 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
8326423Sgw25295 	error = dmu_tx_assign(tx, TXG_WAIT);
8336423Sgw25295 	if (error) {
8346423Sgw25295 		dmu_tx_abort(tx);
8356423Sgw25295 		return (error);
8366423Sgw25295 	}
8376423Sgw25295 
8386423Sgw25295 	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
8396423Sgw25295 	    &volsize, tx);
8406423Sgw25295 	dmu_tx_commit(tx);
8416423Sgw25295 
8426423Sgw25295 	if (error == 0)
8436992Smaybee 		error = dmu_free_long_range(zv->zv_objset,
8446992Smaybee 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
8456423Sgw25295 
8467265Sahrens 	/*
8477265Sahrens 	 * If we are using a faked-up state (zv_minor == 0) then don't
8487265Sahrens 	 * try to update the in-core zvol state.
8497265Sahrens 	 */
8507265Sahrens 	if (error == 0 && zv->zv_minor) {
8516423Sgw25295 		zv->zv_volsize = volsize;
8526423Sgw25295 		zvol_size_changed(zv, maj);
8536423Sgw25295 	}
8546423Sgw25295 	return (error);
8556423Sgw25295 }
8566423Sgw25295 
857789Sahrens int
8584787Sahrens zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
859789Sahrens {
860789Sahrens 	zvol_state_t *zv;
861789Sahrens 	int error;
8621133Seschrock 	dmu_object_info_t doi;
8636423Sgw25295 	uint64_t old_volsize = 0ULL;
8647265Sahrens 	zvol_state_t state = { 0 };
865789Sahrens 
866789Sahrens 	mutex_enter(&zvol_state_lock);
867789Sahrens 
8682676Seschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
8697265Sahrens 		/*
8707265Sahrens 		 * If we are doing a "zfs clone -o volsize=", then the
8717265Sahrens 		 * minor node won't exist yet.
8727265Sahrens 		 */
8737265Sahrens 		error = dmu_objset_open(name, DMU_OST_ZVOL, DS_MODE_OWNER,
8747265Sahrens 		    &state.zv_objset);
8757265Sahrens 		if (error != 0)
8767265Sahrens 			goto out;
8777265Sahrens 		zv = &state;
878789Sahrens 	}
8796423Sgw25295 	old_volsize = zv->zv_volsize;
880789Sahrens 
8811133Seschrock 	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
8822676Seschrock 	    (error = zvol_check_volsize(volsize,
8837265Sahrens 	    doi.doi_data_block_size)) != 0)
8847265Sahrens 		goto out;
8851133Seschrock 
8866423Sgw25295 	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
8877265Sahrens 		error = EROFS;
8887265Sahrens 		goto out;
889789Sahrens 	}
890789Sahrens 
8916423Sgw25295 	error = zvol_update_volsize(zv, maj, volsize);
892789Sahrens 
8936423Sgw25295 	/*
8946423Sgw25295 	 * Reinitialize the dump area to the new size. If we
8956423Sgw25295 	 * failed to resize the dump area then restore it back to
8966423Sgw25295 	 * its original size.
8976423Sgw25295 	 */
8986423Sgw25295 	if (error == 0 && zv->zv_flags & ZVOL_DUMPIFIED) {
8996423Sgw25295 		if ((error = zvol_dumpify(zv)) != 0 ||
9006423Sgw25295 		    (error = dumpvp_resize()) != 0) {
9016423Sgw25295 			(void) zvol_update_volsize(zv, maj, old_volsize);
9026423Sgw25295 			error = zvol_dumpify(zv);
9036423Sgw25295 		}
904789Sahrens 	}
905789Sahrens 
9067265Sahrens out:
9077265Sahrens 	if (state.zv_objset)
9087265Sahrens 		dmu_objset_close(state.zv_objset);
9097265Sahrens 
910789Sahrens 	mutex_exit(&zvol_state_lock);
911789Sahrens 
912789Sahrens 	return (error);
913789Sahrens }
914789Sahrens 
915789Sahrens int
9162676Seschrock zvol_set_volblocksize(const char *name, uint64_t volblocksize)
917789Sahrens {
918789Sahrens 	zvol_state_t *zv;
919789Sahrens 	dmu_tx_t *tx;
920789Sahrens 	int error;
921789Sahrens 
922789Sahrens 	mutex_enter(&zvol_state_lock);
923789Sahrens 
9242676Seschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
925789Sahrens 		mutex_exit(&zvol_state_lock);
926789Sahrens 		return (ENXIO);
927789Sahrens 	}
9286423Sgw25295 	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
929789Sahrens 		mutex_exit(&zvol_state_lock);
930789Sahrens 		return (EROFS);
931789Sahrens 	}
932789Sahrens 
933789Sahrens 	tx = dmu_tx_create(zv->zv_objset);
934789Sahrens 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
935789Sahrens 	error = dmu_tx_assign(tx, TXG_WAIT);
936789Sahrens 	if (error) {
937789Sahrens 		dmu_tx_abort(tx);
938789Sahrens 	} else {
939789Sahrens 		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
9402676Seschrock 		    volblocksize, 0, tx);
941789Sahrens 		if (error == ENOTSUP)
942789Sahrens 			error = EBUSY;
943789Sahrens 		dmu_tx_commit(tx);
944789Sahrens 	}
945789Sahrens 
946789Sahrens 	mutex_exit(&zvol_state_lock);
947789Sahrens 
948789Sahrens 	return (error);
949789Sahrens }
950789Sahrens 
951789Sahrens /*ARGSUSED*/
952789Sahrens int
953789Sahrens zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
954789Sahrens {
955789Sahrens 	minor_t minor = getminor(*devp);
956789Sahrens 	zvol_state_t *zv;
957789Sahrens 
958789Sahrens 	if (minor == 0)			/* This is the control device */
959789Sahrens 		return (0);
960789Sahrens 
961789Sahrens 	mutex_enter(&zvol_state_lock);
962789Sahrens 
963789Sahrens 	zv = ddi_get_soft_state(zvol_state, minor);
964789Sahrens 	if (zv == NULL) {
965789Sahrens 		mutex_exit(&zvol_state_lock);
966789Sahrens 		return (ENXIO);
967789Sahrens 	}
968789Sahrens 
969789Sahrens 	ASSERT(zv->zv_objset != NULL);
970789Sahrens 
971789Sahrens 	if ((flag & FWRITE) &&
9726423Sgw25295 	    (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY))) {
973789Sahrens 		mutex_exit(&zvol_state_lock);
974789Sahrens 		return (EROFS);
975789Sahrens 	}
976789Sahrens 
977789Sahrens 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
978789Sahrens 		zv->zv_open_count[otyp]++;
979789Sahrens 		zv->zv_total_opens++;
980789Sahrens 	}
981789Sahrens 
982789Sahrens 	mutex_exit(&zvol_state_lock);
983789Sahrens 
984789Sahrens 	return (0);
985789Sahrens }
986789Sahrens 
987789Sahrens /*ARGSUSED*/
988789Sahrens int
989789Sahrens zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
990789Sahrens {
991789Sahrens 	minor_t minor = getminor(dev);
992789Sahrens 	zvol_state_t *zv;
993789Sahrens 
994789Sahrens 	if (minor == 0)		/* This is the control device */
995789Sahrens 		return (0);
996789Sahrens 
997789Sahrens 	mutex_enter(&zvol_state_lock);
998789Sahrens 
999789Sahrens 	zv = ddi_get_soft_state(zvol_state, minor);
1000789Sahrens 	if (zv == NULL) {
1001789Sahrens 		mutex_exit(&zvol_state_lock);
1002789Sahrens 		return (ENXIO);
1003789Sahrens 	}
1004789Sahrens 
1005789Sahrens 	/*
1006789Sahrens 	 * The next statement is a workaround for the following DDI bug:
1007789Sahrens 	 * 6343604 specfs race: multiple "last-close" of the same device
1008789Sahrens 	 */
1009789Sahrens 	if (zv->zv_total_opens == 0) {
1010789Sahrens 		mutex_exit(&zvol_state_lock);
1011789Sahrens 		return (0);
1012789Sahrens 	}
1013789Sahrens 
1014789Sahrens 	/*
1015789Sahrens 	 * If the open count is zero, this is a spurious close.
1016789Sahrens 	 * That indicates a bug in the kernel / DDI framework.
1017789Sahrens 	 */
1018789Sahrens 	ASSERT(zv->zv_open_count[otyp] != 0);
1019789Sahrens 	ASSERT(zv->zv_total_opens != 0);
1020789Sahrens 
1021789Sahrens 	/*
1022789Sahrens 	 * You may get multiple opens, but only one close.
1023789Sahrens 	 */
1024789Sahrens 	zv->zv_open_count[otyp]--;
1025789Sahrens 	zv->zv_total_opens--;
1026789Sahrens 
1027789Sahrens 	mutex_exit(&zvol_state_lock);
1028789Sahrens 
1029789Sahrens 	return (0);
1030789Sahrens }
1031789Sahrens 
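/*
 * dmu_sync() callback: release the dbuf and range lock taken in
 * zvol_get_data() and note the block in the ZIL.
 */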
10323638Sbillm static void
10333063Sperrin zvol_get_done(dmu_buf_t *db, void *vzgd)
10343063Sperrin {
10353063Sperrin 	zgd_t *zgd = (zgd_t *)vzgd;
10363755Sperrin 	rl_t *rl = zgd->zgd_rl;
10373063Sperrin 
10383063Sperrin 	dmu_buf_rele(db, vzgd);
10393755Sperrin 	zfs_range_unlock(rl);
10405688Sbonwick 	zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
10413063Sperrin 	kmem_free(zgd, sizeof (zgd_t));
10423063Sperrin }
10433063Sperrin 
10443063Sperrin /*
10453063Sperrin  * Get data to generate a TX_WRITE intent log record.
10463063Sperrin  */
10473638Sbillm static int
10483063Sperrin zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
10493063Sperrin {
10503063Sperrin 	zvol_state_t *zv = arg;
10513063Sperrin 	objset_t *os = zv->zv_objset;
10523063Sperrin 	dmu_buf_t *db;
10533755Sperrin 	rl_t *rl;
10543063Sperrin 	zgd_t *zgd;
10553755Sperrin 	uint64_t boff; 			/* block starting offset */
10563755Sperrin 	int dlen = lr->lr_length;	/* length of user data */
10573063Sperrin 	int error;
10583063Sperrin 
10593063Sperrin 	ASSERT(zio);
10603755Sperrin 	ASSERT(dlen != 0);
10613638Sbillm 
10623755Sperrin 	/*
10633755Sperrin 	 * Write records come in two flavors: immediate and indirect.
10643755Sperrin 	 * For small writes it's cheaper to store the data with the
10653755Sperrin 	 * log record (immediate); for large writes it's cheaper to
10663755Sperrin 	 * sync the data and get a pointer to it (indirect) so that
10673755Sperrin 	 * we don't have to write the data twice.
10683755Sperrin 	 */
10693755Sperrin 	if (buf != NULL) /* immediate write */
10703755Sperrin 		return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf));
10713063Sperrin 
10723063Sperrin 	zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
10733063Sperrin 	zgd->zgd_zilog = zv->zv_zilog;
10743063Sperrin 	zgd->zgd_bp = &lr->lr_blkptr;
10753063Sperrin 
10763755Sperrin 	/*
10773755Sperrin 	 * Lock the range of the block to ensure that while the data is
10786423Sgw25295 	 * being written out and its checksum is being calculated, no
10793755Sperrin 	 * other thread can change the block.
10803755Sperrin 	 */
10813755Sperrin 	boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t);
10823755Sperrin 	rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize,
10833755Sperrin 	    RL_READER);
10843755Sperrin 	zgd->zgd_rl = rl;
10853755Sperrin 
10863063Sperrin 	VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db));
10873063Sperrin 	error = dmu_sync(zio, db, &lr->lr_blkptr,
10883063Sperrin 	    lr->lr_common.lrc_txg, zvol_get_done, zgd);
10893638Sbillm 	if (error == 0)
10905688Sbonwick 		zil_add_block(zv->zv_zilog, &lr->lr_blkptr);
10913063Sperrin 	/*
10923063Sperrin 	 * If we get EINPROGRESS, then we need to wait for a
10933063Sperrin 	 * write IO initiated by dmu_sync() to complete before
10943063Sperrin 	 * we can release this dbuf.  We will finish everything
10953063Sperrin 	 * up in the zvol_get_done() callback.
10963063Sperrin 	 */
10973063Sperrin 	if (error == EINPROGRESS)
10983063Sperrin 		return (0);
10993063Sperrin 	dmu_buf_rele(db, zgd);
11003755Sperrin 	zfs_range_unlock(rl);
11013063Sperrin 	kmem_free(zgd, sizeof (zgd_t));
11023063Sperrin 	return (error);
11033063Sperrin }
11043063Sperrin 
11051861Sperrin /*
11061861Sperrin  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
11071141Sperrin  *
11081141Sperrin  * We store data in the log buffers if it's small enough.
11093063Sperrin  * Otherwise we will later flush the data out via dmu_sync().
11101141Sperrin  */
11113063Sperrin ssize_t zvol_immediate_write_sz = 32768;
11121141Sperrin 
11133638Sbillm static void
11143638Sbillm zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len)
11151141Sperrin {
11163638Sbillm 	uint32_t blocksize = zv->zv_volblocksize;
11171141Sperrin 	lr_write_t *lr;
11181861Sperrin 
11191861Sperrin 	while (len) {
11203638Sbillm 		ssize_t nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
11213638Sbillm 		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (*lr));
11223638Sbillm 
11233638Sbillm 		itx->itx_wr_state =
11243638Sbillm 		    len > zvol_immediate_write_sz ?  WR_INDIRECT : WR_NEED_COPY;
11253638Sbillm 		itx->itx_private = zv;
11263638Sbillm 		lr = (lr_write_t *)&itx->itx_lr;
11273638Sbillm 		lr->lr_foid = ZVOL_OBJ;
11283638Sbillm 		lr->lr_offset = off;
11293638Sbillm 		lr->lr_length = nbytes;
11303638Sbillm 		lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t);
11313638Sbillm 		BP_ZERO(&lr->lr_blkptr);
11323638Sbillm 
11333638Sbillm 		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
11341861Sperrin 		len -= nbytes;
11351861Sperrin 		off += nbytes;
11361141Sperrin 	}
11371141Sperrin }
11381141Sperrin 
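/*
 * Issue I/O directly to the leaf vdevs backing a dump volume, bypassing
 * the DMU.  In panic or dump context the write goes through ldi_dump();
 * otherwise a synchronous physio is done via the disk's LDI handle.
 */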
1139789Sahrens int
11406423Sgw25295 zvol_dumpio(vdev_t *vd, uint64_t size, uint64_t offset, void *addr,
11416423Sgw25295     int bflags, int isdump)
11426423Sgw25295 {
11436423Sgw25295 	vdev_disk_t *dvd;
11446423Sgw25295 	int direction;
11456423Sgw25295 	int c;
11466423Sgw25295 	int numerrors = 0;
11476423Sgw25295 
11486423Sgw25295 	for (c = 0; c < vd->vdev_children; c++) {
11496423Sgw25295 		if (zvol_dumpio(vd->vdev_child[c], size, offset,
11506423Sgw25295 		    addr, bflags, isdump) != 0) {
11516423Sgw25295 			numerrors++;
11526423Sgw25295 		} else if (bflags & B_READ) {
11536423Sgw25295 			break;
11546423Sgw25295 		}
11556423Sgw25295 	}
11566423Sgw25295 
11576423Sgw25295 	if (!vd->vdev_ops->vdev_op_leaf)
11586423Sgw25295 		return (numerrors < vd->vdev_children ? 0 : EIO);
11596423Sgw25295 
11606423Sgw25295 	if (!vdev_writeable(vd))
11616423Sgw25295 		return (EIO);
11626423Sgw25295 
11636423Sgw25295 	dvd = vd->vdev_tsd;
11646423Sgw25295 	ASSERT3P(dvd, !=, NULL);
11656423Sgw25295 	direction = bflags & (B_WRITE | B_READ);
11666423Sgw25295 	ASSERT(ISP2(direction));
11676423Sgw25295 	offset += VDEV_LABEL_START_SIZE;
11686423Sgw25295 
11696423Sgw25295 	if (ddi_in_panic() || isdump) {
11706423Sgw25295 		if (direction & B_READ)
11716423Sgw25295 			return (EIO);
11726423Sgw25295 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
11736423Sgw25295 		    lbtodb(size)));
11746423Sgw25295 	} else {
11756423Sgw25295 		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
11766423Sgw25295 		    direction));
11776423Sgw25295 	}
11786423Sgw25295 }
11796423Sgw25295 
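/*
 * Translate a volume offset to the DVA of its preallocated block and
 * perform the I/O directly against the vdev that holds it.
 */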
11806423Sgw25295 int
11816423Sgw25295 zvol_physio(zvol_state_t *zv, int bflags, uint64_t off,
11826423Sgw25295     uint64_t size, void *addr, int isdump)
11836423Sgw25295 {
11846423Sgw25295 	dva_t dva;
11856423Sgw25295 	vdev_t *vd;
11866423Sgw25295 	int error;
11876423Sgw25295 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
11886423Sgw25295 
11896423Sgw25295 	ASSERT(size <= zv->zv_volblocksize);
11906423Sgw25295 
11916423Sgw25295 	/* restrict requests to multiples of the system block size */
11926423Sgw25295 	if (P2PHASE(off, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE))
11936423Sgw25295 		return (EINVAL);
11946423Sgw25295 
11956423Sgw25295 	if (zvol_get_dva(zv, off, &dva) != 0)
11966423Sgw25295 		return (EIO);
11976423Sgw25295 
11986423Sgw25295 	spa_config_enter(spa, RW_READER, FTAG);
11996423Sgw25295 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
12006423Sgw25295 
12016423Sgw25295 	error = zvol_dumpio(vd, size,
12026423Sgw25295 	    DVA_GET_OFFSET(&dva) + (off % zv->zv_volblocksize),
12036423Sgw25295 	    addr, bflags & (B_READ | B_WRITE | B_PHYS), isdump);
12046423Sgw25295 
12056423Sgw25295 	spa_config_exit(spa, FTAG);
12066423Sgw25295 	return (error);
12076423Sgw25295 }
12086423Sgw25295 
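/*
 * Common strategy routine: map the buf onto the volume, take the range
 * lock, and either do direct I/O (for dumpified volumes) or go through
 * the DMU, logging writes to the ZIL.
 */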
12096423Sgw25295 int
1210789Sahrens zvol_strategy(buf_t *bp)
1211789Sahrens {
1212789Sahrens 	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
1213789Sahrens 	uint64_t off, volsize;
1214789Sahrens 	size_t size, resid;
1215789Sahrens 	char *addr;
12161141Sperrin 	objset_t *os;
12173755Sperrin 	rl_t *rl;
1218789Sahrens 	int error = 0;
12196423Sgw25295 	boolean_t reading, is_dump;
1220789Sahrens 
1221789Sahrens 	if (zv == NULL) {
1222789Sahrens 		bioerror(bp, ENXIO);
1223789Sahrens 		biodone(bp);
1224789Sahrens 		return (0);
1225789Sahrens 	}
1226789Sahrens 
1227789Sahrens 	if (getminor(bp->b_edev) == 0) {
1228789Sahrens 		bioerror(bp, EINVAL);
1229789Sahrens 		biodone(bp);
1230789Sahrens 		return (0);
1231789Sahrens 	}
1232789Sahrens 
12336423Sgw25295 	if (!(bp->b_flags & B_READ) &&
12346423Sgw25295 	    (zv->zv_flags & ZVOL_RDONLY ||
12356423Sgw25295 	    zv->zv_mode & DS_MODE_READONLY)) {
1236789Sahrens 		bioerror(bp, EROFS);
1237789Sahrens 		biodone(bp);
1238789Sahrens 		return (0);
1239789Sahrens 	}
1240789Sahrens 
1241789Sahrens 	off = ldbtob(bp->b_blkno);
1242789Sahrens 	volsize = zv->zv_volsize;
1243789Sahrens 
12441141Sperrin 	os = zv->zv_objset;
12451141Sperrin 	ASSERT(os != NULL);
1246789Sahrens 
1247789Sahrens 	bp_mapin(bp);
1248789Sahrens 	addr = bp->b_un.b_addr;
1249789Sahrens 	resid = bp->b_bcount;
1250789Sahrens 
12517013Sgw25295 	if (resid > 0 && (off < 0 || off >= volsize)) {
12527013Sgw25295 		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}
12537013Sgw25295 
12541861Sperrin 	/*
12551861Sperrin 	 * There must be no buffer changes when doing a dmu_sync() because
12561861Sperrin 	 * we can't change the data whilst calculating the checksum.
12571861Sperrin 	 */
12581861Sperrin 	reading = bp->b_flags & B_READ;
	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
12593755Sperrin 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
12603755Sperrin 	    reading ? RL_READER : RL_WRITER);
12611861Sperrin 
12626423Sgw25295 	if (resid > volsize - off)	/* don't write past the end */
12636423Sgw25295 		resid = volsize - off;
12646423Sgw25295 
1265789Sahrens 	while (resid != 0 && off < volsize) {
1266789Sahrens 
12676423Sgw25295 		size = MIN(resid, zvol_maxphys);
12686423Sgw25295 		if (is_dump) {
12696423Sgw25295 			/* can't straddle a block boundary */
12706423Sgw25295 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
12716423Sgw25295 			error = zvol_physio(zv, bp->b_flags, off, size,
12726423Sgw25295 			    addr, 0);
12736423Sgw25295 		} else if (reading) {
12741861Sperrin 			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
1275789Sahrens 		} else {
12761141Sperrin 			dmu_tx_t *tx = dmu_tx_create(os);
1277789Sahrens 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1278789Sahrens 			error = dmu_tx_assign(tx, TXG_WAIT);
1279789Sahrens 			if (error) {
1280789Sahrens 				dmu_tx_abort(tx);
1281789Sahrens 			} else {
12821141Sperrin 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
12833638Sbillm 				zvol_log_write(zv, tx, off, size);
1284789Sahrens 				dmu_tx_commit(tx);
1285789Sahrens 			}
1286789Sahrens 		}
1287*7294Sperrin 		if (error) {
1288*7294Sperrin 			/* convert checksum errors into IO errors */
1289*7294Sperrin 			if (error == ECKSUM)
1290*7294Sperrin 				error = EIO;
1291789Sahrens 			break;
1292*7294Sperrin 		}
1293789Sahrens 		off += size;
1294789Sahrens 		addr += size;
1295789Sahrens 		resid -= size;
1296789Sahrens 	}
12973755Sperrin 	zfs_range_unlock(rl);
1298789Sahrens 
1299789Sahrens 	if ((bp->b_resid = resid) == bp->b_bcount)
1300789Sahrens 		bioerror(bp, off > volsize ? EINVAL : error);
1301789Sahrens 
13026423Sgw25295 	if (!(bp->b_flags & B_ASYNC) && !reading && !zil_disable && !is_dump)
13033638Sbillm 		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
13043638Sbillm 	biodone(bp);
13051141Sperrin 
1306789Sahrens 	return (0);
1307789Sahrens }
1308789Sahrens 
13093063Sperrin /*
13103063Sperrin  * Set the buffer count to the zvol maximum transfer.
13113063Sperrin  * Using our own routine instead of the default minphys()
13123063Sperrin  * means that for larger writes we write bigger buffers on X86
13133063Sperrin  * (128K instead of 56K) and flush the disk write cache less often:
13143063Sperrin  * every zvol_maxphys (currently 1MB) instead of every minphys
13153063Sperrin  * (currently 56K on X86 and 128K on sparc).
13163063Sperrin  */
13173063Sperrin void
13183063Sperrin zvol_minphys(struct buf *bp)
13193063Sperrin {
13203063Sperrin 	if (bp->b_bcount > zvol_maxphys)
13213063Sperrin 		bp->b_bcount = zvol_maxphys;
13223063Sperrin }
13233063Sperrin 
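/*
 * Dump entry point: write crash dump data to a dumpified volume through
 * its preallocated extents, one volume block at a time.
 */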
13246423Sgw25295 int
13256423Sgw25295 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
13266423Sgw25295 {
13276423Sgw25295 	minor_t minor = getminor(dev);
13286423Sgw25295 	zvol_state_t *zv;
13296423Sgw25295 	int error = 0;
13306423Sgw25295 	uint64_t size;
13316423Sgw25295 	uint64_t boff;
13326423Sgw25295 	uint64_t resid;
13336423Sgw25295 
13346423Sgw25295 	if (minor == 0)			/* This is the control device */
13356423Sgw25295 		return (ENXIO);
13366423Sgw25295 
13376423Sgw25295 	zv = ddi_get_soft_state(zvol_state, minor);
13386423Sgw25295 	if (zv == NULL)
13396423Sgw25295 		return (ENXIO);
13406423Sgw25295 
13416423Sgw25295 	boff = ldbtob(blkno);
13426423Sgw25295 	resid = ldbtob(nblocks);
13436423Sgw25295 	if (boff + resid > zv->zv_volsize) {
13446423Sgw25295 		/* dump should know better than to write here */
13456423Sgw25295 		ASSERT(boff + resid <= zv->zv_volsize);
13466423Sgw25295 		return (EIO);
13476423Sgw25295 	}
13486423Sgw25295 	while (resid) {
13496423Sgw25295 		/* can't straddle a block boundary */
13506423Sgw25295 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
13516423Sgw25295 
13526423Sgw25295 		error = zvol_physio(zv, B_WRITE, boff, size, addr, 1);
13536423Sgw25295 		if (error)
13546423Sgw25295 			break;
13556423Sgw25295 		boff += size;
13566423Sgw25295 		addr += size;
13576423Sgw25295 		resid -= size;
13586423Sgw25295 	}
13596423Sgw25295 
13606423Sgw25295 	return (error);
13616423Sgw25295 }
13626423Sgw25295 
1363789Sahrens /*ARGSUSED*/
1364789Sahrens int
13653638Sbillm zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1366789Sahrens {
13674107Sgw25295 	minor_t minor = getminor(dev);
13684107Sgw25295 	zvol_state_t *zv;
13697013Sgw25295 	uint64_t volsize;
13703755Sperrin 	rl_t *rl;
13713638Sbillm 	int error = 0;
13723638Sbillm 
13734107Sgw25295 	if (minor == 0)			/* This is the control device */
13744107Sgw25295 		return (ENXIO);
13754107Sgw25295 
13764107Sgw25295 	zv = ddi_get_soft_state(zvol_state, minor);
13774107Sgw25295 	if (zv == NULL)
13784107Sgw25295 		return (ENXIO);
13794107Sgw25295 
13807013Sgw25295 	volsize = zv->zv_volsize;
13817013Sgw25295 	if (uio->uio_resid > 0 &&
13827013Sgw25295 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
13837013Sgw25295 		return (EIO);
13847013Sgw25295 
13853755Sperrin 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
13863755Sperrin 	    RL_READER);
13877013Sgw25295 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
13883638Sbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
13893638Sbillm 
13907013Sgw25295 		/* don't read past the end */
13917013Sgw25295 		if (bytes > volsize - uio->uio_loffset)
13927013Sgw25295 			bytes = volsize - uio->uio_loffset;
13937013Sgw25295 
13943638Sbillm 		error =  dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1395*7294Sperrin 		if (error) {
1396*7294Sperrin 			/* convert checksum errors into IO errors */
1397*7294Sperrin 			if (error == ECKSUM)
1398*7294Sperrin 				error = EIO;
13993638Sbillm 			break;
1400*7294Sperrin 		}
14013638Sbillm 	}
14023755Sperrin 	zfs_range_unlock(rl);
14033638Sbillm 	return (error);
1404789Sahrens }
1405789Sahrens 
1406789Sahrens /*ARGSUSED*/
1407789Sahrens int
14083638Sbillm zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1409789Sahrens {
14104107Sgw25295 	minor_t minor = getminor(dev);
14114107Sgw25295 	zvol_state_t *zv;
14127013Sgw25295 	uint64_t volsize;
14133755Sperrin 	rl_t *rl;
14143638Sbillm 	int error = 0;
14153638Sbillm 
14164107Sgw25295 	if (minor == 0)			/* This is the control device */
14174107Sgw25295 		return (ENXIO);
14184107Sgw25295 
14194107Sgw25295 	zv = ddi_get_soft_state(zvol_state, minor);
14204107Sgw25295 	if (zv == NULL)
14214107Sgw25295 		return (ENXIO);
14224107Sgw25295 
14237013Sgw25295 	volsize = zv->zv_volsize;
14247013Sgw25295 	if (uio->uio_resid > 0 &&
14257013Sgw25295 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
14267013Sgw25295 		return (EIO);
14277013Sgw25295 
14286423Sgw25295 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
14296423Sgw25295 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
14306423Sgw25295 		    zvol_minphys, uio);
14316423Sgw25295 		return (error);
14326423Sgw25295 	}
14336423Sgw25295 
14343755Sperrin 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
14353755Sperrin 	    RL_WRITER);
14367013Sgw25295 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
14373638Sbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
14383638Sbillm 		uint64_t off = uio->uio_loffset;
14397013Sgw25295 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1440789Sahrens 
14417013Sgw25295 		if (bytes > volsize - off)	/* don't write past the end */
14427013Sgw25295 			bytes = volsize - off;
14437013Sgw25295 
14443638Sbillm 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
14453638Sbillm 		error = dmu_tx_assign(tx, TXG_WAIT);
14463638Sbillm 		if (error) {
14473638Sbillm 			dmu_tx_abort(tx);
14483638Sbillm 			break;
14493638Sbillm 		}
14503638Sbillm 		error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx);
14513638Sbillm 		if (error == 0)
14523638Sbillm 			zvol_log_write(zv, tx, off, bytes);
14533638Sbillm 		dmu_tx_commit(tx);
14543638Sbillm 
14553638Sbillm 		if (error)
14563638Sbillm 			break;
14573638Sbillm 	}
14583755Sperrin 	zfs_range_unlock(rl);
14593638Sbillm 	return (error);
1460789Sahrens }
1461789Sahrens 
1462789Sahrens /*
1463789Sahrens  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1464789Sahrens  */
1465789Sahrens /*ARGSUSED*/
1466789Sahrens int
1467789Sahrens zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1468789Sahrens {
1469789Sahrens 	zvol_state_t *zv;
14703897Smaybee 	struct dk_cinfo dki;
1471789Sahrens 	struct dk_minfo dkm;
1472789Sahrens 	dk_efi_t efi;
14733897Smaybee 	struct dk_callback *dkc;
1474789Sahrens 	struct uuid uuid = EFI_RESERVED;
1475789Sahrens 	uint32_t crc;
1476789Sahrens 	int error = 0;
14776423Sgw25295 	rl_t *rl;
1478789Sahrens 
1479789Sahrens 	mutex_enter(&zvol_state_lock);
1480789Sahrens 
1481789Sahrens 	zv = ddi_get_soft_state(zvol_state, getminor(dev));
1482789Sahrens 
1483789Sahrens 	if (zv == NULL) {
1484789Sahrens 		mutex_exit(&zvol_state_lock);
1485789Sahrens 		return (ENXIO);
1486789Sahrens 	}
1487789Sahrens 
1488789Sahrens 	switch (cmd) {
1489789Sahrens 
1490789Sahrens 	case DKIOCINFO:
14913897Smaybee 		bzero(&dki, sizeof (dki));
14923897Smaybee 		(void) strcpy(dki.dki_cname, "zvol");
14933897Smaybee 		(void) strcpy(dki.dki_dname, "zvol");
14943897Smaybee 		dki.dki_ctype = DKC_UNKNOWN;
14953897Smaybee 		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
1496789Sahrens 		mutex_exit(&zvol_state_lock);
14973897Smaybee 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1498789Sahrens 			error = EFAULT;
1499789Sahrens 		return (error);
1500789Sahrens 
1501789Sahrens 	case DKIOCGMEDIAINFO:
1502789Sahrens 		bzero(&dkm, sizeof (dkm));
1503789Sahrens 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1504789Sahrens 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1505789Sahrens 		dkm.dki_media_type = DK_UNKNOWN;
1506789Sahrens 		mutex_exit(&zvol_state_lock);
1507789Sahrens 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1508789Sahrens 			error = EFAULT;
1509789Sahrens 		return (error);
1510789Sahrens 
1511789Sahrens 	case DKIOCGETEFI:
1512789Sahrens 		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
1513789Sahrens 			mutex_exit(&zvol_state_lock);
1514789Sahrens 			return (EFAULT);
1515789Sahrens 		}
1516789Sahrens 		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;
1517789Sahrens 
15183016Smaybee 		/*
15193016Smaybee 		 * Some clients may attempt to request a PMBR for the
15203016Smaybee 		 * zvol.  Currently this interface will return EINVAL to
15213016Smaybee 		 * such requests.  These requests could be supported by
15223016Smaybee 		 * adding a check for lba == 0 and consing up an appropriate
15236423Sgw25295 		 * PMBR.
15243016Smaybee 		 */
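		/*
		 * A hedged sketch of that unimplemented lba == 0 case,
		 * kept inside this comment so the function's behavior is
		 * unchanged.  The protective-MBR contents (one partition
		 * entry of type 0xEE spanning LBA 1 through the last LBA,
		 * with the 0xAA55 boot signature) come from the EFI/GPT
		 * specification; the MBR structures themselves live in
		 * headers outside this file and are only described here.
		 *
		 *	if (efi.dki_lba == 0) {
		 *		char pmbr[512] = { 0 };
		 *
		 *		if (efi.dki_length < sizeof (pmbr)) {
		 *			mutex_exit(&zvol_state_lock);
		 *			return (EINVAL);
		 *		}
		 *		(fill pmbr[] with the protective entry and
		 *		signature, then hand it back just as the
		 *		lba == 1 and lba == 2 cases below do)
		 *		mutex_exit(&zvol_state_lock);
		 *		if (ddi_copyout(pmbr, efi.dki_data,
		 *		    sizeof (pmbr), flag))
		 *			error = EFAULT;
		 *		return (error);
		 *	}
		 */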
15253016Smaybee 		if (efi.dki_lba == 1) {
15263016Smaybee 			efi_gpt_t gpt;
15273016Smaybee 			efi_gpe_t gpe;
15283016Smaybee 
15293016Smaybee 			bzero(&gpt, sizeof (gpt));
15303016Smaybee 			bzero(&gpe, sizeof (gpe));
1531789Sahrens 
15323016Smaybee 			if (efi.dki_length < sizeof (gpt)) {
15333016Smaybee 				mutex_exit(&zvol_state_lock);
15343016Smaybee 				return (EINVAL);
15353016Smaybee 			}
15363016Smaybee 
15373016Smaybee 			gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
15383016Smaybee 			gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
15393016Smaybee 			gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
15403016Smaybee 			gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
15413016Smaybee 			gpt.efi_gpt_LastUsableLBA =
15423016Smaybee 			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
15433016Smaybee 			gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
15443080Smaybee 			gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
15453016Smaybee 			gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));
1546789Sahrens 
15473016Smaybee 			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
15483016Smaybee 			gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
15493016Smaybee 			gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;
15503016Smaybee 
15513016Smaybee 			CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
15523016Smaybee 			gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
15533016Smaybee 
15543016Smaybee 			CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
15553016Smaybee 			gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
15563016Smaybee 
15573016Smaybee 			mutex_exit(&zvol_state_lock);
15583016Smaybee 			if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag))
15593016Smaybee 				error = EFAULT;
15603016Smaybee 		} else if (efi.dki_lba == 2) {
15613016Smaybee 			efi_gpe_t gpe;
15623016Smaybee 
15633016Smaybee 			bzero(&gpe, sizeof (gpe));
1564789Sahrens 
15653016Smaybee 			if (efi.dki_length < sizeof (gpe)) {
15663016Smaybee 				mutex_exit(&zvol_state_lock);
15673016Smaybee 				return (EINVAL);
15683016Smaybee 			}
1569789Sahrens 
15703016Smaybee 			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
15713016Smaybee 			gpe.efi_gpe_StartingLBA = LE_64(34ULL);
15723016Smaybee 			gpe.efi_gpe_EndingLBA =
15733016Smaybee 			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
1574789Sahrens 
15753016Smaybee 			mutex_exit(&zvol_state_lock);
15763016Smaybee 			if (ddi_copyout(&gpe, efi.dki_data, sizeof (gpe), flag))
15773016Smaybee 				error = EFAULT;
15783016Smaybee 		} else {
15793016Smaybee 			mutex_exit(&zvol_state_lock);
15803016Smaybee 			error = EINVAL;
15813016Smaybee 		}
1582789Sahrens 		return (error);
1583789Sahrens 
15843638Sbillm 	case DKIOCFLUSHWRITECACHE:
15853897Smaybee 		dkc = (struct dk_callback *)arg;
15863638Sbillm 		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
15873897Smaybee 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
15883897Smaybee 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
15893897Smaybee 			error = 0;
15903897Smaybee 		}
15913638Sbillm 		break;
15923638Sbillm 
15933245Smaybee 	case DKIOCGGEOM:
15943245Smaybee 	case DKIOCGVTOC:
15956423Sgw25295 		/*
15966423Sgw25295 		 * Commands using these ioctls (like prtvtoc) expect ENOTSUP,
15976423Sgw25295 		 * since we're emulating an EFI label.
15986423Sgw25295 		 */
15993245Smaybee 		error = ENOTSUP;
16003245Smaybee 		break;
16013245Smaybee 
16026423Sgw25295 	case DKIOCDUMPINIT:
16036423Sgw25295 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
16046423Sgw25295 		    RL_WRITER);
16056423Sgw25295 		error = zvol_dumpify(zv);
16066423Sgw25295 		zfs_range_unlock(rl);
16076423Sgw25295 		break;
16086423Sgw25295 
16096423Sgw25295 	case DKIOCDUMPFINI:
16106423Sgw25295 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
16116423Sgw25295 		    RL_WRITER);
16126423Sgw25295 		error = zvol_dump_fini(zv);
16136423Sgw25295 		zfs_range_unlock(rl);
16146423Sgw25295 		break;
16156423Sgw25295 
1616789Sahrens 	default:
16173016Smaybee 		error = ENOTTY;
1618789Sahrens 		break;
1619789Sahrens 
1620789Sahrens 	}
1621789Sahrens 	mutex_exit(&zvol_state_lock);
1622789Sahrens 	return (error);
1623789Sahrens }
1624789Sahrens 
1625789Sahrens int
1626789Sahrens zvol_busy(void)
1627789Sahrens {
1628789Sahrens 	return (zvol_minors != 0);
1629789Sahrens }
1630789Sahrens 
1631789Sahrens void
1632789Sahrens zvol_init(void)
1633789Sahrens {
1634789Sahrens 	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
1635789Sahrens 	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1636789Sahrens }
1637789Sahrens 
1638789Sahrens void
1639789Sahrens zvol_fini(void)
1640789Sahrens {
1641789Sahrens 	mutex_destroy(&zvol_state_lock);
1642789Sahrens 	ddi_soft_state_fini(&zvol_state);
1643789Sahrens }
16446423Sgw25295 
16456423Sgw25295 static boolean_t
16466423Sgw25295 zvol_is_swap(zvol_state_t *zv)
16476423Sgw25295 {
16486423Sgw25295 	vnode_t *vp = NULL;
16496423Sgw25295 	boolean_t ret = B_FALSE;
16506423Sgw25295 	char *devpath;
16516423Sgw25295 	size_t devpathlen;
16526423Sgw25295 	int error;
16536423Sgw25295 
16546423Sgw25295 	devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(zv->zv_name) + 1;
16556423Sgw25295 	devpath = kmem_alloc(devpathlen, KM_SLEEP);
16566423Sgw25295 	(void) sprintf(devpath, "%s%s", ZVOL_FULL_DEV_DIR, zv->zv_name);
16576423Sgw25295 	error = lookupname(devpath, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
16586423Sgw25295 	kmem_free(devpath, devpathlen);
16596423Sgw25295 
16606423Sgw25295 	ret = !error && IS_SWAPVP(common_specvp(vp));
16616423Sgw25295 
16626423Sgw25295 	if (vp != NULL)
16636423Sgw25295 		VN_RELE(vp);
16646423Sgw25295 
16656423Sgw25295 	return (ret);
16666423Sgw25295 }
16676423Sgw25295 
16686423Sgw25295 static int
16696423Sgw25295 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
16706423Sgw25295 {
16716423Sgw25295 	dmu_tx_t *tx;
16726423Sgw25295 	int error = 0;
16736423Sgw25295 	objset_t *os = zv->zv_objset;
16746423Sgw25295 	nvlist_t *nv = NULL;
16756423Sgw25295 	uint64_t checksum, compress, refresrv;
16766423Sgw25295 
16776423Sgw25295 	ASSERT(MUTEX_HELD(&zvol_state_lock));
16786423Sgw25295 
16796423Sgw25295 	tx = dmu_tx_create(os);
16806423Sgw25295 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
16816423Sgw25295 	error = dmu_tx_assign(tx, TXG_WAIT);
16826423Sgw25295 	if (error) {
16836423Sgw25295 		dmu_tx_abort(tx);
16846423Sgw25295 		return (error);
16856423Sgw25295 	}
16866423Sgw25295 
16876423Sgw25295 	/*
16886423Sgw25295 	 * If we are resizing the dump device then we only need to
16896423Sgw25295 	 * update the refreservation to match the newly updated
16906423Sgw25295 	 * volsize.  Otherwise, we save off the zvol's original property
16916423Sgw25295 	 * values so that we can restore them if the zvol is ever undumpified.
16926423Sgw25295 	 */
16936423Sgw25295 	if (resize) {
16946423Sgw25295 		error = zap_update(os, ZVOL_ZAP_OBJ,
16956423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
16966423Sgw25295 		    &zv->zv_volsize, tx);
16976423Sgw25295 	} else {
16986423Sgw25295 		error = dsl_prop_get_integer(zv->zv_name,
16996423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
17006423Sgw25295 		error = error ? error : dsl_prop_get_integer(zv->zv_name,
17016423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
17026423Sgw25295 		error = error ? error : dsl_prop_get_integer(zv->zv_name,
17036423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
17046423Sgw25295 
17056423Sgw25295 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
17066423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
17076423Sgw25295 		    &compress, tx);
17086423Sgw25295 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
17096423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
17106423Sgw25295 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
17116423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
17126423Sgw25295 		    &refresrv, tx);
17136423Sgw25295 	}
17146423Sgw25295 	dmu_tx_commit(tx);
17156423Sgw25295 
17166423Sgw25295 	/* Truncate the file */
17176423Sgw25295 	if (!error)
17186992Smaybee 		error = dmu_free_long_range(zv->zv_objset,
17196992Smaybee 		    ZVOL_OBJ, 0, DMU_OBJECT_END);
17206423Sgw25295 
17216423Sgw25295 	if (error)
17226423Sgw25295 		return (error);
17236423Sgw25295 
17246423Sgw25295 	/*
17256423Sgw25295 	 * We only need to update the zvol's properties if we are initializing
17266423Sgw25295 	 * the dump area for the first time.
17276423Sgw25295 	 */
17286423Sgw25295 	if (!resize) {
17296423Sgw25295 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
17306423Sgw25295 		VERIFY(nvlist_add_uint64(nv,
17316423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
17326423Sgw25295 		VERIFY(nvlist_add_uint64(nv,
17336423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
17346423Sgw25295 		    ZIO_COMPRESS_OFF) == 0);
17356423Sgw25295 		VERIFY(nvlist_add_uint64(nv,
17366423Sgw25295 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
17376423Sgw25295 		    ZIO_CHECKSUM_OFF) == 0);
17386423Sgw25295 
17396423Sgw25295 		error = zfs_set_prop_nvlist(zv->zv_name, nv);
17406423Sgw25295 		nvlist_free(nv);
17416423Sgw25295 
17426423Sgw25295 		if (error)
17436423Sgw25295 			return (error);
17446423Sgw25295 	}
17456423Sgw25295 
17466423Sgw25295 	/* Allocate the space for the dump */
17476423Sgw25295 	error = zvol_prealloc(zv);
17486423Sgw25295 	return (error);
17496423Sgw25295 }
17506423Sgw25295 
17516423Sgw25295 static int
17526423Sgw25295 zvol_dumpify(zvol_state_t *zv)
17536423Sgw25295 {
17546423Sgw25295 	int error = 0;
17556423Sgw25295 	uint64_t dumpsize = 0;
17566423Sgw25295 	dmu_tx_t *tx;
17576423Sgw25295 	objset_t *os = zv->zv_objset;
17586423Sgw25295 
17596423Sgw25295 	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY))
17606423Sgw25295 		return (EROFS);
17616423Sgw25295 
17626423Sgw25295 	/*
17636423Sgw25295 	 * We do not support swap devices acting as dump devices.
17646423Sgw25295 	 */
17656423Sgw25295 	if (zvol_is_swap(zv))
17666423Sgw25295 		return (ENOTSUP);
17676423Sgw25295 
17686423Sgw25295 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
17696423Sgw25295 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
17706423Sgw25295 		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;
17716423Sgw25295 
17726423Sgw25295 		if ((error = zvol_dump_init(zv, resize)) != 0) {
17736423Sgw25295 			(void) zvol_dump_fini(zv);
17746423Sgw25295 			return (error);
17756423Sgw25295 		}
17766423Sgw25295 	}
17776423Sgw25295 
17786423Sgw25295 	/*
17796423Sgw25295 	 * Build up our lba mapping.
17806423Sgw25295 	 */
17816423Sgw25295 	error = zvol_get_lbas(zv);
17826423Sgw25295 	if (error) {
17836423Sgw25295 		(void) zvol_dump_fini(zv);
17846423Sgw25295 		return (error);
17856423Sgw25295 	}
17866423Sgw25295 
17876423Sgw25295 	tx = dmu_tx_create(os);
17886423Sgw25295 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
17896423Sgw25295 	error = dmu_tx_assign(tx, TXG_WAIT);
17906423Sgw25295 	if (error) {
17916423Sgw25295 		dmu_tx_abort(tx);
17926423Sgw25295 		(void) zvol_dump_fini(zv);
17936423Sgw25295 		return (error);
17946423Sgw25295 	}
17956423Sgw25295 
17966423Sgw25295 	zv->zv_flags |= ZVOL_DUMPIFIED;
17976423Sgw25295 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
17986423Sgw25295 	    &zv->zv_volsize, tx);
17996423Sgw25295 	dmu_tx_commit(tx);
18006423Sgw25295 
18016423Sgw25295 	if (error) {
18026423Sgw25295 		(void) zvol_dump_fini(zv);
18036423Sgw25295 		return (error);
18046423Sgw25295 	}
18056423Sgw25295 
18066423Sgw25295 	txg_wait_synced(dmu_objset_pool(os), 0);
18076423Sgw25295 	return (0);
18086423Sgw25295 }
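
/*
 * Usage sketch (hypothetical, not part of this file): zvol_dumpify() and
 * zvol_dump_fini() are reached through the DKIOCDUMPINIT and DKIOCDUMPFINI
 * cases in zvol_ioctl(), normally on behalf of dumpadm(1M) and the kernel
 * dump subsystem rather than by hand.  Assuming those ioctl codes are
 * visible to userland via <sys/dkio.h>, a minimal program exercising them
 * against a zvol's raw device node might look like the #if 0 block below;
 * the device path is a placeholder.
 */
#if 0	/* illustrative userland code only -- never built into this driver */
#include <sys/dkio.h>
#include <stropts.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	/* placeholder path; substitute a real zvol raw device node */
	int fd = open("/dev/zvol/rdsk/<pool>/<volume>", O_RDWR);

	if (fd == -1) {
		perror("open");
		return (1);
	}

	/* ask the zvol to preallocate itself and build its dump mapping */
	if (ioctl(fd, DKIOCDUMPINIT) == -1)
		perror("DKIOCDUMPINIT");

	/* later: restore the zvol to its pre-dump state */
	if (ioctl(fd, DKIOCDUMPFINI) == -1)
		perror("DKIOCDUMPFINI");

	(void) close(fd);
	return (0);
}
#endif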
18096423Sgw25295 
18106423Sgw25295 static int
18116423Sgw25295 zvol_dump_fini(zvol_state_t *zv)
18126423Sgw25295 {
18136423Sgw25295 	dmu_tx_t *tx;
18146423Sgw25295 	objset_t *os = zv->zv_objset;
18156423Sgw25295 	nvlist_t *nv;
18166423Sgw25295 	int error = 0;
18176423Sgw25295 	uint64_t checksum, compress, refresrv;
18186423Sgw25295 
18197080Smaybee 	/*
18207080Smaybee 	 * Attempt to restore the zvol back to its pre-dumpified state.
18217080Smaybee 	 * This is a best-effort attempt as it's possible that not all
18227080Smaybee 	 * of these properties were initialized during the dumpify process
18237080Smaybee 	 * (e.g., an error during zvol_dump_init).
18247080Smaybee 	 */
18257080Smaybee 
18266423Sgw25295 	tx = dmu_tx_create(os);
18276423Sgw25295 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
18286423Sgw25295 	error = dmu_tx_assign(tx, TXG_WAIT);
18296423Sgw25295 	if (error) {
18306423Sgw25295 		dmu_tx_abort(tx);
18316423Sgw25295 		return (error);
18326423Sgw25295 	}
18337080Smaybee 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
18347080Smaybee 	dmu_tx_commit(tx);
18356423Sgw25295 
18366423Sgw25295 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
18376423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
18386423Sgw25295 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
18396423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
18406423Sgw25295 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
18416423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
18426423Sgw25295 
18436423Sgw25295 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
18446423Sgw25295 	(void) nvlist_add_uint64(nv,
18456423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
18466423Sgw25295 	(void) nvlist_add_uint64(nv,
18476423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
18486423Sgw25295 	(void) nvlist_add_uint64(nv,
18496423Sgw25295 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
18506423Sgw25295 	(void) zfs_set_prop_nvlist(zv->zv_name, nv);
18516423Sgw25295 	nvlist_free(nv);
18526423Sgw25295 
18537080Smaybee 	zvol_free_extents(zv);
18547080Smaybee 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
18557080Smaybee 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
18567080Smaybee 
18576423Sgw25295 	return (0);
18586423Sgw25295 }
1859