xref: /netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/zvol.c (revision 9fb66d812c00ebfb445c0b47dea128f32aa6fe96)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
25  * All rights reserved.
26  *
27  * Portions Copyright 2010 Robert Milkowski
28  *
29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
31  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
32  * Copyright (c) 2014 Integros [integros.com]
33  */
34 
35 /* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
36 
37 /*
38  * ZFS volume emulation driver.
39  *
40  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
41  * Volumes are accessed through the symbolic links named:
42  *
43  * /dev/zvol/dsk/<pool_name>/<dataset_name>
44  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
45  *
46  * These links are created by the /dev filesystem (sdev_zvolops.c).
47  * Volumes are persistent through reboot.  No user command needs to be
48  * run before opening and using a device.
49  *
50  * FreeBSD notes.
51  * On FreeBSD, ZVOLs are simply GEOM providers, like any other storage
52  * device in the system.
53  */
54 
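/*
 * Illustrative sketch (an addition for exposition, not part of the driver
 * and not compiled): a userland consumer treats a zvol like any other
 * disk device.  The pool/volume names below are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
read_first_sector(char *buf /* at least 512 bytes */)
{
	/* Raw (character) node, using the illumos-style path above. */
	int fd = open("/dev/zvol/rdsk/tank/myvol", O_RDONLY);

	if (fd == -1)
		return (-1);
	/* I/O must respect the volume's sector granularity. */
	ssize_t n = pread(fd, buf, 512, 0);
	(void) close(fd);
	return (n == 512 ? 0 : -1);
}
#endif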
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/kernel.h>
58 #include <sys/errno.h>
59 #include <sys/uio.h>
60 #include <sys/buf.h>
61 #include <sys/open.h>
62 #include <sys/kmem.h>
63 #include <sys/conf.h>
64 #include <sys/cmn_err.h>
65 #include <sys/stat.h>
66 #include <sys/zap.h>
67 #include <sys/spa.h>
68 #include <sys/spa_impl.h>
69 #include <sys/zio.h>
70 #include <sys/disk.h>
71 #include <sys/dmu_traverse.h>
72 #include <sys/dnode.h>
73 #include <sys/dsl_dataset.h>
74 #include <sys/dsl_prop.h>
75 #ifdef __NetBSD__
76 #include <sys/disklabel.h>
77 #endif
78 #include <sys/dkio.h>
79 #include <sys/byteorder.h>
80 #include <sys/sunddi.h>
81 #include <sys/dirent.h>
82 #include <sys/policy.h>
83 #include <sys/queue.h>
84 #include <sys/fs/zfs.h>
85 #include <sys/zfs_ioctl.h>
86 #include <sys/zil.h>
87 #include <sys/refcount.h>
88 #include <sys/zfs_znode.h>
89 #include <sys/zfs_rlock.h>
90 #include <sys/vdev_impl.h>
91 #include <sys/vdev_raidz.h>
92 #include <sys/zvol.h>
93 #include <sys/zil_impl.h>
94 #include <sys/dbuf.h>
95 #include <sys/dmu_tx.h>
96 #include <sys/zfeature.h>
97 #include <sys/zio_checksum.h>
98 #include <sys/filio.h>
99 
100 #include "zfs_namecheck.h"
101 
102 #ifdef __FreeBSD__
103 #include <sys/bio.h>
104 #include <geom/geom.h>
105 
106 struct g_class zfs_zvol_class = {
107 	.name = "ZFS::ZVOL",
108 	.version = G_VERSION,
109 };
110 
111 DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
112 #endif
113 
114 #ifdef __NetBSD__
115 #include <sys/pathname.h>
116 #include <prop/proplib.h>
117 
118 #define	DROP_GIANT()	/* nothing */
119 #define	PICKUP_GIANT()	/* nothing */
120 
121 void	zvol_minphys(struct buf *);
122 static struct dkdriver zvol_dkdriver = { zvol_strategy, zvol_minphys };
123 
124 #define	bioerror(bp, er)	((bp)->b_error = (er))
125 #define	b_edev			b_dev
126 #endif
127 
128 void *zfsdev_state;
129 static char *zvol_tag = "zvol_tag";
130 
131 #define	ZVOL_DUMPSIZE		"dumpsize"
132 
133 #ifdef __FreeBSD__
134 /*
135  * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
136  * spa_namespace_lock in the ZVOL code.
137  */
138 #define zfsdev_state_lock spa_namespace_lock
139 #else
140 /*
141  * This lock protects the zfsdev_state structure from being modified
142  * while it's being used, e.g. an open that comes in before a create
143  * finishes.  It also protects temporary opens of the dataset so that,
144  * e.g., an open doesn't get a spurious EBUSY.
145  */
146 kmutex_t zfsdev_state_lock;
147 #endif
148 static uint32_t zvol_minors;
149 
150 #ifndef illumos
151 SYSCTL_DECL(_vfs_zfs);
152 SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
153 static int	volmode = ZFS_VOLMODE_GEOM;
154 SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
155     "Expose as GEOM providers (1), device files (2) or neither");
156 static boolean_t zpool_on_zvol = B_FALSE;
157 SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
158     "Allow zpools to use zvols as vdevs (DANGEROUS)");
159 
160 #endif
161 typedef struct zvol_extent {
162 	list_node_t	ze_node;
163 	dva_t		ze_dva;		/* dva associated with this extent */
164 	uint64_t	ze_nblks;	/* number of blocks in extent */
165 } zvol_extent_t;
166 
167 /*
168  * The in-core state of each volume.
169  */
170 typedef struct zvol_state {
171 #ifndef illumos
172 	LIST_ENTRY(zvol_state)	zv_links;
173 #endif
174 	char		zv_name[MAXPATHLEN]; /* pool/dataset name */
175 	uint64_t	zv_volsize;	/* amount of space we advertise */
176 	uint64_t	zv_volblocksize; /* volume block size */
177 #ifdef __FreeBSD__
178 	struct cdev	*zv_dev;	/* non-GEOM device */
179 	struct g_provider *zv_provider;	/* GEOM provider */
180 #else
181 	minor_t		zv_minor;	/* minor number */
182 #endif
183 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
184 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
185 	objset_t	*zv_objset;	/* objset handle */
186 #if defined(illumos) || defined(__NetBSD__)
187 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
188 #endif
189 	uint32_t	zv_total_opens;	/* total open count */
190 	uint32_t	zv_sync_cnt;	/* synchronous open count */
191 	zilog_t		*zv_zilog;	/* ZIL handle */
192 	list_t		zv_extents;	/* List of extents for dump */
193 	znode_t		zv_znode;	/* for range locking */
194 	dmu_buf_t	*zv_dbuf;	/* bonus handle */
195 #ifdef __FreeBSD__
196 	int		zv_state;
197 	int		zv_volmode;	/* Provide GEOM or cdev */
198 	struct bio_queue_head zv_queue;
199 	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
200 #endif
201 #ifdef __NetBSD__
202 	struct disk	zv_dk;		/* disk statistics */
203 	kmutex_t	zv_dklock;	/* disk statistics */
204 #endif
205 } zvol_state_t;
206 
207 #ifndef illumos
208 static LIST_HEAD(, zvol_state) all_zvols;
209 #endif
210 /*
211  * zvol specific flags
212  */
213 #define	ZVOL_RDONLY	0x1
214 #define	ZVOL_DUMPIFIED	0x2
215 #define	ZVOL_EXCL	0x4
216 #define	ZVOL_WCE	0x8
217 
218 /*
219  * zvol maximum transfer in one DMU tx.
220  */
221 int zvol_maxphys = DMU_MAX_ACCESS/2;
222 
223 /*
224  * Toggle unmap functionality.
225  */
226 boolean_t zvol_unmap_enabled = B_TRUE;
227 
228 /*
229  * If true, unmaps requested as synchronous are executed synchronously,
230  * otherwise all unmaps are asynchronous.
231  */
232 boolean_t zvol_unmap_sync_enabled = B_FALSE;
233 
234 #ifdef __FreeBSD__
235 SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
236     &zvol_unmap_enabled, 0,
237     "Enable UNMAP functionality");
238 
239 SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_sync_enabled, CTLFLAG_RWTUN,
240     &zvol_unmap_sync_enabled, 0,
241     "UNMAPs requested as sync are executed synchronously");
242 
243 static d_open_t		zvol_d_open;
244 static d_close_t	zvol_d_close;
245 static d_read_t		zvol_read;
246 static d_write_t	zvol_write;
247 static d_ioctl_t	zvol_d_ioctl;
248 static d_strategy_t	zvol_strategy;
249 
250 static struct cdevsw zvol_cdevsw = {
251 	.d_version =	D_VERSION,
252 	.d_open =	zvol_d_open,
253 	.d_close =	zvol_d_close,
254 	.d_read =	zvol_read,
255 	.d_write =	zvol_write,
256 	.d_ioctl =	zvol_d_ioctl,
257 	.d_strategy =	zvol_strategy,
258 	.d_name =	"zvol",
259 	.d_flags =	D_DISK | D_TRACKCLOSE,
260 };
261 
262 static void zvol_geom_run(zvol_state_t *zv);
263 static void zvol_geom_destroy(zvol_state_t *zv);
264 static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
265 static void zvol_geom_start(struct bio *bp);
266 static void zvol_geom_worker(void *arg);
267 static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
268     uint64_t len, boolean_t sync);
269 #endif /* __FreeBSD__ */
270 #ifdef __NetBSD__
271 /* XXXNETBSD need devsw, etc */
272 #endif
273 
274 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
275     nvlist_t *, nvlist_t *);
276 static int zvol_remove_zv(zvol_state_t *);
277 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
278 static int zvol_dumpify(zvol_state_t *zv);
279 static int zvol_dump_fini(zvol_state_t *zv);
280 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
281 
282 static void
283 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
284 {
285 #ifdef illumos
286 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
287 
288 	zv->zv_volsize = volsize;
289 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
290 	    "Size", volsize) == DDI_SUCCESS);
291 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
292 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
293 
294 	/* Notify specfs to invalidate the cached size */
295 	spec_size_invalidate(dev, VBLK);
296 	spec_size_invalidate(dev, VCHR);
297 #endif /* illumos */
298 #ifdef __FreeBSD__
299 	zv->zv_volsize = volsize;
300 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
301 		struct g_provider *pp;
302 
303 		pp = zv->zv_provider;
304 		if (pp == NULL)
305 			return;
306 		g_topology_lock();
307 
308 		/*
309 		 * Do not invoke resize event when initial size was zero.
310 		 * ZVOL initializes the size on first open; this is not
311 		 * a real resize.
312 		 */
313 		if (pp->mediasize == 0)
314 			pp->mediasize = zv->zv_volsize;
315 		else
316 			g_resize_provider(pp, zv->zv_volsize);
317 		g_topology_unlock();
318 	}
319 #endif /* __FreeBSD__ */
320 #ifdef __NetBSD__
321 	struct disk_geom *dg = &zv->zv_dk.dk_geom;
322 	objset_t *os = zv->zv_objset;
323 	spa_t *spa = dmu_objset_spa(os);
324 	unsigned secsize;
325 
326 	zv->zv_volsize = volsize;
327 
328 	secsize = MAX(DEV_BSIZE, 1U << spa->spa_max_ashift);
329 
330 	memset(dg, 0, sizeof(*dg));
331 	dg->dg_secsize = secsize;
332 	dg->dg_secperunit = volsize / secsize;
333 	disk_set_info(NULL, &zv->zv_dk, "ZVOL");
334 #endif
335 }
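/*
 * Worked example for the NetBSD geometry computation above (a sketch, not
 * authoritative): with spa_max_ashift == 12 the sector size becomes
 * MAX(DEV_BSIZE, 1 << 12) == 4096, so a 1 GiB volume advertises
 * dg_secperunit == (1 << 30) / 4096 == 262144 sectors.
 */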
336 
337 
338 int
339 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
340 {
341 	if (volsize == 0)
342 		return (SET_ERROR(EINVAL));
343 
344 	if (volsize % blocksize != 0)
345 		return (SET_ERROR(EINVAL));
346 
347 #ifdef _ILP32
348 	if (volsize - 1 > SPEC_MAXOFFSET_T)
349 		return (SET_ERROR(EOVERFLOW));
350 #endif
351 	return (0);
352 }
353 
354 int
355 zvol_check_volblocksize(uint64_t volblocksize)
356 {
357 	if (volblocksize < SPA_MINBLOCKSIZE ||
358 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
359 	    !ISP2(volblocksize))
360 		return (SET_ERROR(EDOM));
361 
362 	return (0);
363 }
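/*
 * Example of the check above, assuming the usual SPA_MINBLOCKSIZE (512)
 * and SPA_OLD_MAXBLOCKSIZE (128K): 512, 4096 and 131072 pass, while
 * 768 (not a power of two), 256 (too small) and 262144 (too large)
 * all fail with EDOM.
 */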
364 
365 int
366 zvol_get_stats(objset_t *os, nvlist_t *nv)
367 {
368 	int error;
369 	dmu_object_info_t doi;
370 	uint64_t val;
371 
372 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
373 	if (error)
374 		return (error);
375 
376 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
377 
378 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
379 
380 	if (error == 0) {
381 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
382 		    doi.doi_data_block_size);
383 	}
384 
385 	return (error);
386 }
387 
388 static zvol_state_t *
389 zvol_minor_lookup(const char *name)
390 {
391 #ifdef illumos
392 	minor_t minor;
393 #endif
394 	zvol_state_t *zv;
395 
396 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
397 
398 #ifdef illumos
399 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++)
400 #else
401 	LIST_FOREACH(zv, &all_zvols, zv_links)
402 #endif
403 	{
404 #ifdef illumos
405 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
406 		if (zv == NULL)
407 			continue;
408 #endif
409 
410 		if (strcmp(zv->zv_name, name) == 0)
411 			return (zv);
412 	}
413 
414 	return (NULL);
415 }
416 
417 /* extent mapping arg */
418 struct maparg {
419 	zvol_state_t	*ma_zv;
420 	uint64_t	ma_blks;
421 };
422 
423 /*ARGSUSED*/
424 static int
425 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
426     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
427 {
428 	struct maparg *ma = arg;
429 	zvol_extent_t *ze;
430 	int bs = ma->ma_zv->zv_volblocksize;
431 
432 	if (bp == NULL || BP_IS_HOLE(bp) ||
433 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
434 		return (0);
435 
436 	VERIFY(!BP_IS_EMBEDDED(bp));
437 
438 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
439 	ma->ma_blks++;
440 
441 	/* Abort immediately if we have encountered gang blocks */
442 	if (BP_IS_GANG(bp))
443 		return (SET_ERROR(EFRAGS));
444 
445 	/*
446 	 * See if the block is at the end of the previous extent.
447 	 */
448 	ze = list_tail(&ma->ma_zv->zv_extents);
449 	if (ze &&
450 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
451 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
452 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
453 		ze->ze_nblks++;
454 		return (0);
455 	}
456 
457 	dprintf_bp(bp, "%s", "next blkptr:");
458 
459 	/* start a new extent */
460 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
461 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
462 	ze->ze_nblks = 1;
463 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
464 	return (0);
465 }
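/*
 * Extent coalescing example: if block n maps to <vdev 0, offset X> and
 * block n+1 maps to <vdev 0, offset X + volblocksize>, the second block
 * simply bumps ze_nblks on the tail extent; only a discontiguous DVA
 * starts a new zvol_extent_t.
 */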
466 
467 static void
468 zvol_free_extents(zvol_state_t *zv)
469 {
470 	zvol_extent_t *ze;
471 
472 	while ((ze = list_head(&zv->zv_extents)) != NULL) {
473 		list_remove(&zv->zv_extents, ze);
474 		kmem_free(ze, sizeof (zvol_extent_t));
475 	}
476 }
477 
478 static int
479 zvol_get_lbas(zvol_state_t *zv)
480 {
481 	objset_t *os = zv->zv_objset;
482 	struct maparg	ma;
483 	int		err;
484 
485 	ma.ma_zv = zv;
486 	ma.ma_blks = 0;
487 	zvol_free_extents(zv);
488 
489 	/* commit any in-flight changes before traversing the dataset */
490 	txg_wait_synced(dmu_objset_pool(os), 0);
491 	err = traverse_dataset(dmu_objset_ds(os), 0,
492 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
493 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
494 		zvol_free_extents(zv);
495 		return (err ? err : EIO);
496 	}
497 
498 	return (0);
499 }
500 
501 /* ARGSUSED */
502 void
503 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
504 {
505 	zfs_creat_t *zct = arg;
506 	nvlist_t *nvprops = zct->zct_props;
507 	int error;
508 	uint64_t volblocksize, volsize;
509 
510 	VERIFY(nvlist_lookup_uint64(nvprops,
511 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
512 	if (nvlist_lookup_uint64(nvprops,
513 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
514 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
515 
516 	/*
517 	 * These properties must be removed from the list so the generic
518 	 * property setting step won't apply to them.
519 	 */
520 	VERIFY(nvlist_remove_all(nvprops,
521 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
522 	(void) nvlist_remove_all(nvprops,
523 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
524 
525 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
526 	    DMU_OT_NONE, 0, tx);
527 	ASSERT(error == 0);
528 
529 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
530 	    DMU_OT_NONE, 0, tx);
531 	ASSERT(error == 0);
532 
533 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
534 	ASSERT(error == 0);
535 }
536 
537 /*
538  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
539  * implement DKIOCFREE/free-long-range.
540  */
541 static int
542 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
543 {
544 	uint64_t offset, length;
545 
546 	if (byteswap)
547 		byteswap_uint64_array(lr, sizeof (*lr));
548 
549 	offset = lr->lr_offset;
550 	length = lr->lr_length;
551 
552 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
553 }
554 
555 /*
556  * Replay a TX_WRITE ZIL transaction that didn't get committed
557  * after a system failure.
558  */
559 static int
560 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
561 {
562 	objset_t *os = zv->zv_objset;
563 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
564 	uint64_t offset, length;
565 	dmu_tx_t *tx;
566 	int error;
567 
568 	if (byteswap)
569 		byteswap_uint64_array(lr, sizeof (*lr));
570 
571 	offset = lr->lr_offset;
572 	length = lr->lr_length;
573 
574 	/* If it's a dmu_sync() block, write the whole block */
575 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
576 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
577 		if (length < blocksize) {
578 			offset -= offset % blocksize;
579 			length = blocksize;
580 		}
581 	}
582 
583 	tx = dmu_tx_create(os);
584 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
585 	error = dmu_tx_assign(tx, TXG_WAIT);
586 	if (error) {
587 		dmu_tx_abort(tx);
588 	} else {
589 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
590 		dmu_tx_commit(tx);
591 	}
592 
593 	return (error);
594 }
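/*
 * Rounding example for the dmu_sync() case above: with an 8K block size,
 * a logged 2K write at offset 12K is replayed as the whole 8K block at
 * offset 8K, since the block pointer describes the entire block rather
 * than the byte range.
 */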
595 
596 /* ARGSUSED */
597 static int
598 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
599 {
600 	return (SET_ERROR(ENOTSUP));
601 }
602 
603 /*
604  * Callback vectors for replaying records.
605  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
606  */
607 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
608 	zvol_replay_err,	/* 0 no such transaction type */
609 	zvol_replay_err,	/* TX_CREATE */
610 	zvol_replay_err,	/* TX_MKDIR */
611 	zvol_replay_err,	/* TX_MKXATTR */
612 	zvol_replay_err,	/* TX_SYMLINK */
613 	zvol_replay_err,	/* TX_REMOVE */
614 	zvol_replay_err,	/* TX_RMDIR */
615 	zvol_replay_err,	/* TX_LINK */
616 	zvol_replay_err,	/* TX_RENAME */
617 	zvol_replay_write,	/* TX_WRITE */
618 	zvol_replay_truncate,	/* TX_TRUNCATE */
619 	zvol_replay_err,	/* TX_SETATTR */
620 	zvol_replay_err,	/* TX_ACL */
621 	zvol_replay_err,	/* TX_CREATE_ACL */
622 	zvol_replay_err,	/* TX_CREATE_ATTR */
623 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
624 	zvol_replay_err,	/* TX_MKDIR_ACL */
625 	zvol_replay_err,	/* TX_MKDIR_ATTR */
626 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
627 	zvol_replay_err,	/* TX_WRITE2 */
628 };
629 
630 #ifdef illumos
631 int
632 zvol_name2minor(const char *name, minor_t *minor)
633 {
634 	zvol_state_t *zv;
635 
636 	mutex_enter(&zfsdev_state_lock);
637 	zv = zvol_minor_lookup(name);
638 	if (minor && zv)
639 		*minor = zv->zv_minor;
640 	mutex_exit(&zfsdev_state_lock);
641 	return (zv ? 0 : -1);
642 }
643 #endif	/* illumos */
644 
645 /*
646  * Create a minor node (plus a whole lot more) for the specified volume.
647  */
648 int
649 zvol_create_minor(const char *name)
650 {
651 	zfs_soft_state_t *zs;
652 	zvol_state_t *zv;
653 	objset_t *os;
654 	int error;
655 #ifdef illumos
656 	dmu_object_info_t doi;
657 	minor_t minor = 0;
658 	char chrbuf[30], blkbuf[30];
659 #endif
660 #ifdef __FreeBSD__
661 	struct g_provider *pp;
662 	struct g_geom *gp;
663 	uint64_t mode;
664 
665 	ZFS_LOG(1, "Creating ZVOL %s...", name);
666 #endif
667 #ifdef __NetBSD__
668 	dmu_object_info_t doi;
669 	minor_t minor = 0;
670 	vnode_t *vp = NULL;
671 	char *devpath;
672 	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(name) + 2;
673 #endif
674 
675 	mutex_enter(&zfsdev_state_lock);
676 
677 	if (zvol_minor_lookup(name) != NULL) {
678 		mutex_exit(&zfsdev_state_lock);
679 		return (SET_ERROR(EEXIST));
680 	}
681 
682 	/* lie and say we're read-only */
683 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
684 
685 	if (error) {
686 		mutex_exit(&zfsdev_state_lock);
687 		return (error);
688 	}
689 
690 #ifdef illumos
691 	if ((minor = zfsdev_minor_alloc()) == 0) {
692 		dmu_objset_disown(os, FTAG);
693 		mutex_exit(&zfsdev_state_lock);
694 		return (SET_ERROR(ENXIO));
695 	}
696 
697 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
698 		dmu_objset_disown(os, FTAG);
699 		mutex_exit(&zfsdev_state_lock);
700 		return (SET_ERROR(EAGAIN));
701 	}
702 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
703 	    (char *)name);
704 
705 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
706 
707 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
708 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
709 		ddi_soft_state_free(zfsdev_state, minor);
710 		dmu_objset_disown(os, FTAG);
711 		mutex_exit(&zfsdev_state_lock);
712 		return (SET_ERROR(EAGAIN));
713 	}
714 
715 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
716 
717 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
718 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
719 		ddi_remove_minor_node(zfs_dip, chrbuf);
720 		ddi_soft_state_free(zfsdev_state, minor);
721 		dmu_objset_disown(os, FTAG);
722 		mutex_exit(&zfsdev_state_lock);
723 		return (SET_ERROR(EAGAIN));
724 	}
725 
726 	zs = ddi_get_soft_state(zfsdev_state, minor);
727 	zs->zss_type = ZSST_ZVOL;
728 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
729 #endif /* illumos */
730 
731 #ifdef __FreeBSD__
732 	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
733 	zv->zv_state = 0;
734 	error = dsl_prop_get_integer(name,
735 	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
736 	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
737 		mode = volmode;
738 
739 	DROP_GIANT();
740 	zv->zv_volmode = mode;
741 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
742 		g_topology_lock();
743 		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
744 		gp->start = zvol_geom_start;
745 		gp->access = zvol_geom_access;
746 		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
747 		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
748 		pp->sectorsize = DEV_BSIZE;
749 		pp->mediasize = 0;
750 		pp->private = zv;
751 
752 		zv->zv_provider = pp;
753 		bioq_init(&zv->zv_queue);
754 		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
755 	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
756 		struct make_dev_args args;
757 
758 		make_dev_args_init(&args);
759 		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
760 		args.mda_devsw = &zvol_cdevsw;
761 		args.mda_cr = NULL;
762 		args.mda_uid = UID_ROOT;
763 		args.mda_gid = GID_OPERATOR;
764 		args.mda_mode = 0640;
765 		args.mda_si_drv2 = zv;
766 		error = make_dev_s(&args, &zv->zv_dev,
767 		    "%s/%s", ZVOL_DRIVER, name);
768 		if (error != 0) {
769 			kmem_free(zv, sizeof(*zv));
770 			dmu_objset_disown(os, FTAG);
771 			mutex_exit(&zfsdev_state_lock);
772 			return (error);
773 		}
774 		zv->zv_dev->si_iosize_max = MAXPHYS;
775 	}
776 	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
777 #endif /* __FreeBSD__ */
778 
779 #ifdef __NetBSD__
780 
781 	/*
782 	 * If there's an existing /dev/zvol symlink, try to use the
783 	 * same minor number we used last time.
784 	 */
785 	devpath = kmem_alloc(devpathlen, KM_SLEEP);
786 
787 	/* Get full path to ZFS volume disk device */
788 	(void) snprintf(devpath, devpathlen, "%s/%s", ZVOL_FULL_DEV_DIR, name);
789 
790 	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);
791 
792 	if (error == 0 && vp->v_type != VBLK) {
793 		error = EINVAL;
794 	}
795 
796 	if (error == 0) {
797 		struct stat sb;
798 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
799 		error = vn_stat(vp, &sb);
800 		VOP_UNLOCK(vp, 0);
801 		if (error == 0) {
802 			minor = getminor(sb.st_rdev);
803 		}
804 	}
805 
806 	if (vp != NULL)
807 		VN_RELE(vp);
808 
809 	/*
810 	 * If we found a minor but it's already in use, we must pick a new one.
811 	 */
812 
813 	if (minor != 0 && zfsdev_get_soft_state(minor, ZSST_ZVOL) != NULL)
814 		minor = 0;
815 
816 	if (minor == 0)
817 		minor = zfsdev_minor_alloc();
818 
819 	if (minor == 0) {
820 		dmu_objset_disown(os, FTAG);
821 		mutex_exit(&zfsdev_state_lock);
822 		kmem_free(devpath, devpathlen);
823 		return (ENXIO);
824 	}
825 
826 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
827 		dmu_objset_disown(os, FTAG);
828 		mutex_exit(&zfsdev_state_lock);
829 		kmem_free(devpath, devpathlen);
830 		return (EAGAIN);
831 	}
832 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
833 	    (char *)name);
834 
835 	if (ddi_create_minor_node(zfs_dip, (char *)name, S_IFCHR,
836 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
837 		ddi_soft_state_free(zfsdev_state, minor);
838 		dmu_objset_disown(os, FTAG);
839 		mutex_exit(&zfsdev_state_lock);
840 		kmem_free(devpath, devpathlen);
841 		return (EAGAIN);
842 	}
843 
844 	if (ddi_create_minor_node(zfs_dip, (char *)name, S_IFBLK,
845 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
846 		ddi_remove_minor_node(zfs_dip, (char *)name);
847 		ddi_soft_state_free(zfsdev_state, minor);
848 		dmu_objset_disown(os, FTAG);
849 		mutex_exit(&zfsdev_state_lock);
850 		kmem_free(devpath, devpathlen);
851 		return (EAGAIN);
852 	}
853 	zs = ddi_get_soft_state(zfsdev_state, minor);
854 	zs->zss_type = ZSST_ZVOL;
855 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
856 
857 	disk_init(&zv->zv_dk, name, &zvol_dkdriver);
858 	disk_attach(&zv->zv_dk);
859 	mutex_init(&zv->zv_dklock, NULL, MUTEX_DEFAULT, NULL);
860 
861 	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
862 #endif /* __NetBSD__ */
863 
864 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
865 	zv->zv_min_bs = DEV_BSHIFT;
866 #if defined(illumos) || defined(__NetBSD__)
867 	zv->zv_minor = minor;
868 #endif
869 	zv->zv_objset = os;
870 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
871 		zv->zv_flags |= ZVOL_RDONLY;
872 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
873 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
874 	    sizeof (rl_t), offsetof(rl_t, r_node));
875 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
876 	    offsetof(zvol_extent_t, ze_node));
877 #if defined(illumos) || defined(__NetBSD__)
878 	/* get and cache the blocksize */
879 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
880 	ASSERT(error == 0);
881 	zv->zv_volblocksize = doi.doi_data_block_size;
882 #endif
883 
884 	if (spa_writeable(dmu_objset_spa(os))) {
885 		if (zil_replay_disable)
886 			zil_destroy(dmu_objset_zil(os), B_FALSE);
887 		else
888 			zil_replay(os, zv, zvol_replay_vector);
889 	}
890 	dmu_objset_disown(os, FTAG);
891 	zv->zv_objset = NULL;
892 
893 	zvol_minors++;
894 
895 	mutex_exit(&zfsdev_state_lock);
896 #ifdef __FreeBSD__
897 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
898 		zvol_geom_run(zv);
899 		g_topology_unlock();
900 	}
901 	PICKUP_GIANT();
902 
903 	ZFS_LOG(1, "ZVOL %s created.", name);
904 #endif
905 	return (0);
906 }
907 
908 /*
909  * Remove minor node for the specified volume.
910  */
911 static int
912 zvol_remove_zv(zvol_state_t *zv)
913 {
914 
915 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
916 	if (zv->zv_total_opens != 0)
917 		return (SET_ERROR(EBUSY));
918 
919 #ifdef illumos
920 	char nmbuf[20];
921 	minor_t minor = zv->zv_minor;
922 
923 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
924 	ddi_remove_minor_node(zfs_dip, nmbuf);
925 
926 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
927 	ddi_remove_minor_node(zfs_dip, nmbuf);
928 #endif
929 #ifdef __FreeBSD__
930 	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
931 
932 	LIST_REMOVE(zv, zv_links);
933 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
934 		g_topology_lock();
935 		zvol_geom_destroy(zv);
936 		g_topology_unlock();
937 	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
938 		if (zv->zv_dev != NULL)
939 			destroy_dev(zv->zv_dev);
940 	}
941 #endif
942 #ifdef __NetBSD__
943 	char nmbuf[MAXPATHLEN];
944 	minor_t minor = zv->zv_minor;
945 
946 	LIST_REMOVE(zv, zv_links);
947 
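	/*
	 * Remove both minor nodes (char and block) created in
	 * zvol_create_minor(); presumably each call removes one node
	 * with the matching name, hence the pair of calls below.
	 */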
948 	(void) snprintf(nmbuf, sizeof (nmbuf), "%s", zv->zv_name);
949 	ddi_remove_minor_node(zfs_dip, nmbuf);
950 
951 	(void) snprintf(nmbuf, sizeof (nmbuf), "%s", zv->zv_name);
952 	ddi_remove_minor_node(zfs_dip, nmbuf);
953 
954 	disk_detach(&zv->zv_dk);
955 	disk_destroy(&zv->zv_dk);
956 	mutex_destroy(&zv->zv_dklock);
957 #endif
958 
959 	avl_destroy(&zv->zv_znode.z_range_avl);
960 	mutex_destroy(&zv->zv_znode.z_range_lock);
961 
962 	kmem_free(zv, sizeof (zvol_state_t));
963 #ifdef illumos
964 	ddi_soft_state_free(zfsdev_state, minor);
965 #endif
966 #ifdef __NetBSD__
967 	ddi_soft_state_free(zfsdev_state, minor);
968 #endif
969 	zvol_minors--;
970 	return (0);
971 }
972 
973 int
974 zvol_remove_minor(const char *name)
975 {
976 	zvol_state_t *zv;
977 	int rc;
978 
979 	mutex_enter(&zfsdev_state_lock);
980 	if ((zv = zvol_minor_lookup(name)) == NULL) {
981 		mutex_exit(&zfsdev_state_lock);
982 		return (SET_ERROR(ENXIO));
983 	}
989 	rc = zvol_remove_zv(zv);
990 	mutex_exit(&zfsdev_state_lock);
991 	return (rc);
992 }
993 
994 int
995 zvol_first_open(zvol_state_t *zv)
996 {
997 	dmu_object_info_t doi;
998 	objset_t *os;
999 	uint64_t volsize;
1000 	int error;
1001 	uint64_t readonly;
1002 
1003 	/* lie and say we're read-only */
1004 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
1005 	    zvol_tag, &os);
1006 	if (error)
1007 		return (error);
1008 
1009 	zv->zv_objset = os;
1010 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1011 	if (error) {
1012 		ASSERT(error == 0);
1013 		dmu_objset_disown(os, zvol_tag);
1014 		return (error);
1015 	}
1016 
1017 	/* get and cache the blocksize */
1018 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
1019 	if (error) {
1020 		ASSERT(error == 0);
1021 		dmu_objset_disown(os, zvol_tag);
1022 		return (error);
1023 	}
1024 	zv->zv_volblocksize = doi.doi_data_block_size;
1025 
1026 	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
1027 	if (error) {
1028 		dmu_objset_disown(os, zvol_tag);
1029 		return (error);
1030 	}
1031 
1032 	zvol_size_changed(zv, volsize);
1033 	zv->zv_zilog = zil_open(os, zvol_get_data);
1034 
1035 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
1036 	    NULL) == 0);
1037 	if (readonly || dmu_objset_is_snapshot(os) ||
1038 	    !spa_writeable(dmu_objset_spa(os)))
1039 		zv->zv_flags |= ZVOL_RDONLY;
1040 	else
1041 		zv->zv_flags &= ~ZVOL_RDONLY;
1042 	return (error);
1043 }
1044 
1045 void
1046 zvol_last_close(zvol_state_t *zv)
1047 {
1048 	zil_close(zv->zv_zilog);
1049 	zv->zv_zilog = NULL;
1050 
1051 	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
1052 	zv->zv_dbuf = NULL;
1053 
1054 	/*
1055 	 * Evict cached data
1056 	 */
1057 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
1058 	    !(zv->zv_flags & ZVOL_RDONLY))
1059 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1060 	dmu_objset_evict_dbufs(zv->zv_objset);
1061 
1062 	dmu_objset_disown(zv->zv_objset, zvol_tag);
1063 	zv->zv_objset = NULL;
1064 }
1065 
1066 #ifdef illumos
1067 int
1068 zvol_prealloc(zvol_state_t *zv)
1069 {
1070 	objset_t *os = zv->zv_objset;
1071 	dmu_tx_t *tx;
1072 	uint64_t refd, avail, usedobjs, availobjs;
1073 	uint64_t resid = zv->zv_volsize;
1074 	uint64_t off = 0;
1075 
1076 	/* Check the space usage before attempting to allocate the space */
1077 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
1078 	if (avail < zv->zv_volsize)
1079 		return (SET_ERROR(ENOSPC));
1080 
1081 	/* Free old extents if they exist */
1082 	zvol_free_extents(zv);
1083 
1084 	while (resid != 0) {
1085 		int error;
1086 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
1087 
1088 		tx = dmu_tx_create(os);
1089 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1090 		error = dmu_tx_assign(tx, TXG_WAIT);
1091 		if (error) {
1092 			dmu_tx_abort(tx);
1093 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
1094 			return (error);
1095 		}
1096 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
1097 		dmu_tx_commit(tx);
1098 		off += bytes;
1099 		resid -= bytes;
1100 	}
1101 	txg_wait_synced(dmu_objset_pool(os), 0);
1102 
1103 	return (0);
1104 }
1105 #endif	/* illumos */
1106 
1107 static int
1108 zvol_update_volsize(objset_t *os, uint64_t volsize)
1109 {
1110 	dmu_tx_t *tx;
1111 	int error;
1112 
1113 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1114 
1115 	tx = dmu_tx_create(os);
1116 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1117 	dmu_tx_mark_netfree(tx);
1118 	error = dmu_tx_assign(tx, TXG_WAIT);
1119 	if (error) {
1120 		dmu_tx_abort(tx);
1121 		return (error);
1122 	}
1123 
1124 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
1125 	    &volsize, tx);
1126 	dmu_tx_commit(tx);
1127 
1128 	if (error == 0)
1129 		error = dmu_free_long_range(os,
1130 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
1131 	return (error);
1132 }
1133 
1134 void
1135 zvol_remove_minors(const char *name)
1136 {
1137 #ifdef illumos
1138 	zvol_state_t *zv;
1139 	char *namebuf;
1140 	minor_t minor;
1141 
1142 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
1143 	(void) strncpy(namebuf, name, strlen(name));
1144 	(void) strcat(namebuf, "/");
1145 	mutex_enter(&zfsdev_state_lock);
1146 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
1147 
1148 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1149 		if (zv == NULL)
1150 			continue;
1151 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
1152 			(void) zvol_remove_zv(zv);
1153 	}
1154 	kmem_free(namebuf, strlen(name) + 2);
1155 
1156 	mutex_exit(&zfsdev_state_lock);
1157 #else	/* !illumos */
1158 	zvol_state_t *zv, *tzv;
1159 	size_t namelen;
1160 
1161 	namelen = strlen(name);
1162 
1163 	DROP_GIANT();
1164 	mutex_enter(&zfsdev_state_lock);
1165 
1166 	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
1167 		if (strcmp(zv->zv_name, name) == 0 ||
1168 		    (strncmp(zv->zv_name, name, namelen) == 0 &&
1169 		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
1170 		    zv->zv_name[namelen] == '@'))) {
1171 			(void) zvol_remove_zv(zv);
1172 		}
1173 	}
1174 
1175 	mutex_exit(&zfsdev_state_lock);
1176 	PICKUP_GIANT();
1177 #endif	/* illumos */
1178 }
1179 
1180 static int
1181 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
1182 {
1183 	uint64_t old_volsize = 0ULL;
1184 	int error = 0;
1185 
1186 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1187 
1188 	/*
1189 	 * Reinitialize the dump area to the new size. If we
1190 	 * failed to resize the dump area then restore it back to
1191 	 * its original size.  We must set the new volsize prior
1192  * to calling dumpvp_resize() to ensure that the device's
1193  * size(9P) is not visible to the dump subsystem.
1194 	 */
1195 	old_volsize = zv->zv_volsize;
1196 	zvol_size_changed(zv, volsize);
1197 
1198 #ifdef ZVOL_DUMP
1199 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1200 		if ((error = zvol_dumpify(zv)) != 0 ||
1201 		    (error = dumpvp_resize()) != 0) {
1202 			int dumpify_error;
1203 
1204 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
1205 			zvol_size_changed(zv, old_volsize);
1206 			dumpify_error = zvol_dumpify(zv);
1207 			error = dumpify_error ? dumpify_error : error;
1208 		}
1209 	}
1210 #endif	/* ZVOL_DUMP */
1211 
1212 #ifdef illumos
1213 	/*
1214 	 * Generate a LUN expansion event.
1215 	 */
1216 	if (error == 0) {
1217 		sysevent_id_t eid;
1218 		nvlist_t *attr;
1219 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1220 
1221 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
1222 		    zv->zv_minor);
1223 
1224 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1225 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
1226 
1227 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
1228 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
1229 
1230 		nvlist_free(attr);
1231 		kmem_free(physpath, MAXPATHLEN);
1232 	}
1233 #endif	/* illumos */
1234 	return (error);
1235 }
1236 
1237 int
1238 zvol_set_volsize(const char *name, uint64_t volsize)
1239 {
1240 	zvol_state_t *zv = NULL;
1241 	objset_t *os;
1242 	int error;
1243 	dmu_object_info_t doi;
1244 	uint64_t readonly;
1245 	boolean_t owned = B_FALSE;
1246 
1247 	error = dsl_prop_get_integer(name,
1248 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
1249 	if (error != 0)
1250 		return (error);
1251 	if (readonly)
1252 		return (SET_ERROR(EROFS));
1253 
1254 	mutex_enter(&zfsdev_state_lock);
1255 	zv = zvol_minor_lookup(name);
1256 
1257 	if (zv == NULL || zv->zv_objset == NULL) {
1258 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
1259 		    FTAG, &os)) != 0) {
1260 			mutex_exit(&zfsdev_state_lock);
1261 			return (error);
1262 		}
1263 		owned = B_TRUE;
1264 		if (zv != NULL)
1265 			zv->zv_objset = os;
1266 	} else {
1267 		os = zv->zv_objset;
1268 	}
1269 
1270 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
1271 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
1272 		goto out;
1273 
1274 	error = zvol_update_volsize(os, volsize);
1275 
1276 	if (error == 0 && zv != NULL)
1277 		error = zvol_update_live_volsize(zv, volsize);
1278 out:
1279 	if (owned) {
1280 		dmu_objset_disown(os, FTAG);
1281 		if (zv != NULL)
1282 			zv->zv_objset = NULL;
1283 	}
1284 	mutex_exit(&zfsdev_state_lock);
1285 	return (error);
1286 }
1287 
1288 /*ARGSUSED*/
1289 #ifdef illumos
1290 int
1291 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
1292 #endif
1293 #ifdef __FreeBSD__
1294 static int
1295 zvol_open(struct g_provider *pp, int flag, int count)
1296 #endif
1297 #ifdef __NetBSD__
1298 int
1299 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
1300 #endif
1301 {
1302 	zvol_state_t *zv;
1303 	int err = 0;
1304 
1305 #ifdef illumos
1306 	mutex_enter(&zfsdev_state_lock);
1307 
1308 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
1309 	if (zv == NULL) {
1310 		mutex_exit(&zfsdev_state_lock);
1311 		return (SET_ERROR(ENXIO));
1312 	}
1313 
1314 	if (zv->zv_total_opens == 0)
1315 		err = zvol_first_open(zv);
1316 	if (err) {
1317 		mutex_exit(&zfsdev_state_lock);
1318 		return (err);
1319 	}
1320 #endif /* illumos */
1321 #ifdef __FreeBSD__
1322 	boolean_t locked = B_FALSE;
1323 
1324 	if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
1325 		/*
1326 		 * If zfs_geom_probe_vdev_key is set, zfs is attempting to
1327 		 * probe geom providers while looking for a replacement for
1328 		 * a missing VDEV.  In this case, the spa_namespace_lock
1329 		 * will not be held, but it is still illegal to use a zvol
1330 		 * as a vdev.  Deadlocks can result if another thread
1331 		 * holds spa_namespace_lock.
1332 		 */
1333 		return (EOPNOTSUPP);
1334 	}
1335 	/*
1336 	 * Protect against recursively entering spa_namespace_lock
1337 	 * when spa_open() is used for a pool on a (local) ZVOL(s).
1338 	 * This is needed since we replaced upstream zfsdev_state_lock
1339 	 * with spa_namespace_lock in the ZVOL code.
1340 	 * We are using the same trick as spa_open().
1341 	 * Note that calls in zvol_first_open which need to resolve
1342 	 * pool name to a spa object will enter spa_open()
1343 	 * recursively, but that function already has all the
1344 	 * necessary protection.
1345 	 */
1346 	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1347 		mutex_enter(&zfsdev_state_lock);
1348 		locked = B_TRUE;
1349 	}
1350 
1351 	zv = pp->private;
1352 	if (zv == NULL) {
1353 		if (locked)
1354 			mutex_exit(&zfsdev_state_lock);
1355 		return (SET_ERROR(ENXIO));
1356 	}
1357 
1358 	if (zv->zv_total_opens == 0) {
1359 		err = zvol_first_open(zv);
1360 		if (err) {
1361 			if (locked)
1362 				mutex_exit(&zfsdev_state_lock);
1363 			return (err);
1364 		}
1365 		pp->mediasize = zv->zv_volsize;
1366 		pp->stripeoffset = 0;
1367 		pp->stripesize = zv->zv_volblocksize;
1368 	}
1369 #endif /* __FreeBSD__ */
1370 #ifdef __NetBSD__
1371 	mutex_enter(&zfsdev_state_lock);
1372 
1373 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
1374 	if (zv == NULL) {
1375 		mutex_exit(&zfsdev_state_lock);
1376 		return (SET_ERROR(ENXIO));
1377 	}
1378 
1379 	if (zv->zv_total_opens == 0)
1380 		err = zvol_first_open(zv);
1381 	if (err) {
1382 		mutex_exit(&zfsdev_state_lock);
1383 		return (err);
1384 	}
1385 #endif
1386 
1387 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1388 		err = SET_ERROR(EROFS);
1389 		goto out;
1390 	}
1391 	if (zv->zv_flags & ZVOL_EXCL) {
1392 		err = SET_ERROR(EBUSY);
1393 		goto out;
1394 	}
1395 #ifdef FEXCL
1396 	if (flag & FEXCL) {
1397 		if (zv->zv_total_opens != 0) {
1398 			err = SET_ERROR(EBUSY);
1399 			goto out;
1400 		}
1401 		zv->zv_flags |= ZVOL_EXCL;
1402 	}
1403 #endif
1404 
1405 #ifdef illumos
1406 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
1407 		zv->zv_open_count[otyp]++;
1408 		zv->zv_total_opens++;
1409 	}
1410 	mutex_exit(&zfsdev_state_lock);
1411 #endif
1412 #ifdef __FreeBSD__
1413 	zv->zv_total_opens += count;
1414 	if (locked)
1415 		mutex_exit(&zfsdev_state_lock);
1416 #endif
1417 #ifdef __NetBSD__
1418 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
1419 		zv->zv_open_count[otyp]++;
1420 		zv->zv_total_opens++;
1421 	}
1422 	mutex_exit(&zfsdev_state_lock);
1423 #endif
1424 
1425 	return (err);
1426 out:
1427 	if (zv->zv_total_opens == 0)
1428 		zvol_last_close(zv);
1429 #ifdef __FreeBSD__
1430 	if (locked)
1431 #endif
1432 		mutex_exit(&zfsdev_state_lock);
1433 
1434 	return (err);
1435 }
1436 
1437 /*ARGSUSED*/
1438 #if defined(illumos) || defined(__NetBSD__)
1439 int
1440 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
1441 #endif
1442 #ifdef __FreeBSD__
1443 static int
1444 zvol_close(struct g_provider *pp, int flag, int count)
1445 #endif
1446 {
1447 #if defined(illumos) || defined(__NetBSD__)
1448 	minor_t minor = getminor(dev);
1449 	zvol_state_t *zv;
1450 	int error = 0;
1451 
1452 	mutex_enter(&zfsdev_state_lock);
1453 
1454 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1455 	if (zv == NULL) {
1456 		mutex_exit(&zfsdev_state_lock);
1457 		return (SET_ERROR(ENXIO));
1458 	}
1459 #endif /* illumos */
1460 #ifdef __FreeBSD__
1461 	zvol_state_t *zv;
1462 	int error = 0;
1463 	boolean_t locked = B_FALSE;
1464 
1465 	/* See comment in zvol_open(). */
1466 	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1467 		mutex_enter(&zfsdev_state_lock);
1468 		locked = B_TRUE;
1469 	}
1470 
1471 	zv = pp->private;
1472 	if (zv == NULL) {
1473 		if (locked)
1474 			mutex_exit(&zfsdev_state_lock);
1475 		return (SET_ERROR(ENXIO));
1476 	}
1477 #endif /* __FreeBSD__ */
1478 
1479 	if (zv->zv_flags & ZVOL_EXCL) {
1480 		ASSERT(zv->zv_total_opens == 1);
1481 		zv->zv_flags &= ~ZVOL_EXCL;
1482 	}
1483 
1484 	/*
1485 	 * If the open count is zero, this is a spurious close.
1486 	 * That indicates a bug in the kernel / DDI framework.
1487 	 */
1488 #if defined(illumos) || defined(__NetBSD__)
1489 	ASSERT(zv->zv_open_count[otyp] != 0);
1490 #endif
1491 	ASSERT(zv->zv_total_opens != 0);
1492 
1493 	/*
1494 	 * You may get multiple opens, but only one close.
1495 	 */
1496 #if defined(illumos) || defined(__NetBSD__)
1497 	zv->zv_open_count[otyp]--;
1498 	zv->zv_total_opens--;
1499 #else
1500 	zv->zv_total_opens -= count;
1501 #endif
1502 
1503 	if (zv->zv_total_opens == 0)
1504 		zvol_last_close(zv);
1505 
1506 #if defined(illumos) || defined(__NetBSD__)
1507 	mutex_exit(&zfsdev_state_lock);
1508 #else
1509 	if (locked)
1510 		mutex_exit(&zfsdev_state_lock);
1511 #endif
1512 	return (error);
1513 }
1514 
1515 static void
1516 zvol_get_done(zgd_t *zgd, int error)
1517 {
1518 	if (zgd->zgd_db)
1519 		dmu_buf_rele(zgd->zgd_db, zgd);
1520 
1521 	zfs_range_unlock(zgd->zgd_rl);
1522 
1523 	if (error == 0 && zgd->zgd_bp)
1524 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1525 
1526 	kmem_free(zgd, sizeof (zgd_t));
1527 }
1528 
1529 /*
1530  * Get data to generate a TX_WRITE intent log record.
1531  */
1532 static int
1533 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1534 {
1535 	zvol_state_t *zv = arg;
1536 	objset_t *os = zv->zv_objset;
1537 	uint64_t object = ZVOL_OBJ;
1538 	uint64_t offset = lr->lr_offset;
1539 	uint64_t size = lr->lr_length;	/* length of user data */
1540 	blkptr_t *bp = &lr->lr_blkptr;
1541 	dmu_buf_t *db;
1542 	zgd_t *zgd;
1543 	int error;
1544 
1545 	ASSERT(zio != NULL);
1546 	ASSERT(size != 0);
1547 
1548 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1549 	zgd->zgd_zilog = zv->zv_zilog;
1550 	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1551 
1552 	/*
1553 	 * Write records come in two flavors: immediate and indirect.
1554 	 * For small writes it's cheaper to store the data with the
1555 	 * log record (immediate); for large writes it's cheaper to
1556 	 * sync the data and get a pointer to it (indirect) so that
1557 	 * we don't have to write the data twice.
1558 	 */
1559 	if (buf != NULL) {	/* immediate write */
1560 		error = dmu_read(os, object, offset, size, buf,
1561 		    DMU_READ_NO_PREFETCH);
1562 	} else {
1563 		size = zv->zv_volblocksize;
1564 		offset = P2ALIGN(offset, size);
1565 		error = dmu_buf_hold(os, object, offset, zgd, &db,
1566 		    DMU_READ_NO_PREFETCH);
1567 		if (error == 0) {
1568 			blkptr_t *obp = dmu_buf_get_blkptr(db);
1569 			if (obp) {
1570 				ASSERT(BP_IS_HOLE(bp));
1571 				*bp = *obp;
1572 			}
1573 
1574 			zgd->zgd_db = db;
1575 			zgd->zgd_bp = bp;
1576 
1577 			ASSERT(db->db_offset == offset);
1578 			ASSERT(db->db_size == size);
1579 
1580 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1581 			    zvol_get_done, zgd);
1582 
1583 			if (error == 0)
1584 				return (0);
1585 		}
1586 	}
1587 
1588 	zvol_get_done(zgd, error);
1589 
1590 	return (error);
1591 }
1592 
1593 /*
1594  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1595  *
1596  * We store data in the log buffers if it's small enough.
1597  * Otherwise we will later flush the data out via dmu_sync().
1598  */
1599 ssize_t zvol_immediate_write_sz = 32768;
1600 #ifdef _KERNEL
1601 SYSCTL_LONG(_vfs_zfs_vol, OID_AUTO, immediate_write_sz, CTLFLAG_RWTUN,
1602     &zvol_immediate_write_sz, 0, "Minimal size for indirect log write");
1603 #endif
1604 
1605 static void
1606 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1607     boolean_t sync)
1608 {
1609 	uint32_t blocksize = zv->zv_volblocksize;
1610 	zilog_t *zilog = zv->zv_zilog;
1611 	itx_wr_state_t write_state;
1612 
1613 	if (zil_replaying(zilog, tx))
1614 		return;
1615 
1616 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1617 		write_state = WR_INDIRECT;
1618 	else if (!spa_has_slogs(zilog->zl_spa) &&
1619 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1620 		write_state = WR_INDIRECT;
1621 	else if (sync)
1622 		write_state = WR_COPIED;
1623 	else
1624 		write_state = WR_NEED_COPY;
1625 
1626 	while (resid) {
1627 		itx_t *itx;
1628 		lr_write_t *lr;
1629 		itx_wr_state_t wr_state = write_state;
1630 		ssize_t len = resid;
1631 
1632 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1633 			wr_state = WR_NEED_COPY;
1634 		else if (wr_state == WR_INDIRECT)
1635 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1636 
1637 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1638 		    (wr_state == WR_COPIED ? len : 0));
1639 		lr = (lr_write_t *)&itx->itx_lr;
1640 		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
1641 		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1642 			zil_itx_destroy(itx);
1643 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1644 			lr = (lr_write_t *)&itx->itx_lr;
1645 			wr_state = WR_NEED_COPY;
1646 		}
1647 
1648 		itx->itx_wr_state = wr_state;
1649 		lr->lr_foid = ZVOL_OBJ;
1650 		lr->lr_offset = off;
1651 		lr->lr_length = len;
1652 		lr->lr_blkoff = 0;
1653 		BP_ZERO(&lr->lr_blkptr);
1654 
1655 		itx->itx_private = zv;
1656 
1657 		if (!sync && (zv->zv_sync_cnt == 0))
1658 			itx->itx_sync = B_FALSE;
1659 
1660 		zil_itx_assign(zilog, itx, tx);
1661 
1662 		off += len;
1663 		resid -= len;
1664 	}
1665 }
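/*
 * Selection example for write_state above (assuming the stock
 * zvol_immediate_write_sz of 32K, default logbias, and no separate log
 * device): a sync write of one full 64K block goes WR_INDIRECT; a small
 * sync write (within ZIL_MAX_COPIED_DATA) is WR_COPIED, with the data
 * copied into the itx; an async write defers the copy as WR_NEED_COPY.
 */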
1666 
1667 #ifdef illumos
1668 static int
1669 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1670     uint64_t size, boolean_t doread, boolean_t isdump)
1671 {
1672 	vdev_disk_t *dvd;
1673 	int c;
1674 	int numerrors = 0;
1675 
1676 	if (vd->vdev_ops == &vdev_mirror_ops ||
1677 	    vd->vdev_ops == &vdev_replacing_ops ||
1678 	    vd->vdev_ops == &vdev_spare_ops) {
1679 		for (c = 0; c < vd->vdev_children; c++) {
1680 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1681 			    addr, offset, origoffset, size, doread, isdump);
1682 			if (err != 0) {
1683 				numerrors++;
1684 			} else if (doread) {
1685 				break;
1686 			}
1687 		}
1688 	}
1689 
1690 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1691 		return (numerrors < vd->vdev_children ? 0 : EIO);
1692 
1693 	if (doread && !vdev_readable(vd))
1694 		return (SET_ERROR(EIO));
1695 	else if (!doread && !vdev_writeable(vd))
1696 		return (SET_ERROR(EIO));
1697 
1698 	if (vd->vdev_ops == &vdev_raidz_ops) {
1699 		return (vdev_raidz_physio(vd,
1700 		    addr, size, offset, origoffset, doread, isdump));
1701 	}
1702 
1703 	offset += VDEV_LABEL_START_SIZE;
1704 
1705 	if (ddi_in_panic() || isdump) {
1706 		ASSERT(!doread);
1707 		if (doread)
1708 			return (SET_ERROR(EIO));
1709 		dvd = vd->vdev_tsd;
1710 		ASSERT3P(dvd, !=, NULL);
1711 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1712 		    lbtodb(size)));
1713 	} else {
1714 		dvd = vd->vdev_tsd;
1715 		ASSERT3P(dvd, !=, NULL);
1716 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1717 		    offset, doread ? B_READ : B_WRITE));
1718 	}
1719 }
1720 
1721 static int
1722 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1723     boolean_t doread, boolean_t isdump)
1724 {
1725 	vdev_t *vd;
1726 	int error;
1727 	zvol_extent_t *ze;
1728 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1729 
1730 	/* Must be sector aligned, and not straddle a block boundary. */
1731 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1732 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1733 		return (SET_ERROR(EINVAL));
1734 	}
1735 	ASSERT(size <= zv->zv_volblocksize);
1736 
1737 	/* Locate the extent this belongs to */
1738 	ze = list_head(&zv->zv_extents);
1739 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1740 		offset -= ze->ze_nblks * zv->zv_volblocksize;
1741 		ze = list_next(&zv->zv_extents, ze);
1742 	}
1743 
1744 	if (ze == NULL)
1745 		return (SET_ERROR(EINVAL));
1746 
1747 	if (!ddi_in_panic())
1748 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1749 
1750 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1751 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1752 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1753 	    size, doread, isdump);
1754 
1755 	if (!ddi_in_panic())
1756 		spa_config_exit(spa, SCL_STATE, FTAG);
1757 
1758 	return (error);
1759 }
1760 #else /* !illumos */
1761 static inline int
1762 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1763     boolean_t doread, boolean_t isdump)
1764 {
1765 	return (0);
1766 }
1767 #endif /* illumos */
1768 
1769 #ifdef illumos
1770 int
1771 zvol_strategy(buf_t *bp)
1772 #endif
1773 #ifdef __FreeBSD__
1774 void
1775 zvol_strategy(struct bio *bp)
1776 #endif
1777 #ifdef __NetBSD__
1778 void
1779 zvol_strategy(buf_t *bp)
1780 #endif
1781 {
1782 	zvol_state_t *zv;
1783 	uint64_t off, volsize;
1784 	size_t resid;
1785 	char *addr;
1786 	objset_t *os;
1787 	rl_t *rl;
1788 	int error = 0;
1789 #if defined(illumos) || defined(__NetBSD__)
1790 	boolean_t doread = bp->b_flags & B_READ;
1791 #else
1792 	boolean_t doread = 0;
1793 #endif
1794 	boolean_t is_dumpified;
1795 	boolean_t sync;
1796 
1797 #ifdef illumos
1798 	zfs_soft_state_t *zs = NULL;
1799 
1800 	if (getminor(bp->b_edev) == 0) {
1801 		error = SET_ERROR(EINVAL);
1802 	} else {
1803 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1804 		if (zs == NULL)
1805 			error = SET_ERROR(ENXIO);
1806 		else if (zs->zss_type != ZSST_ZVOL)
1807 			error = SET_ERROR(EINVAL);
1808 	}
1809 
1810 	if (error) {
1811 		bioerror(bp, error);
1812 		biodone(bp);
1813 		return (0);
1814 	}
1815 
1816 	zv = zs->zss_data;
1817 
1818 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1819 		bioerror(bp, EROFS);
1820 		biodone(bp);
1821 		return (0);
1822 	}
1823 
1824 	off = ldbtob(bp->b_blkno);
1825 #endif /* illumos */
1826 #ifdef __FreeBSD__
1827 	if (bp->bio_to)
1828 		zv = bp->bio_to->private;
1829 	else
1830 		zv = bp->bio_dev->si_drv2;
1831 
1832 	if (zv == NULL) {
1833 		error = SET_ERROR(ENXIO);
1834 		goto out;
1835 	}
1836 
1837 	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
1838 		error = SET_ERROR(EROFS);
1839 		goto out;
1840 	}
1841 
1842 	switch (bp->bio_cmd) {
1843 	case BIO_FLUSH:
1844 		goto sync;
1845 	case BIO_READ:
1846 		doread = 1;
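		/* FALLTHROUGH */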
1847 	case BIO_WRITE:
1848 	case BIO_DELETE:
1849 		break;
1850 	default:
1851 		error = EOPNOTSUPP;
1852 		goto out;
1853 	}
1854 
1855 	off = bp->bio_offset;
1856 #endif /* __FreeBSD__ */
1857 #ifdef __NetBSD__
1858 	zfs_soft_state_t *zs = NULL;
1859 
1860 	if (getminor(bp->b_edev) == 0) {
1861 		error = SET_ERROR(EINVAL);
1862 	} else {
1863 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1864 		if (zs == NULL)
1865 			error = SET_ERROR(ENXIO);
1866 		else if (zs->zss_type != ZSST_ZVOL)
1867 			error = SET_ERROR(EINVAL);
1868 	}
1869 
1870 	if (error) {
1871 		bioerror(bp, error);
1872 		biodone(bp);
1873 		return;
1874 	}
1875 
1876 	zv = zs->zss_data;
1877 
1878 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1879 		bioerror(bp, EROFS);
1880 		biodone(bp);
1881 		return;
1882 	}
1883 	off = (uint64_t)bp->b_blkno * DEV_BSIZE;
1884 #endif
1885 
1886 	volsize = zv->zv_volsize;
1887 
1888 	os = zv->zv_objset;
1889 	ASSERT(os != NULL);
1890 
1891 #ifdef illumos
1892 	bp_mapin(bp);
1893 	addr = bp->b_un.b_addr;
1894 	resid = bp->b_bcount;
1895 
1896 	if (resid > 0 && (off < 0 || off >= volsize)) {
1897 		bioerror(bp, EIO);
1898 		biodone(bp);
1899 		return (0);
1900 	}
1901 
1902 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1903 	sync = ((!(bp->b_flags & B_ASYNC) &&
1904 	    !(zv->zv_flags & ZVOL_WCE)) ||
1905 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1906 	    !doread && !is_dumpified;
1907 #endif /* illumos */
1908 #ifdef __FreeBSD__
1909 	addr = bp->bio_data;
1910 	resid = bp->bio_length;
1911 
1912 	if (resid > 0 && (off < 0 || off >= volsize)) {
1913 		error = SET_ERROR(EIO);
1914 		goto out;
1915 	}
1916 
1917 	is_dumpified = B_FALSE;
1918 	sync = !doread && !is_dumpified &&
1919 	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
1920 #endif /* __FreeBSD__ */
1921 #ifdef __NetBSD__
1922 	addr = bp->b_data;
1923 	resid = bp->b_bcount;
1924 
1925 	if (resid > 0 && off >= volsize) {
1926 		bioerror(bp, EIO);
1927 		biodone(bp);
1928 		return;
1929 	}
1930 
1931 	is_dumpified = B_FALSE;
1932 	sync = ((!(bp->b_flags & B_ASYNC) &&
1933 	    !(zv->zv_flags & ZVOL_WCE)) ||
1934 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1935 	    !doread && !is_dumpified;
1936 
1937 	mutex_enter(&zv->zv_dklock);
1938 	disk_busy(&zv->zv_dk);
1939 	mutex_exit(&zv->zv_dklock);
1940 #endif
1941 
1942 	/*
1943 	 * There must be no buffer changes when doing a dmu_sync() because
1944 	 * we can't change the data whilst calculating the checksum.
1945 	 */
1946 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1947 	    doread ? RL_READER : RL_WRITER);
1948 
1949 #ifdef __FreeBSD__
1950 	if (bp->bio_cmd == BIO_DELETE) {
1951 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1952 		error = dmu_tx_assign(tx, TXG_WAIT);
1953 		if (error != 0) {
1954 			dmu_tx_abort(tx);
1955 		} else {
1956 			zvol_log_truncate(zv, tx, off, resid, sync);
1957 			dmu_tx_commit(tx);
1958 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1959 			    off, resid);
1960 			resid = 0;
1961 		}
1962 		goto unlock;
1963 	}
1964 #endif
1965 	while (resid != 0 && off < volsize) {
1966 		size_t size = MIN(resid, zvol_maxphys);
1967 		if (is_dumpified) {
1968 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1969 			error = zvol_dumpio(zv, addr, off, size,
1970 			    doread, B_FALSE);
1971 		} else if (doread) {
1972 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1973 			    DMU_READ_PREFETCH);
1974 		} else {
1975 			dmu_tx_t *tx = dmu_tx_create(os);
1976 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1977 			error = dmu_tx_assign(tx, TXG_WAIT);
1978 			if (error) {
1979 				dmu_tx_abort(tx);
1980 			} else {
1981 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1982 				zvol_log_write(zv, tx, off, size, sync);
1983 				dmu_tx_commit(tx);
1984 			}
1985 		}
1986 		if (error) {
1987 			/* convert checksum errors into IO errors */
1988 			if (error == ECKSUM)
1989 				error = SET_ERROR(EIO);
1990 			break;
1991 		}
1992 		off += size;
1993 		addr += size;
1994 		resid -= size;
1995 	}
1996 #ifdef __FreeBSD__
1997 unlock:
1998 #endif
1999 	zfs_range_unlock(rl);
2000 
2001 #ifdef illumos
2002 	if ((bp->b_resid = resid) == bp->b_bcount)
2003 		bioerror(bp, off > volsize ? EINVAL : error);
2004 
2005 	if (sync)
2006 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2007 	biodone(bp);
2008 
2009 	return (0);
2010 #endif /* illumos */
2011 #ifdef __FreeBSD__
2012 	bp->bio_completed = bp->bio_length - resid;
2013 	if (bp->bio_completed < bp->bio_length && off > volsize)
2014 		error = EINVAL;
2015 
2016 	if (sync) {
2017 sync:
2018 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2019 	}
2020 out:
2021 	if (bp->bio_to)
2022 		g_io_deliver(bp, error);
2023 	else
2024 		biofinish(bp, NULL, error);
2025 #endif /* __FreeBSD__ */
2026 #ifdef __NetBSD__
2027 	if ((bp->b_resid = resid) == bp->b_bcount)
2028 		bioerror(bp, off > volsize ? EINVAL : error);
2029 
2030 	if (sync)
2031 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2032 	mutex_enter(&zv->zv_dklock);
2033 	disk_unbusy(&zv->zv_dk, bp->b_bcount - bp->b_resid, doread);
2034 	mutex_exit(&zv->zv_dklock);
2035 	biodone(bp);
2036 #endif /* __NetBSD__ */
2037 }
2038 
2039 #if defined(illumos) || defined(__NetBSD__)
2040 /*
2041  * Set the buffer count to the zvol maximum transfer.
2042  * Using our own routine instead of the default minphys()
2043  * means that for larger writes we write bigger buffers on X86
2044  * (128K instead of 56K) and flush the disk write cache less often:
2045  * every zvol_maxphys (currently 1MB) instead of every minphys
2046  * (currently 56K on X86 and 128K on sparc).
2047  */
2048 void
2049 zvol_minphys(struct buf *bp)
2050 {
2051 	if (bp->b_bcount > zvol_maxphys)
2052 		bp->b_bcount = zvol_maxphys;
2053 }
2054 #endif
2055 
2056 #ifdef illumos
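/*
 * Dump-device entry point: copies crash-dump data to the preallocated
 * zvol blocks one volblocksize-aligned chunk at a time via zvol_dumpio(),
 * bypassing the normal DMU write path.
 */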
2057 int
2058 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
2059 {
2060 	minor_t minor = getminor(dev);
2061 	zvol_state_t *zv;
2062 	int error = 0;
2063 	uint64_t size;
2064 	uint64_t boff;
2065 	uint64_t resid;
2066 
2067 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
2068 	if (zv == NULL)
2069 		return (SET_ERROR(ENXIO));
2070 
2071 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
2072 		return (SET_ERROR(EINVAL));
2073 
2074 	boff = ldbtob(blkno);
2075 	resid = ldbtob(nblocks);
2076 
2077 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
2078 
2079 	while (resid) {
2080 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
2081 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
2082 		if (error)
2083 			break;
2084 		boff += size;
2085 		addr += size;
2086 		resid -= size;
2087 	}
2088 
2089 	return (error);
2090 }
2091 #endif
2092 
2093 /*ARGSUSED*/
2094 #if defined(illumos) || defined(__NetBSD__)
2095 int
2096 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
2097 #endif
2098 #ifdef __FreeBSD__
2099 int
2100 zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
2101 #endif
2102 {
2103 	zvol_state_t *zv;
2104 	uint64_t volsize;
2105 	rl_t *rl;
2106 	int error = 0;
2107 
2108 #if defined(illumos) || defined(__NetBSD__)
2109 	minor_t minor = getminor(dev);
2110 
2111 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
2112 	if (zv == NULL)
2113 		return (SET_ERROR(ENXIO));
2114 #else
2115 	zv = dev->si_drv2;
2116 #endif
2117 
2118 	volsize = zv->zv_volsize;
2119 	/* uio_loffset == volsize isn't an error, as it's required for EOF processing. */
2120 	if (uio->uio_resid > 0 &&
2121 	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
2122 		return (SET_ERROR(EIO));
2123 
2124 #ifdef illumos
2125 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
2126 		error = physio(zvol_strategy, NULL, dev, B_READ,
2127 		    zvol_minphys, uio);
2128 		return (error);
2129 	}
2130 #endif
2131 
2132 #ifdef __NetBSD__
2133 	uint64_t resid = uio->uio_resid;
2134 	mutex_enter(&zv->zv_dklock);
2135 	disk_busy(&zv->zv_dk);
2136 	mutex_exit(&zv->zv_dklock);
2137 #endif
2138 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
2139 	    RL_READER);
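	/* Read in chunks of at most half of DMU_MAX_ACCESS per DMU call. */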
2140 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
2141 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
2142 
2143 		/* don't read past the end */
2144 		if (bytes > volsize - uio->uio_loffset)
2145 			bytes = volsize - uio->uio_loffset;
2146 
2147 		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
2148 		if (error) {
2149 			/* convert checksum errors into IO errors */
2150 			if (error == ECKSUM)
2151 				error = SET_ERROR(EIO);
2152 			break;
2153 		}
2154 	}
2155 	zfs_range_unlock(rl);
2156 #ifdef __NetBSD__
2157 	mutex_enter(&zv->zv_dklock);
2158 	disk_unbusy(&zv->zv_dk, resid - uio->uio_resid, 1);
2159 	mutex_exit(&zv->zv_dklock);
2160 #endif
2161 	return (error);
2162 }
2163 
2164 /*ARGSUSED*/
2165 #if defined(illumos) || defined(__NetBSD__)
2166 int
2167 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
2168 #else
2169 int
2170 zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
2171 #endif
2172 {
2173 	zvol_state_t *zv;
2174 	uint64_t volsize;
2175 	rl_t *rl;
2176 	int error = 0;
2177 	boolean_t sync;
2178 
2179 #if defined(illumos) || defined(__NetBSD__)
2180 	minor_t minor = getminor(dev);
2181 
2182 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
2183 	if (zv == NULL)
2184 		return (SET_ERROR(ENXIO));
2185 #else
2186 	zv = dev->si_drv2;
2187 #endif
2188 
2189 	volsize = zv->zv_volsize;
2190 	/* uio_loffset == volsize isn't an error, as it's required for EOF processing. */
2191 	if (uio->uio_resid > 0 &&
2192 	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
2193 		return (SET_ERROR(EIO));
2194 
2195 #ifdef illumos
2196 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
2197 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
2198 		    zvol_minphys, uio);
2199 		return (error);
2200 	}
2201 
2202 	sync = !(zv->zv_flags & ZVOL_WCE) ||
2203 #endif
2204 #ifdef __FreeBSD__
2205 	sync = (ioflag & IO_SYNC) ||
2206 #endif
2207 #ifdef __NetBSD__
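	/* NetBSD always treats zvol writes as synchronous. */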
2208 	sync = 1 ||
2209 #endif
2210 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
2211 
2212 #ifdef __NetBSD__
2213 	uint64_t resid = uio->uio_resid;
2214 	mutex_enter(&zv->zv_dklock);
2215 	disk_busy(&zv->zv_dk);
2216 	mutex_exit(&zv->zv_dklock);
2217 #endif
2218 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
2219 	    RL_WRITER);
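	/*
	 * Each chunk is written inside its own transaction and logged to
	 * the ZIL before commit, so it can be replayed if the system
	 * crashes before the txg syncs out.
	 */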
2220 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
2221 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
2222 		uint64_t off = uio->uio_loffset;
2223 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
2224 
2225 		if (bytes > volsize - off)	/* don't write past the end */
2226 			bytes = volsize - off;
2227 
2228 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
2229 		error = dmu_tx_assign(tx, TXG_WAIT);
2230 		if (error) {
2231 			dmu_tx_abort(tx);
2232 			break;
2233 		}
2234 		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
2235 		if (error == 0)
2236 			zvol_log_write(zv, tx, off, bytes, sync);
2237 		dmu_tx_commit(tx);
2238 
2239 		if (error)
2240 			break;
2241 	}
2242 	zfs_range_unlock(rl);
2243 	if (sync)
2244 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2245 #ifdef __NetBSD__
2246 	mutex_enter(&zv->zv_dklock);
2247 	disk_unbusy(&zv->zv_dk, resid - uio->uio_resid, 0);
2248 	mutex_exit(&zv->zv_dklock);
2249 #endif
2250 	return (error);
2251 }
2252 
2253 #ifdef illumos
2254 int
2255 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
2256 {
2257 	struct uuid uuid = EFI_RESERVED;
2258 	efi_gpe_t gpe = { 0 };
2259 	uint32_t crc;
2260 	dk_efi_t efi;
2261 	int length;
2262 	char *ptr;
2263 
2264 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
2265 		return (SET_ERROR(EFAULT));
2266 	ptr = (char *)(uintptr_t)efi.dki_data_64;
2267 	length = efi.dki_length;
2268 	/*
2269 	 * Some clients may attempt to request a PMBR for the
2270 	 * zvol.  Currently this interface will return EINVAL to
2271 	 * such requests.  These requests could be supported by
2272 	 * adding a check for lba == 0 and consing up an appropriate
2273 	 * PMBR.
2274 	 */
2275 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
2276 		return (SET_ERROR(EINVAL));
2277 
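	/*
	 * Describe a single EFI_RESERVED partition spanning LBA 34 through
	 * the last usable LBA of the volume.
	 */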
2278 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
2279 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
2280 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
2281 
2282 	if (efi.dki_lba == 1) {
2283 		efi_gpt_t gpt = { 0 };
2284 
2285 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
2286 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
2287 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
2288 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
2289 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
2290 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
2291 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
2292 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
2293 		gpt.efi_gpt_SizeOfPartitionEntry =
2294 		    LE_32(sizeof (efi_gpe_t));
2295 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
2296 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
2297 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
2298 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
2299 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
2300 		    flag))
2301 			return (SET_ERROR(EFAULT));
2302 		ptr += sizeof (gpt);
2303 		length -= sizeof (gpt);
2304 	}
2305 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
2306 	    length), flag))
2307 		return (SET_ERROR(EFAULT));
2308 	return (0);
2309 }
2310 
2311 /*
2312  * BEGIN entry points to allow external callers access to the volume.
2313  */
2314 /*
2315  * Return the volume parameters needed for access from an external caller.
2316  * These values are invariant as long as the volume is held open.
2317  */
2318 int
2319 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
2320     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
2321     void **rl_hdl, void **bonus_hdl)
2322 {
2323 	zvol_state_t *zv;
2324 
2325 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
2326 	if (zv == NULL)
2327 		return (SET_ERROR(ENXIO));
2328 	if (zv->zv_flags & ZVOL_DUMPIFIED)
2329 		return (SET_ERROR(ENXIO));
2330 
2331 	ASSERT(blksize && max_xfer_len && minor_hdl &&
2332 	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
2333 
2334 	*blksize = zv->zv_volblocksize;
2335 	*max_xfer_len = (uint64_t)zvol_maxphys;
2336 	*minor_hdl = zv;
2337 	*objset_hdl = zv->zv_objset;
2338 	*zil_hdl = zv->zv_zilog;
2339 	*rl_hdl = &zv->zv_znode;
2340 	*bonus_hdl = zv->zv_dbuf;
2341 	return (0);
2342 }
2343 
2344 /*
2345  * Return the current volume size to an external caller.
2346  * The size can change while the volume is open.
2347  */
2348 uint64_t
2349 zvol_get_volume_size(void *minor_hdl)
2350 {
2351 	zvol_state_t *zv = minor_hdl;
2352 
2353 	return (zv->zv_volsize);
2354 }
2355 
2356 /*
2357  * Return the current WCE setting to an external caller.
2358  * The WCE setting can change while the volume is open.
2359  */
2360 int
2361 zvol_get_volume_wce(void *minor_hdl)
2362 {
2363 	zvol_state_t *zv = minor_hdl;
2364 
2365 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
2366 }
2367 
2368 /*
2369  * Entry point for external callers to zvol_log_write
2370  */
2371 void
2372 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
2373     boolean_t sync)
2374 {
2375 	zvol_state_t *zv = minor_hdl;
2376 
2377 	zvol_log_write(zv, tx, off, resid, sync);
2378 }
2379 /*
2380  * END entry points to allow external callers access to the volume.
2381  */
2382 #endif	/* illumos */
2383 
2384 /*
2385  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
2386  */
2387 static void
2388 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
2389     boolean_t sync)
2390 {
2391 	itx_t *itx;
2392 	lr_truncate_t *lr;
2393 	zilog_t *zilog = zv->zv_zilog;
2394 
2395 	if (zil_replaying(zilog, tx))
2396 		return;
2397 
2398 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
2399 	lr = (lr_truncate_t *)&itx->itx_lr;
2400 	lr->lr_foid = ZVOL_OBJ;
2401 	lr->lr_offset = off;
2402 	lr->lr_length = len;
2403 
2404 	itx->itx_sync = (sync || zv->zv_sync_cnt != 0);
2405 	zil_itx_assign(zilog, itx, tx);
2406 }
2407 
2408 #ifdef illumos
2409 /*
2410  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
2411  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
2412  */
2413 /*ARGSUSED*/
2414 int
2415 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
2416 {
2417 	zvol_state_t *zv;
2418 	struct dk_callback *dkc;
2419 	int error = 0;
2420 	rl_t *rl;
2421 
2422 	mutex_enter(&zfsdev_state_lock);
2423 
2424 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
2425 
2426 	if (zv == NULL) {
2427 		mutex_exit(&zfsdev_state_lock);
2428 		return (SET_ERROR(ENXIO));
2429 	}
2430 	ASSERT(zv->zv_total_opens > 0);
2431 
2432 	switch (cmd) {
2433 
2434 	case DKIOCINFO:
2435 	{
2436 		struct dk_cinfo dki;
2437 
2438 		bzero(&dki, sizeof (dki));
2439 		(void) strcpy(dki.dki_cname, "zvol");
2440 		(void) strcpy(dki.dki_dname, "zvol");
2441 		dki.dki_ctype = DKC_UNKNOWN;
2442 		dki.dki_unit = getminor(dev);
2443 		dki.dki_maxtransfer =
2444 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
2445 		mutex_exit(&zfsdev_state_lock);
2446 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
2447 			error = SET_ERROR(EFAULT);
2448 		return (error);
2449 	}
2450 
2451 	case DKIOCGMEDIAINFO:
2452 	{
2453 		struct dk_minfo dkm;
2454 
2455 		bzero(&dkm, sizeof (dkm));
2456 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
2457 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2458 		dkm.dki_media_type = DK_UNKNOWN;
2459 		mutex_exit(&zfsdev_state_lock);
2460 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
2461 			error = SET_ERROR(EFAULT);
2462 		return (error);
2463 	}
2464 
2465 	case DKIOCGMEDIAINFOEXT:
2466 	{
2467 		struct dk_minfo_ext dkmext;
2468 
2469 		bzero(&dkmext, sizeof (dkmext));
2470 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
2471 		dkmext.dki_pbsize = zv->zv_volblocksize;
2472 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2473 		dkmext.dki_media_type = DK_UNKNOWN;
2474 		mutex_exit(&zfsdev_state_lock);
2475 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
2476 			error = SET_ERROR(EFAULT);
2477 		return (error);
2478 	}
2479 
2480 	case DKIOCGETEFI:
2481 	{
2482 		uint64_t vs = zv->zv_volsize;
2483 		uint8_t bs = zv->zv_min_bs;
2484 
2485 		mutex_exit(&zfsdev_state_lock);
2486 		error = zvol_getefi((void *)arg, flag, vs, bs);
2487 		return (error);
2488 	}
2489 
2490 	case DKIOCFLUSHWRITECACHE:
2491 		dkc = (struct dk_callback *)arg;
2492 		mutex_exit(&zfsdev_state_lock);
2493 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2494 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
2495 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
2496 			error = 0;
2497 		}
2498 		return (error);
2499 
2500 	case DKIOCGETWCE:
2501 	{
2502 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
2503 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
2504 		    flag))
2505 			error = SET_ERROR(EFAULT);
2506 		break;
2507 	}
2508 	case DKIOCSETWCE:
2509 	{
2510 		int wce;
2511 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2512 		    flag)) {
2513 			error = SET_ERROR(EFAULT);
2514 			break;
2515 		}
2516 		if (wce) {
2517 			zv->zv_flags |= ZVOL_WCE;
2518 			mutex_exit(&zfsdev_state_lock);
2519 		} else {
2520 			zv->zv_flags &= ~ZVOL_WCE;
2521 			mutex_exit(&zfsdev_state_lock);
2522 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2523 		}
2524 		return (0);
2525 	}
2526 
2527 	case DKIOCGGEOM:
2528 	case DKIOCGVTOC:
2529 		/*
2530 		 * commands using these (like prtvtoc) expect ENOTSUP
2531 		 * since we're emulating an EFI label
2532 		 */
2533 		error = SET_ERROR(ENOTSUP);
2534 		break;
2535 
2536 	case DKIOCDUMPINIT:
2537 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2538 		    RL_WRITER);
2539 		error = zvol_dumpify(zv);
2540 		zfs_range_unlock(rl);
2541 		break;
2542 
2543 	case DKIOCDUMPFINI:
2544 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2545 			break;
2546 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2547 		    RL_WRITER);
2548 		error = zvol_dump_fini(zv);
2549 		zfs_range_unlock(rl);
2550 		break;
2551 
2552 	case DKIOCFREE:
2553 	{
2554 		dkioc_free_t df;
2555 		dmu_tx_t *tx;
2556 
2557 		if (!zvol_unmap_enabled)
2558 			break;
2559 
2560 		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2561 			error = SET_ERROR(EFAULT);
2562 			break;
2563 		}
2564 
2565 		/*
2566 		 * Apply Postel's Law to length-checking.  If the caller
2567 		 * overshoots, just free out to the end of the volume, if
2568 		 * there is anything to free at all.
2569 		 */
2570 		if (df.df_start >= zv->zv_volsize)
2571 			break;	/* No need to do anything... */
2572 
2573 		mutex_exit(&zfsdev_state_lock);
2574 
2575 		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2576 		    RL_WRITER);
2577 		tx = dmu_tx_create(zv->zv_objset);
2578 		dmu_tx_mark_netfree(tx);
2579 		error = dmu_tx_assign(tx, TXG_WAIT);
2580 		if (error != 0) {
2581 			dmu_tx_abort(tx);
2582 		} else {
2583 			zvol_log_truncate(zv, tx, df.df_start,
2584 			    df.df_length, B_TRUE);
2585 			dmu_tx_commit(tx);
2586 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2587 			    df.df_start, df.df_length);
2588 		}
2589 
2590 		zfs_range_unlock(rl);
2591 
2592 		/*
2593 		 * If the write cache is disabled, the 'sync' property
2594 		 * is set to 'always', or the caller is asking for
2595 		 * a synchronous free, commit this operation to the ZIL.
2596 		 * This will sync any previous uncommitted writes to the
2597 		 * zvol object.
2598 		 * Can be overridden by the zvol_unmap_sync_enabled tunable.
2599 		 */
2600 		if ((error == 0) && zvol_unmap_sync_enabled &&
2601 		    (!(zv->zv_flags & ZVOL_WCE) ||
2602 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
2603 		    (df.df_flags & DF_WAIT_SYNC))) {
2604 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2605 		}
2606 
2607 		return (error);
2608 	}
2609 
2610 	default:
2611 		error = SET_ERROR(ENOTTY);
2612 		break;
2613 
2614 	}
2615 	mutex_exit(&zfsdev_state_lock);
2616 	return (error);
2617 }
2618 #endif	/* illumos */
2619 
2620 int
2621 zvol_busy(void)
2622 {
2623 	return (zvol_minors != 0);
2624 }
2625 
2626 void
2627 zvol_init(void)
2628 {
2629 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2630 	    1) == 0);
2631 #ifndef __FreeBSD__
2632 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2633 #endif
2634 	ZFS_LOG(1, "ZVOL Initialized.");
2635 }
2636 
2637 void
2638 zvol_fini(void)
2639 {
2640 #ifndef __FreeBSD__
2641 	mutex_destroy(&zfsdev_state_lock);
2642 #endif
2643 	ddi_soft_state_fini(&zfsdev_state);
2644 	ZFS_LOG(1, "ZVOL Deinitialized.");
2645 }
2646 
2647 #ifdef illumos
2648 /*ARGSUSED*/
2649 static int
2650 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2651 {
2652 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2653 
2654 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2655 		return (1);
2656 	return (0);
2657 }
2658 
2659 /*ARGSUSED*/
2660 static void
2661 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2662 {
2663 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2664 
2665 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2666 }
2667 
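/*
 * Prepare the zvol for use as a dump device: free its existing blocks,
 * stash the current checksum/compression/refreservation/volblocksize
 * properties in the ZAP so zvol_dump_fini() can restore them, force
 * dump-safe property settings, and preallocate the space.
 */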
2668 static int
2669 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2670 {
2671 	dmu_tx_t *tx;
2672 	int error;
2673 	objset_t *os = zv->zv_objset;
2674 	spa_t *spa = dmu_objset_spa(os);
2675 	vdev_t *vd = spa->spa_root_vdev;
2676 	nvlist_t *nv = NULL;
2677 	uint64_t version = spa_version(spa);
2678 	uint64_t checksum, compress, refresrv, vbs, dedup;
2679 
2680 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2681 	ASSERT(vd->vdev_ops == &vdev_root_ops);
2682 
2683 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2684 	    DMU_OBJECT_END);
2685 	if (error != 0)
2686 		return (error);
2687 	/* wait for dmu_free_long_range to actually free the blocks */
2688 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2689 
2690 	/*
2691 	 * If the pool on which the dump device is being initialized has more
2692 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2693 	 * enabled.  If so, bump that feature's counter to indicate that the
2694 	 * feature is active. We also check the vdev type to handle the
2695 	 * following case:
2696 	 *   # zpool create test raidz disk1 disk2 disk3
2697 	 *   Now spa_root_vdev->vdev_children == 1 (the raidz vdev), while
2698 	 *   the raidz vdev itself has 3 children.
2699 	 */
2700 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2701 		if (!spa_feature_is_enabled(spa,
2702 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2703 			return (SET_ERROR(ENOTSUP));
2704 		(void) dsl_sync_task(spa_name(spa),
2705 		    zfs_mvdev_dump_feature_check,
2706 		    zfs_mvdev_dump_activate_feature_sync, NULL,
2707 		    2, ZFS_SPACE_CHECK_RESERVED);
2708 	}
2709 
2710 	if (!resize) {
2711 		error = dsl_prop_get_integer(zv->zv_name,
2712 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2713 		if (error == 0) {
2714 			error = dsl_prop_get_integer(zv->zv_name,
2715 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
2716 			    NULL);
2717 		}
2718 		if (error == 0) {
2719 			error = dsl_prop_get_integer(zv->zv_name,
2720 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2721 			    &refresrv, NULL);
2722 		}
2723 		if (error == 0) {
2724 			error = dsl_prop_get_integer(zv->zv_name,
2725 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2726 			    NULL);
2727 		}
2728 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2729 			error = dsl_prop_get_integer(zv->zv_name,
2730 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2731 		}
2732 	}
2733 	if (error != 0)
2734 		return (error);
2735 
2736 	tx = dmu_tx_create(os);
2737 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2738 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2739 	error = dmu_tx_assign(tx, TXG_WAIT);
2740 	if (error != 0) {
2741 		dmu_tx_abort(tx);
2742 		return (error);
2743 	}
2744 
2745 	/*
2746 	 * If we are resizing the dump device, then we only need to
2747 	 * update the refreservation to match the newly updated
2748 	 * zvol size.  Otherwise, we save off the original state of the
2749 	 * zvol so that we can restore it if the zvol is ever undumpified.
2750 	 */
2751 	if (resize) {
2752 		error = zap_update(os, ZVOL_ZAP_OBJ,
2753 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2754 		    &zv->zv_volsize, tx);
2755 	} else {
2756 		error = zap_update(os, ZVOL_ZAP_OBJ,
2757 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2758 		    &compress, tx);
2759 		if (error == 0) {
2760 			error = zap_update(os, ZVOL_ZAP_OBJ,
2761 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2762 			    &checksum, tx);
2763 		}
2764 		if (error == 0) {
2765 			error = zap_update(os, ZVOL_ZAP_OBJ,
2766 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2767 			    &refresrv, tx);
2768 		}
2769 		if (error == 0) {
2770 			error = zap_update(os, ZVOL_ZAP_OBJ,
2771 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2772 			    &vbs, tx);
2773 		}
2774 		if (error == 0) {
2775 			error = dmu_object_set_blocksize(
2776 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2777 		}
2778 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2779 			error = zap_update(os, ZVOL_ZAP_OBJ,
2780 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2781 			    &dedup, tx);
2782 		}
2783 		if (error == 0)
2784 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2785 	}
2786 	dmu_tx_commit(tx);
2787 
2788 	/*
2789 	 * We only need to update the zvol's properties if we are initializing
2790 	 * the dump area for the first time.
2791 	 */
2792 	if (error == 0 && !resize) {
2793 		/*
2794 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2795 		 * function.  Otherwise, use the old default -- OFF.
2796 		 */
2797 		checksum = spa_feature_is_active(spa,
2798 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2799 		    ZIO_CHECKSUM_OFF;
2800 
2801 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2802 		VERIFY(nvlist_add_uint64(nv,
2803 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2804 		VERIFY(nvlist_add_uint64(nv,
2805 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2806 		    ZIO_COMPRESS_OFF) == 0);
2807 		VERIFY(nvlist_add_uint64(nv,
2808 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2809 		    checksum) == 0);
2810 		if (version >= SPA_VERSION_DEDUP) {
2811 			VERIFY(nvlist_add_uint64(nv,
2812 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2813 			    ZIO_CHECKSUM_OFF) == 0);
2814 		}
2815 
2816 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2817 		    nv, NULL);
2818 		nvlist_free(nv);
2819 	}
2820 
2821 	/* Allocate the space for the dump */
2822 	if (error == 0)
2823 		error = zvol_prealloc(zv);
2824 	return (error);
2825 }
2826 
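/*
 * Turn the zvol into a dump device: (re)initialize the dump area when the
 * recorded ZVOL_DUMPSIZE does not match the current volsize, build the
 * LBA map consumed by zvol_dumpio(), and persist the new dump size.
 */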
2827 static int
2828 zvol_dumpify(zvol_state_t *zv)
2829 {
2830 	int error = 0;
2831 	uint64_t dumpsize = 0;
2832 	dmu_tx_t *tx;
2833 	objset_t *os = zv->zv_objset;
2834 
2835 	if (zv->zv_flags & ZVOL_RDONLY)
2836 		return (SET_ERROR(EROFS));
2837 
2838 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2839 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2840 		boolean_t resize = (dumpsize > 0);
2841 
2842 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2843 			(void) zvol_dump_fini(zv);
2844 			return (error);
2845 		}
2846 	}
2847 
2848 	/*
2849 	 * Build up our LBA mapping.
2850 	 */
2851 	error = zvol_get_lbas(zv);
2852 	if (error) {
2853 		(void) zvol_dump_fini(zv);
2854 		return (error);
2855 	}
2856 
2857 	tx = dmu_tx_create(os);
2858 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2859 	error = dmu_tx_assign(tx, TXG_WAIT);
2860 	if (error) {
2861 		dmu_tx_abort(tx);
2862 		(void) zvol_dump_fini(zv);
2863 		return (error);
2864 	}
2865 
2866 	zv->zv_flags |= ZVOL_DUMPIFIED;
2867 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2868 	    &zv->zv_volsize, tx);
2869 	dmu_tx_commit(tx);
2870 
2871 	if (error) {
2872 		(void) zvol_dump_fini(zv);
2873 		return (error);
2874 	}
2875 
2876 	txg_wait_synced(dmu_objset_pool(os), 0);
2877 	return (0);
2878 }
2879 
2880 static int
2881 zvol_dump_fini(zvol_state_t *zv)
2882 {
2883 	dmu_tx_t *tx;
2884 	objset_t *os = zv->zv_objset;
2885 	nvlist_t *nv;
2886 	int error = 0;
2887 	uint64_t checksum, compress, refresrv, vbs, dedup;
2888 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2889 
2890 	/*
2891 	 * Attempt to restore the zvol back to its pre-dumpified state.
2892 	 * This is a best-effort attempt as it's possible that not all
2893 	 * of these properties were initialized during the dumpify process
2894 	 * (e.g., an error during zvol_dump_init).
2895 	 */
2896 
2897 	tx = dmu_tx_create(os);
2898 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2899 	error = dmu_tx_assign(tx, TXG_WAIT);
2900 	if (error) {
2901 		dmu_tx_abort(tx);
2902 		return (error);
2903 	}
2904 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2905 	dmu_tx_commit(tx);
2906 
2907 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2908 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2909 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2910 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2911 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2912 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2913 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2914 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2915 
2916 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2917 	(void) nvlist_add_uint64(nv,
2918 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2919 	(void) nvlist_add_uint64(nv,
2920 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2921 	(void) nvlist_add_uint64(nv,
2922 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2923 	if (version >= SPA_VERSION_DEDUP &&
2924 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2925 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2926 		(void) nvlist_add_uint64(nv,
2927 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2928 	}
2929 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2930 	    nv, NULL);
2931 	nvlist_free(nv);
2932 
2933 	zvol_free_extents(zv);
2934 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2935 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2936 	/* wait for dmu_free_long_range to actually free the blocks */
2937 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2938 	tx = dmu_tx_create(os);
2939 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2940 	error = dmu_tx_assign(tx, TXG_WAIT);
2941 	if (error) {
2942 		dmu_tx_abort(tx);
2943 		return (error);
2944 	}
2945 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2946 		zv->zv_volblocksize = vbs;
2947 	dmu_tx_commit(tx);
2948 
2949 	return (0);
2950 }
2951 #endif /* illumos */
2952 
2953 #ifdef __FreeBSD__
2954 static void
2955 zvol_geom_run(zvol_state_t *zv)
2956 {
2957 	struct g_provider *pp;
2958 
2959 	pp = zv->zv_provider;
2960 	g_error_provider(pp, 0);
2961 
2962 	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2963 	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2964 }
2965 
2966 static void
2967 zvol_geom_destroy(zvol_state_t *zv)
2968 {
2969 	struct g_provider *pp;
2970 
2971 	g_topology_assert();
2972 
2973 	mtx_lock(&zv->zv_queue_mtx);
2974 	zv->zv_state = 1;
2975 	wakeup_one(&zv->zv_queue);
2976 	while (zv->zv_state != 2)
2977 		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2978 	mtx_destroy(&zv->zv_queue_mtx);
2979 
2980 	pp = zv->zv_provider;
2981 	zv->zv_provider = NULL;
2982 	pp->private = NULL;
2983 	g_wither_geom(pp->geom, ENXIO);
2984 }
2985 
2986 static int
2987 zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2988 {
2989 	int count, error, flags;
2990 
2991 	g_topology_assert();
2992 
2993 	/*
2994 	 * To keep things simple we expect either an open or a close, but
2995 	 * not both at the same time.
2996 	 */
2997 	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2998 	    (acr <= 0 && acw <= 0 && ace <= 0),
2999 	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
3000 	    pp->name, acr, acw, ace));
3001 
3002 	if (pp->private == NULL) {
3003 		if (acr <= 0 && acw <= 0 && ace <= 0)
3004 			return (0);
3005 		return (pp->error);
3006 	}
3007 
3008 	/*
3009 	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
3010 	 * because GEOM already handles that and handles it a bit differently.
3011 	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
3012 	 * only one exclusive consumer, no matter if it is reader or writer.
3013 	 * I prefer the way GEOM works, so I'll leave it to GEOM to
3014 	 * decide what to do.
3015 	 */
3016 
3017 	count = acr + acw + ace;
3018 	if (count == 0)
3019 		return (0);
3020 
3021 	flags = 0;
3022 	if (acr != 0 || ace != 0)
3023 		flags |= FREAD;
3024 	if (acw != 0)
3025 		flags |= FWRITE;
3026 
3027 	g_topology_unlock();
3028 	if (count > 0)
3029 		error = zvol_open(pp, flags, count);
3030 	else
3031 		error = zvol_close(pp, flags, -count);
3032 	g_topology_lock();
3033 	return (error);
3034 }
3035 
3036 static void
3037 zvol_geom_start(struct bio *bp)
3038 {
3039 	zvol_state_t *zv;
3040 	boolean_t first;
3041 
3042 	zv = bp->bio_to->private;
3043 	ASSERT(zv != NULL);
3044 	switch (bp->bio_cmd) {
3045 	case BIO_FLUSH:
3046 		if (!THREAD_CAN_SLEEP())
3047 			goto enqueue;
3048 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3049 		g_io_deliver(bp, 0);
3050 		break;
3051 	case BIO_READ:
3052 	case BIO_WRITE:
3053 	case BIO_DELETE:
3054 		if (!THREAD_CAN_SLEEP())
3055 			goto enqueue;
3056 		zvol_strategy(bp);
3057 		break;
3058 	case BIO_GETATTR: {
3059 		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3060 		uint64_t refd, avail, usedobjs, availobjs, val;
3061 
3062 		if (g_handleattr_int(bp, "GEOM::candelete", 1))
3063 			return;
3064 		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
3065 			dmu_objset_space(zv->zv_objset, &refd, &avail,
3066 			    &usedobjs, &availobjs);
3067 			if (g_handleattr_off_t(bp, "blocksavail",
3068 			    avail / DEV_BSIZE))
3069 				return;
3070 		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
3071 			dmu_objset_space(zv->zv_objset, &refd, &avail,
3072 			    &usedobjs, &availobjs);
3073 			if (g_handleattr_off_t(bp, "blocksused",
3074 			    refd / DEV_BSIZE))
3075 				return;
3076 		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
3077 			avail = metaslab_class_get_space(spa_normal_class(spa));
3078 			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3079 			if (g_handleattr_off_t(bp, "poolblocksavail",
3080 			    avail / DEV_BSIZE))
3081 				return;
3082 		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
3083 			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3084 			if (g_handleattr_off_t(bp, "poolblocksused",
3085 			    refd / DEV_BSIZE))
3086 				return;
3087 		}
3088 		/* FALLTHROUGH */
3089 	}
3090 	default:
3091 		g_io_deliver(bp, EOPNOTSUPP);
3092 		break;
3093 	}
3094 	return;
3095 
3096 enqueue:
3097 	mtx_lock(&zv->zv_queue_mtx);
3098 	first = (bioq_first(&zv->zv_queue) == NULL);
3099 	bioq_insert_tail(&zv->zv_queue, bp);
3100 	mtx_unlock(&zv->zv_queue_mtx);
3101 	if (first)
3102 		wakeup_one(&zv->zv_queue);
3103 }
3104 
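/*
 * Per-zvol worker thread: services bios that zvol_geom_start() queued
 * from contexts that cannot sleep, and exits once zvol_geom_destroy()
 * sets zv_state to 1.
 */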
3105 static void
3106 zvol_geom_worker(void *arg)
3107 {
3108 	zvol_state_t *zv;
3109 	struct bio *bp;
3110 
3111 	thread_lock(curthread);
3112 	sched_prio(curthread, PRIBIO);
3113 	thread_unlock(curthread);
3114 
3115 	zv = arg;
3116 	for (;;) {
3117 		mtx_lock(&zv->zv_queue_mtx);
3118 		bp = bioq_takefirst(&zv->zv_queue);
3119 		if (bp == NULL) {
3120 			if (zv->zv_state == 1) {
3121 				zv->zv_state = 2;
3122 				wakeup(&zv->zv_state);
3123 				mtx_unlock(&zv->zv_queue_mtx);
3124 				kthread_exit();
3125 			}
3126 			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
3127 			    "zvol:io", 0);
3128 			continue;
3129 		}
3130 		mtx_unlock(&zv->zv_queue_mtx);
3131 		switch (bp->bio_cmd) {
3132 		case BIO_FLUSH:
3133 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3134 			g_io_deliver(bp, 0);
3135 			break;
3136 		case BIO_READ:
3137 		case BIO_WRITE:
3138 		case BIO_DELETE:
3139 			zvol_strategy(bp);
3140 			break;
3141 		default:
3142 			g_io_deliver(bp, EOPNOTSUPP);
3143 			break;
3144 		}
3145 	}
3146 }
3147 #endif
3148 
3149 extern boolean_t dataset_name_hidden(const char *name);
3150 
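/*
 * Create a minor node for every snapshot of the named zvol, iterating
 * with dmu_snapshot_list_next() until it returns ENOENT.
 */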
3151 static int
3152 zvol_create_snapshots(objset_t *os, const char *name)
3153 {
3154 	uint64_t cookie, obj;
3155 	char *sname;
3156 	int error, len;
3157 
3158 	cookie = obj = 0;
3159 	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3160 
3161 #if 0
3162 	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
3163 	    DS_FIND_SNAPSHOTS);
3164 #endif
3165 
3166 	for (;;) {
3167 		len = snprintf(sname, MAXPATHLEN, "%s@", name);
3168 		if (len >= MAXPATHLEN) {
3169 			dmu_objset_rele(os, FTAG);
3170 			error = ENAMETOOLONG;
3171 			break;
3172 		}
3173 
3174 		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
3175 		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
3176 		    sname + len, &obj, &cookie, NULL);
3177 		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
3178 		if (error != 0) {
3179 			if (error == ENOENT)
3180 				error = 0;
3181 			break;
3182 		}
3183 
3184 		error = zvol_create_minor(sname);
3185 		if (error != 0 && error != EEXIST) {
3186 			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
3187 			    sname, error);
3188 			break;
3189 		}
3190 	}
3191 
3192 	kmem_free(sname, MAXPATHLEN);
3193 	return (error);
3194 }
3195 
3196 int
3197 zvol_create_minors(const char *name)
3198 {
3199 	uint64_t cookie;
3200 	objset_t *os;
3201 	char *osname, *p;
3202 	int error, len;
3203 
3204 	if (dataset_name_hidden(name))
3205 		return (0);
3206 
3207 	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
3208 		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
3209 		    name, error);
3210 		return (error);
3211 	}
3212 	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
3213 		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
3214 		dsl_pool_rele(dmu_objset_pool(os), FTAG);
3215 		error = zvol_create_minor(name);
3216 		if (error == 0 || error == EEXIST) {
3217 			error = zvol_create_snapshots(os, name);
3218 		} else {
3219 			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
3220 			    name, error);
3221 		}
3222 		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
3223 		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
3224 		return (error);
3225 	}
3226 	if (dmu_objset_type(os) != DMU_OST_ZFS) {
3227 		dmu_objset_rele(os, FTAG);
3228 		return (0);
3229 	}
3230 
3231 	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3232 	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
3233 		dmu_objset_rele(os, FTAG);
3234 		kmem_free(osname, MAXPATHLEN);
3235 		return (ENOENT);
3236 	}
3237 	p = osname + strlen(osname);
3238 	len = MAXPATHLEN - (p - osname);
3239 
3240 #if 0
3241 	/* Prefetch the datasets. */
3242 	cookie = 0;
3243 	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
3244 		if (!dataset_name_hidden(osname))
3245 			(void) dmu_objset_prefetch(osname, NULL);
3246 	}
3247 #endif
3248 
3249 	cookie = 0;
3250 	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
3251 	    &cookie) == 0) {
3252 		dmu_objset_rele(os, FTAG);
3253 		(void)zvol_create_minors(osname);
3254 		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
3255 			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
3256 			    name, error);
3257 			return (error);
3258 		}
3259 	}
3260 
3261 	dmu_objset_rele(os, FTAG);
3262 	kmem_free(osname, MAXPATHLEN);
3263 	return (0);
3264 }
3265 
3266 #ifdef __NetBSD__
3267 void
3268 zvol_rename_minor(zvol_state_t *zv, const char *newname)
3269 {
3270 	char *nm;
3271 	minor_t minor = zv->zv_minor;
3272 
3273 	nm = PNBUF_GET();
3274 	strlcpy(nm, newname, MAXPATHLEN);
3275 	ddi_remove_minor_node(zfs_dip, zv->zv_name);
3276 	(void)ddi_create_minor_node(zfs_dip, nm, S_IFCHR, minor, DDI_PSEUDO, 0);
3277 	(void)ddi_create_minor_node(zfs_dip, nm, S_IFBLK, minor, DDI_PSEUDO, 0);
3278 	PNBUF_PUT(nm);
3279 
3280 	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
3281 	mutex_enter(&zv->zv_dklock);
3282 	disk_rename(&zv->zv_dk, zv->zv_name);
3283 	mutex_exit(&zv->zv_dklock);
3284 }
3285 #endif
3286 
3287 #ifdef __FreeBSD__
3288 static void
3289 zvol_rename_minor(zvol_state_t *zv, const char *newname)
3290 {
3291 	struct g_geom *gp;
3292 	struct g_provider *pp;
3293 	struct cdev *dev;
3294 
3295 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
3296 
3297 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
3298 		g_topology_lock();
3299 		pp = zv->zv_provider;
3300 		ASSERT(pp != NULL);
3301 		gp = pp->geom;
3302 		ASSERT(gp != NULL);
3303 
3304 		zv->zv_provider = NULL;
3305 		g_wither_provider(pp, ENXIO);
3306 
3307 		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
3308 		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
3309 		pp->sectorsize = DEV_BSIZE;
3310 		pp->mediasize = zv->zv_volsize;
3311 		pp->private = zv;
3312 		zv->zv_provider = pp;
3313 		g_error_provider(pp, 0);
3314 		g_topology_unlock();
3315 	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
3316 		struct make_dev_args args;
3317 
3318 		if ((dev = zv->zv_dev) != NULL) {
3319 			zv->zv_dev = NULL;
3320 			destroy_dev(dev);
3321 			if (zv->zv_total_opens > 0) {
3322 				zv->zv_flags &= ~ZVOL_EXCL;
3323 				zv->zv_total_opens = 0;
3324 				zvol_last_close(zv);
3325 			}
3326 		}
3327 
3328 		make_dev_args_init(&args);
3329 		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
3330 		args.mda_devsw = &zvol_cdevsw;
3331 		args.mda_cr = NULL;
3332 		args.mda_uid = UID_ROOT;
3333 		args.mda_gid = GID_OPERATOR;
3334 		args.mda_mode = 0640;
3335 		args.mda_si_drv2 = zv;
3336 		if (make_dev_s(&args, &zv->zv_dev,
3337 		    "%s/%s", ZVOL_DRIVER, newname) == 0)
3338 			zv->zv_dev->si_iosize_max = MAXPHYS;
3339 	}
3340 	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
3341 }
3342 #endif
3343 
3344 void
3345 zvol_rename_minors(const char *oldname, const char *newname)
3346 {
3347 	char name[MAXPATHLEN];
3348 	struct g_provider *pp;
3349 	struct g_geom *gp;
3350 	size_t oldnamelen, newnamelen;
3351 	zvol_state_t *zv;
3352 	char *namebuf;
3353 	boolean_t locked = B_FALSE;
3354 
3355 	oldnamelen = strlen(oldname);
3356 	newnamelen = strlen(newname);
3357 
3358 	DROP_GIANT();
3359 	/* See comment in zvol_open(). */
3360 	if (!MUTEX_HELD(&zfsdev_state_lock)) {
3361 		mutex_enter(&zfsdev_state_lock);
3362 		locked = B_TRUE;
3363 	}
3364 
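	/*
	 * Rename the zvol itself plus any descendant or snapshot whose
	 * name begins with "<oldname>/" or "<oldname>@".
	 */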
3365 	LIST_FOREACH(zv, &all_zvols, zv_links) {
3366 		if (strcmp(zv->zv_name, oldname) == 0) {
3367 			zvol_rename_minor(zv, newname);
3368 		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
3369 		    (zv->zv_name[oldnamelen] == '/' ||
3370 		     zv->zv_name[oldnamelen] == '@')) {
3371 			snprintf(name, sizeof(name), "%s%c%s", newname,
3372 			    zv->zv_name[oldnamelen],
3373 			    zv->zv_name + oldnamelen + 1);
3374 			zvol_rename_minor(zv, name);
3375 		}
3376 	}
3377 
3378 	if (locked)
3379 		mutex_exit(&zfsdev_state_lock);
3380 	PICKUP_GIANT();
3381 }
3382 
3383 #ifdef __FreeBSD__
3384 static int
3385 zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3386 {
3387 	zvol_state_t *zv = dev->si_drv2;
3388 	int err = 0;
3389 
3390 	mutex_enter(&zfsdev_state_lock);
3391 	if (zv->zv_total_opens == 0)
3392 		err = zvol_first_open(zv);
3393 	if (err) {
3394 		mutex_exit(&zfsdev_state_lock);
3395 		return (err);
3396 	}
3397 	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3398 		err = SET_ERROR(EROFS);
3399 		goto out;
3400 	}
3401 	if (zv->zv_flags & ZVOL_EXCL) {
3402 		err = SET_ERROR(EBUSY);
3403 		goto out;
3404 	}
3405 #ifdef FEXCL
3406 	if (flags & FEXCL) {
3407 		if (zv->zv_total_opens != 0) {
3408 			err = SET_ERROR(EBUSY);
3409 			goto out;
3410 		}
3411 		zv->zv_flags |= ZVOL_EXCL;
3412 	}
3413 #endif
3414 
3415 	zv->zv_total_opens++;
3416 	if (flags & (FSYNC | FDSYNC)) {
3417 		zv->zv_sync_cnt++;
3418 		if (zv->zv_sync_cnt == 1)
3419 			zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
3420 	}
3421 	mutex_exit(&zfsdev_state_lock);
3422 	return (err);
3423 out:
3424 	if (zv->zv_total_opens == 0)
3425 		zvol_last_close(zv);
3426 	mutex_exit(&zfsdev_state_lock);
3427 	return (err);
3428 }
3429 
3430 static int
3431 zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3432 {
3433 	zvol_state_t *zv = dev->si_drv2;
3434 
3435 	mutex_enter(&zfsdev_state_lock);
3436 	if (zv->zv_flags & ZVOL_EXCL) {
3437 		ASSERT(zv->zv_total_opens == 1);
3438 		zv->zv_flags &= ~ZVOL_EXCL;
3439 	}
3440 
3441 	/*
3442 	 * If the open count is zero, this is a spurious close.
3443 	 * That indicates a bug in the kernel / DDI framework.
3444 	 */
3445 	ASSERT(zv->zv_total_opens != 0);
3446 
3447 	/*
3448 	 * You may get multiple opens, but only one close.
3449 	 */
3450 	zv->zv_total_opens--;
3451 	if (flags & (FSYNC | FDSYNC))
3452 		zv->zv_sync_cnt--;
3453 
3454 	if (zv->zv_total_opens == 0)
3455 		zvol_last_close(zv);
3456 
3457 	mutex_exit(&zfsdev_state_lock);
3458 	return (0);
3459 }
3460 
3461 static int
3462 zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3463 {
3464 	zvol_state_t *zv;
3465 	rl_t *rl;
3466 	off_t offset, length;
3467 	int i, error;
3468 	boolean_t sync;
3469 
3470 	zv = dev->si_drv2;
3471 
3472 	error = 0;
3473 	KASSERT(zv->zv_total_opens > 0,
3474 	    ("Device with zero access count in zvol_d_ioctl"));
3475 
3476 	i = IOCPARM_LEN(cmd);
3477 	switch (cmd) {
3478 	case DIOCGSECTORSIZE:
3479 		*(u_int *)data = DEV_BSIZE;
3480 		break;
3481 	case DIOCGMEDIASIZE:
3482 		*(off_t *)data = zv->zv_volsize;
3483 		break;
3484 	case DIOCGFLUSH:
3485 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3486 		break;
3487 	case DIOCGDELETE:
3488 		if (!zvol_unmap_enabled)
3489 			break;
3490 
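		/*
		 * data carries { offset, length } in bytes; both must be
		 * DEV_BSIZE-aligned and the offset must lie within the
		 * volume.
		 */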
3491 		offset = ((off_t *)data)[0];
3492 		length = ((off_t *)data)[1];
3493 		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3494 		    offset < 0 || offset >= zv->zv_volsize ||
3495 		    length <= 0) {
3496 			printf("%s: offset=%jd length=%jd\n", __func__, offset,
3497 			    length);
3498 			error = EINVAL;
3499 			break;
3500 		}
3501 
3502 		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3503 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3504 		error = dmu_tx_assign(tx, TXG_WAIT);
3505 		if (error != 0) {
3506 			sync = FALSE;
3507 			dmu_tx_abort(tx);
3508 		} else {
3509 			sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
3510 			zvol_log_truncate(zv, tx, offset, length, sync);
3511 			dmu_tx_commit(tx);
3512 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3513 			    offset, length);
3514 		}
3515 		zfs_range_unlock(rl);
3516 		if (sync)
3517 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3518 		break;
3519 	case DIOCGSTRIPESIZE:
3520 		*(off_t *)data = zv->zv_volblocksize;
3521 		break;
3522 	case DIOCGSTRIPEOFFSET:
3523 		*(off_t *)data = 0;
3524 		break;
3525 	case DIOCGATTR: {
3526 		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3527 		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3528 		uint64_t refd, avail, usedobjs, availobjs;
3529 
3530 		if (strcmp(arg->name, "GEOM::candelete") == 0)
3531 			arg->value.i = 1;
3532 		else if (strcmp(arg->name, "blocksavail") == 0) {
3533 			dmu_objset_space(zv->zv_objset, &refd, &avail,
3534 			    &usedobjs, &availobjs);
3535 			arg->value.off = avail / DEV_BSIZE;
3536 		} else if (strcmp(arg->name, "blocksused") == 0) {
3537 			dmu_objset_space(zv->zv_objset, &refd, &avail,
3538 			    &usedobjs, &availobjs);
3539 			arg->value.off = refd / DEV_BSIZE;
3540 		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3541 			avail = metaslab_class_get_space(spa_normal_class(spa));
3542 			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3543 			arg->value.off = avail / DEV_BSIZE;
3544 		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3545 			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3546 			arg->value.off = refd / DEV_BSIZE;
3547 		} else
3548 			error = ENOIOCTL;
3549 		break;
3550 	}
3551 	case FIOSEEKHOLE:
3552 	case FIOSEEKDATA: {
3553 		off_t *off = (off_t *)data;
3554 		uint64_t noff;
3555 		boolean_t hole;
3556 
3557 		hole = (cmd == FIOSEEKHOLE);
3558 		noff = *off;
3559 		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3560 		*off = noff;
3561 		break;
3562 	}
3563 	default:
3564 		error = ENOIOCTL;
3565 	}
3566 
3567 	return (error);
3568 }
3569 #endif /* __FreeBSD__ */
3570 
3571 #ifdef __NetBSD__
3572 /*ARGSUSED*/
3573 int
3574 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
3575 {
3576 	zvol_state_t *zv;
3577 	int error = 0;
3578 
3579 	mutex_enter(&zfsdev_state_lock);
3580 
3581 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
3582 
3583 	if (zv == NULL) {
3584 		mutex_exit(&zfsdev_state_lock);
3585 		return (ENXIO);
3586 	}
3587 
3588 	error = disk_ioctl(&zv->zv_dk, NODEV, cmd, (void *)arg, flag, curlwp);
3589 	if (error != EPASSTHROUGH) {
3590 		mutex_exit(&zfsdev_state_lock);
3591 		return error;
3592 	}
3593 
3594 	error = 0;
3595 
3596 	switch (cmd) {
3597 	case DIOCGWEDGEINFO:
3598 	{
3599 		struct dkwedge_info *dkw = (void *) arg;
3600 		struct disk_geom *dg = &zv->zv_dk.dk_geom;
3601 
3602 		memset(dkw, 0, sizeof(*dkw));
3603 		strlcpy(dkw->dkw_devname, zv->zv_name,
3604 		    sizeof(dkw->dkw_devname));
3605 
3606 		/*
3607 		 * dkw_parent is interpreted as disk device name by the kernel
3608 		 * to locate the disk driver and its geometry data. The faked
3609 		 * name "ZFS" must never match a device name. The kernel will
3610 		 * then call DIOCGPARTINFO below to retrieve the missing
3611 		 * information.
3612 		 *
3613 		 * Userland will also be confused, but it can use the
3614 		 * proplib-based DIOCGDISKINFO to get the geometry
3615 		 * information.
3616 		 */
3617 		strlcpy(dkw->dkw_parent, "ZFS", sizeof(dkw->dkw_parent));
3618 
3619 		dkw->dkw_offset = 0;
3620 		dkw->dkw_size = dg->dg_secperunit;
3621 		strcpy(dkw->dkw_ptype, DKW_PTYPE_FFS);
3622 
3623 		break;
3624 	}
3625 
3626 	case DIOCGPARTINFO:
3627 	{
3628 		struct partinfo *pi = (void *) arg;
3629 		struct disk_geom *dg = &zv->zv_dk.dk_geom;
3630 
3631 		memset(pi, 0, sizeof(*pi));
3632 		pi->pi_offset = 0;
3633 		pi->pi_secsize = dg->dg_secsize;
3634 		pi->pi_size = dg->dg_secperunit;
3635 		pi->pi_fstype = FS_OTHER;
3636 		pi->pi_bsize = MAX(BLKDEV_IOSIZE, pi->pi_secsize);
3637 
3638 		break;
3639 	}
3640 
3641 	default:
3642 		dprintf("unknown disk_ioctl called\n");
3643 		error = ENOTTY;
3644 		break;
3645 	}
3646 
3647 	mutex_exit(&zfsdev_state_lock);
3648 	return (error);
3649 }
3650 #endif /* __NetBSD__ */
3651