1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /* Portions Copyright 2010 Robert Milkowski */
26
27 /*
28 * ZFS volume emulation driver.
29 *
30 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
31 * Volumes are accessed through the symbolic links named:
32 *
33 * /dev/zvol/dsk/<pool_name>/<dataset_name>
34 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
35 *
36 * These links are created by the /dev filesystem (sdev_zvolops.c).
37 * Volumes are persistent through reboot. No user command needs to be
38 * run before opening and using a device.
39 */
40
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/errno.h>
44 #include <sys/uio.h>
45 #include <sys/buf.h>
46 #include <sys/modctl.h>
47 #include <sys/open.h>
48 #include <sys/kmem.h>
49 #include <sys/conf.h>
50 #include <sys/cmn_err.h>
51 #include <sys/stat.h>
52 #include <sys/zap.h>
53 #include <sys/spa.h>
54 #include <sys/zio.h>
55 #include <sys/dmu_traverse.h>
56 #include <sys/dnode.h>
57 #include <sys/dsl_dataset.h>
58 #include <sys/dsl_prop.h>
59 #include <sys/dkio.h>
60 #include <sys/efi_partition.h>
61 #include <sys/byteorder.h>
62 #include <sys/pathname.h>
63 #include <sys/ddi.h>
64 #include <sys/sunddi.h>
65 #include <sys/crc32.h>
66 #include <sys/dirent.h>
67 #include <sys/policy.h>
68 #include <sys/fs/zfs.h>
69 #include <sys/zfs_ioctl.h>
70 #include <sys/mkdev.h>
71 #include <sys/zil.h>
72 #include <sys/refcount.h>
73 #include <sys/zfs_znode.h>
74 #include <sys/zfs_rlock.h>
75 #include <sys/vdev_disk.h>
76 #include <sys/vdev_impl.h>
77 #include <sys/zvol.h>
78 #include <sys/dumphdr.h>
79 #include <sys/zil_impl.h>
80
81 #include "zfs_namecheck.h"
82
83 void *zfsdev_state;
84 static char *zvol_tag = "zvol_tag";
85
86 #define ZVOL_DUMPSIZE "dumpsize"
87
88 /*
89 * This lock protects the zfsdev_state structure from being modified
90 * while it's being used, e.g. an open that comes in before a create
91 * finishes. It also protects temporary opens of the dataset so that,
92 * e.g., an open doesn't get a spurious EBUSY.
93 */
94 kmutex_t zfsdev_state_lock;
95 static uint32_t zvol_minors;
96
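/*
 * A physical extent: a run of consecutive volume blocks that map to a
 * contiguous DVA range on disk.  The dump code builds a list of these
 * so that crash dumps can bypass the DMU and write directly to the
 * underlying vdevs.
 */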
97 typedef struct zvol_extent {
98 list_node_t ze_node;
99 dva_t ze_dva; /* dva associated with this extent */
100 uint64_t ze_nblks; /* number of blocks in extent */
101 } zvol_extent_t;
102
103 /*
104 * The in-core state of each volume.
105 */
106 typedef struct zvol_state {
107 char zv_name[MAXPATHLEN]; /* pool/dd name */
108 uint64_t zv_volsize; /* amount of space we advertise */
109 uint64_t zv_volblocksize; /* volume block size */
110 minor_t zv_minor; /* minor number */
111 uint8_t zv_min_bs; /* minimum addressable block shift */
112 uint8_t zv_flags; /* readonly, dumpified, etc. */
113 objset_t *zv_objset; /* objset handle */
114 uint32_t zv_open_count[OTYPCNT]; /* open counts */
115 uint32_t zv_total_opens; /* total open count */
116 zilog_t *zv_zilog; /* ZIL handle */
117 list_t zv_extents; /* List of extents for dump */
118 znode_t zv_znode; /* for range locking */
119 dmu_buf_t *zv_dbuf; /* bonus handle */
120 } zvol_state_t;
121
122 /*
123 * zvol specific flags
124 */
125 #define ZVOL_RDONLY 0x1
126 #define ZVOL_DUMPIFIED 0x2
127 #define ZVOL_EXCL 0x4
128 #define ZVOL_WCE 0x8
129
130 /*
131 * zvol maximum transfer in one DMU tx.
132 */
133 int zvol_maxphys = DMU_MAX_ACCESS/2;
134
135 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
136 nvlist_t *, nvlist_t **);
137 static int zvol_remove_zv(zvol_state_t *);
138 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
139 static int zvol_dumpify(zvol_state_t *zv);
140 static int zvol_dump_fini(zvol_state_t *zv);
141 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
142
143 static void
144 zvol_size_changed(uint64_t volsize, major_t maj, minor_t min)
145 {
146 dev_t dev = makedevice(maj, min);
147
148 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
149 "Size", volsize) == DDI_SUCCESS);
150 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
151 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
152
153 /* Notify specfs to invalidate the cached size */
154 spec_size_invalidate(dev, VBLK);
155 spec_size_invalidate(dev, VCHR);
156 }
157
158 int
159 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
160 {
161 if (volsize == 0)
162 return (EINVAL);
163
164 if (volsize % blocksize != 0)
165 return (EINVAL);
166
167 #ifdef _ILP32
168 if (volsize - 1 > SPEC_MAXOFFSET_T)
169 return (EOVERFLOW);
170 #endif
171 return (0);
172 }
173
174 int
175 zvol_check_volblocksize(uint64_t volblocksize)
176 {
177 if (volblocksize < SPA_MINBLOCKSIZE ||
178 volblocksize > SPA_MAXBLOCKSIZE ||
179 !ISP2(volblocksize))
180 return (EDOM);
181
182 return (0);
183 }
184
185 int
186 zvol_get_stats(objset_t *os, nvlist_t *nv)
187 {
188 int error;
189 dmu_object_info_t doi;
190 uint64_t val;
191
192 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
193 if (error)
194 return (error);
195
196 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
197
198 error = dmu_object_info(os, ZVOL_OBJ, &doi);
199
200 if (error == 0) {
201 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
202 doi.doi_data_block_size);
203 }
204
205 return (error);
206 }
207
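/*
 * Find the in-core state for a volume by name, scanning every allocated
 * minor.  Returns NULL if no minor exists for the named dataset.
 * The caller must hold zfsdev_state_lock.
 */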
208 static zvol_state_t *
209 zvol_minor_lookup(const char *name)
210 {
211 minor_t minor;
212 zvol_state_t *zv;
213
214 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
215
216 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
217 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
218 if (zv == NULL)
219 continue;
220 if (strcmp(zv->zv_name, name) == 0)
221 return (zv);
222 }
223
224 return (NULL);
225 }
226
227 /* extent mapping arg */
228 struct maparg {
229 zvol_state_t *ma_zv;
230 uint64_t ma_blks;
231 };
232
233 /*ARGSUSED*/
234 static int
235 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
236 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
237 {
238 struct maparg *ma = arg;
239 zvol_extent_t *ze;
240 int bs = ma->ma_zv->zv_volblocksize;
241
242 if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
243 return (0);
244
245 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
246 ma->ma_blks++;
247
248 /* Abort immediately if we have encountered gang blocks */
249 if (BP_IS_GANG(bp))
250 return (EFRAGS);
251
252 /*
253 * See if the block is at the end of the previous extent.
254 */
255 ze = list_tail(&ma->ma_zv->zv_extents);
256 if (ze &&
257 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
258 DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
259 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
260 ze->ze_nblks++;
261 return (0);
262 }
263
264 dprintf_bp(bp, "%s", "next blkptr:");
265
266 /* start a new extent */
267 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
268 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */
269 ze->ze_nblks = 1;
270 list_insert_tail(&ma->ma_zv->zv_extents, ze);
271 return (0);
272 }
273
274 static void
275 zvol_free_extents(zvol_state_t *zv)
276 {
277 zvol_extent_t *ze;
278
279 while (ze = list_head(&zv->zv_extents)) {
280 list_remove(&zv->zv_extents, ze);
281 kmem_free(ze, sizeof (zvol_extent_t));
282 }
283 }
284
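/*
 * Rebuild the extent list by walking every block pointer in the dataset.
 * Fails with EIO if the volume is not fully allocated (i.e. the traversed
 * block count doesn't match volsize / volblocksize).
 */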
285 static int
286 zvol_get_lbas(zvol_state_t *zv)
287 {
288 objset_t *os = zv->zv_objset;
289 struct maparg ma;
290 int err;
291
292 ma.ma_zv = zv;
293 ma.ma_blks = 0;
294 zvol_free_extents(zv);
295
296 /* commit any in-flight changes before traversing the dataset */
297 txg_wait_synced(dmu_objset_pool(os), 0);
298 err = traverse_dataset(dmu_objset_ds(os), 0,
299 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
300 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
301 zvol_free_extents(zv);
302 return (err ? err : EIO);
303 }
304
305 return (0);
306 }
307
308 /* ARGSUSED */
309 void
310 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
311 {
312 zfs_creat_t *zct = arg;
313 nvlist_t *nvprops = zct->zct_props;
314 int error;
315 uint64_t volblocksize, volsize;
316
317 VERIFY(nvlist_lookup_uint64(nvprops,
318 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
319 if (nvlist_lookup_uint64(nvprops,
320 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
321 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
322
323 /*
324 * These properties must be removed from the list so the generic
325 * property setting step won't apply to them.
326 */
327 VERIFY(nvlist_remove_all(nvprops,
328 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
329 (void) nvlist_remove_all(nvprops,
330 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
331
332 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
333 DMU_OT_NONE, 0, tx);
334 ASSERT(error == 0);
335
336 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
337 DMU_OT_NONE, 0, tx);
338 ASSERT(error == 0);
339
340 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
341 ASSERT(error == 0);
342 }
343
344 /*
345 * Replay a TX_WRITE ZIL transaction that didn't get committed
346 * after a system failure
347 */
348 static int
349 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
350 {
351 objset_t *os = zv->zv_objset;
352 char *data = (char *)(lr + 1); /* data follows lr_write_t */
353 uint64_t offset, length;
354 dmu_tx_t *tx;
355 int error;
356
357 if (byteswap)
358 byteswap_uint64_array(lr, sizeof (*lr));
359
360 offset = lr->lr_offset;
361 length = lr->lr_length;
362
363 /* If it's a dmu_sync() block, write the whole block */
364 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
365 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
366 if (length < blocksize) {
367 offset -= offset % blocksize;
368 length = blocksize;
369 }
370 }
371
372 tx = dmu_tx_create(os);
373 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
374 error = dmu_tx_assign(tx, TXG_WAIT);
375 if (error) {
376 dmu_tx_abort(tx);
377 } else {
378 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
379 dmu_tx_commit(tx);
380 }
381
382 return (error);
383 }
384
385 /* ARGSUSED */
386 static int
387 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
388 {
389 return (ENOTSUP);
390 }
391
392 /*
393 * Callback vectors for replaying records.
394 * Only TX_WRITE is needed for zvol.
395 */
396 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
397 zvol_replay_err, /* 0 no such transaction type */
398 zvol_replay_err, /* TX_CREATE */
399 zvol_replay_err, /* TX_MKDIR */
400 zvol_replay_err, /* TX_MKXATTR */
401 zvol_replay_err, /* TX_SYMLINK */
402 zvol_replay_err, /* TX_REMOVE */
403 zvol_replay_err, /* TX_RMDIR */
404 zvol_replay_err, /* TX_LINK */
405 zvol_replay_err, /* TX_RENAME */
406 zvol_replay_write, /* TX_WRITE */
407 zvol_replay_err, /* TX_TRUNCATE */
408 zvol_replay_err, /* TX_SETATTR */
409 zvol_replay_err, /* TX_ACL */
410 zvol_replay_err, /* TX_CREATE_ACL */
411 zvol_replay_err, /* TX_CREATE_ATTR */
412 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
413 zvol_replay_err, /* TX_MKDIR_ACL */
414 zvol_replay_err, /* TX_MKDIR_ATTR */
415 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
416 zvol_replay_err, /* TX_WRITE2 */
417 };
418
419 int
420 zvol_name2minor(const char *name, minor_t *minor)
421 {
422 zvol_state_t *zv;
423
424 mutex_enter(&zfsdev_state_lock);
425 zv = zvol_minor_lookup(name);
426 if (minor && zv)
427 *minor = zv->zv_minor;
428 mutex_exit(&zfsdev_state_lock);
429 return (zv ? 0 : -1);
430 }
431
432 /*
433 * Create a minor node (plus a whole lot more) for the specified volume.
434 */
435 int
436 zvol_create_minor(const char *name)
437 {
438 zfs_soft_state_t *zs;
439 zvol_state_t *zv;
440 objset_t *os;
441 dmu_object_info_t doi;
442 minor_t minor = 0;
443 char chrbuf[30], blkbuf[30];
444 int error;
445
446 mutex_enter(&zfsdev_state_lock);
447
448 if (zvol_minor_lookup(name) != NULL) {
449 mutex_exit(&zfsdev_state_lock);
450 return (EEXIST);
451 }
452
453 /* lie and say we're read-only */
454 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
455
456 if (error) {
457 mutex_exit(&zfsdev_state_lock);
458 return (error);
459 }
460
461 if ((minor = zfsdev_minor_alloc()) == 0) {
462 dmu_objset_disown(os, FTAG);
463 mutex_exit(&zfsdev_state_lock);
464 return (ENXIO);
465 }
466
467 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
468 dmu_objset_disown(os, FTAG);
469 mutex_exit(&zfsdev_state_lock);
470 return (EAGAIN);
471 }
472 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
473 (char *)name);
474
475 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
476
477 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
478 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
479 ddi_soft_state_free(zfsdev_state, minor);
480 dmu_objset_disown(os, FTAG);
481 mutex_exit(&zfsdev_state_lock);
482 return (EAGAIN);
483 }
484
485 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
486
487 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
488 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
489 ddi_remove_minor_node(zfs_dip, chrbuf);
490 ddi_soft_state_free(zfsdev_state, minor);
491 dmu_objset_disown(os, FTAG);
492 mutex_exit(&zfsdev_state_lock);
493 return (EAGAIN);
494 }
495
496 zs = ddi_get_soft_state(zfsdev_state, minor);
497 zs->zss_type = ZSST_ZVOL;
498 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
499 (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
500 zv->zv_min_bs = DEV_BSHIFT;
501 zv->zv_minor = minor;
502 zv->zv_objset = os;
503 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
504 zv->zv_flags |= ZVOL_RDONLY;
505 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
506 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
507 sizeof (rl_t), offsetof(rl_t, r_node));
508 list_create(&zv->zv_extents, sizeof (zvol_extent_t),
509 offsetof(zvol_extent_t, ze_node));
510 /* get and cache the blocksize */
511 error = dmu_object_info(os, ZVOL_OBJ, &doi);
512 ASSERT(error == 0);
513 zv->zv_volblocksize = doi.doi_data_block_size;
514
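	/* Replay the intent log, or destroy it if replay is disabled. */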
515 if (spa_writeable(dmu_objset_spa(os))) {
516 if (zil_replay_disable)
517 zil_destroy(dmu_objset_zil(os), B_FALSE);
518 else
519 zil_replay(os, zv, zvol_replay_vector);
520 }
521 dmu_objset_disown(os, FTAG);
522 zv->zv_objset = NULL;
523
524 zvol_minors++;
525
526 mutex_exit(&zfsdev_state_lock);
527
528 return (0);
529 }
530
531 /*
532 * Remove minor node for the specified volume.
533 */
534 static int
535 zvol_remove_zv(zvol_state_t *zv)
536 {
537 char nmbuf[20];
538 minor_t minor = zv->zv_minor;
539
540 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
541 if (zv->zv_total_opens != 0)
542 return (EBUSY);
543
544 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
545 ddi_remove_minor_node(zfs_dip, nmbuf);
546
547 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
548 ddi_remove_minor_node(zfs_dip, nmbuf);
549
550 avl_destroy(&zv->zv_znode.z_range_avl);
551 mutex_destroy(&zv->zv_znode.z_range_lock);
552
553 kmem_free(zv, sizeof (zvol_state_t));
554
555 ddi_soft_state_free(zfsdev_state, minor);
556
557 zvol_minors--;
558 return (0);
559 }
560
561 int
562 zvol_remove_minor(const char *name)
563 {
564 zvol_state_t *zv;
565 int rc;
566
567 mutex_enter(&zfsdev_state_lock);
568 if ((zv = zvol_minor_lookup(name)) == NULL) {
569 mutex_exit(&zfsdev_state_lock);
570 return (ENXIO);
571 }
572 rc = zvol_remove_zv(zv);
573 mutex_exit(&zfsdev_state_lock);
574 return (rc);
575 }
576
577 int
578 zvol_first_open(zvol_state_t *zv)
579 {
580 objset_t *os;
581 uint64_t volsize;
582 int error;
583 uint64_t readonly;
584
585 /* lie and say we're read-only */
586 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
587 zvol_tag, &os);
588 if (error)
589 return (error);
590
591 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
592 if (error) {
593 ASSERT(error == 0);
594 dmu_objset_disown(os, zvol_tag);
595 return (error);
596 }
597 zv->zv_objset = os;
598 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
599 if (error) {
600 dmu_objset_disown(os, zvol_tag);
601 return (error);
602 }
603 zv->zv_volsize = volsize;
604 zv->zv_zilog = zil_open(os, zvol_get_data);
605 zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip),
606 zv->zv_minor);
607
608 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
609 NULL) == 0);
610 if (readonly || dmu_objset_is_snapshot(os) ||
611 !spa_writeable(dmu_objset_spa(os)))
612 zv->zv_flags |= ZVOL_RDONLY;
613 else
614 zv->zv_flags &= ~ZVOL_RDONLY;
615 return (error);
616 }
617
618 void
619 zvol_last_close(zvol_state_t *zv)
620 {
621 zil_close(zv->zv_zilog);
622 zv->zv_zilog = NULL;
623 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
624 zv->zv_dbuf = NULL;
625 dmu_objset_disown(zv->zv_objset, zvol_tag);
626 zv->zv_objset = NULL;
627 }
628
629 int
630 zvol_prealloc(zvol_state_t *zv)
631 {
632 objset_t *os = zv->zv_objset;
633 dmu_tx_t *tx;
634 uint64_t refd, avail, usedobjs, availobjs;
635 uint64_t resid = zv->zv_volsize;
636 uint64_t off = 0;
637
638 /* Check the space usage before attempting to allocate the space */
639 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
640 if (avail < zv->zv_volsize)
641 return (ENOSPC);
642
643 /* Free old extents if they exist */
644 zvol_free_extents(zv);
645
646 while (resid != 0) {
647 int error;
648 uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);
649
650 tx = dmu_tx_create(os);
651 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
652 error = dmu_tx_assign(tx, TXG_WAIT);
653 if (error) {
654 dmu_tx_abort(tx);
655 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
656 return (error);
657 }
658 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
659 dmu_tx_commit(tx);
660 off += bytes;
661 resid -= bytes;
662 }
663 txg_wait_synced(dmu_objset_pool(os), 0);
664
665 return (0);
666 }
667
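/*
 * Record the new volume size in the ZAP and free any blocks that now lie
 * beyond the end of the volume.  Caller must hold zfsdev_state_lock.
 */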
668 int
669 zvol_update_volsize(objset_t *os, uint64_t volsize)
670 {
671 dmu_tx_t *tx;
672 int error;
673
674 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
675
676 tx = dmu_tx_create(os);
677 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
678 error = dmu_tx_assign(tx, TXG_WAIT);
679 if (error) {
680 dmu_tx_abort(tx);
681 return (error);
682 }
683
684 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
685 &volsize, tx);
686 dmu_tx_commit(tx);
687
688 if (error == 0)
689 error = dmu_free_long_range(os,
690 ZVOL_OBJ, volsize, DMU_OBJECT_END);
691 return (error);
692 }
693
694 void
695 zvol_remove_minors(const char *name)
696 {
697 zvol_state_t *zv;
698 char *namebuf;
699 minor_t minor;
700
701 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
702 (void) strncpy(namebuf, name, strlen(name));
703 (void) strcat(namebuf, "/");
704 mutex_enter(&zfsdev_state_lock);
705 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
706
707 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
708 if (zv == NULL)
709 continue;
710 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
711 (void) zvol_remove_zv(zv);
712 }
713 kmem_free(namebuf, strlen(name) + 2);
714
715 mutex_exit(&zfsdev_state_lock);
716 }
717
718 int
719 zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
720 {
721 zvol_state_t *zv = NULL;
722 objset_t *os;
723 int error;
724 dmu_object_info_t doi;
725 uint64_t old_volsize = 0ULL;
726 uint64_t readonly;
727
728 mutex_enter(&zfsdev_state_lock);
729 zv = zvol_minor_lookup(name);
730 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
731 mutex_exit(&zfsdev_state_lock);
732 return (error);
733 }
734
735 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
736 (error = zvol_check_volsize(volsize,
737 doi.doi_data_block_size)) != 0)
738 goto out;
739
740 VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
741 NULL) == 0);
742 if (readonly) {
743 error = EROFS;
744 goto out;
745 }
746
747 error = zvol_update_volsize(os, volsize);
748 /*
749 * Reinitialize the dump area to the new size. If we
750 * failed to resize the dump area then restore it back to
751 * its original size.
752 */
753 if (zv && error == 0) {
754 if (zv->zv_flags & ZVOL_DUMPIFIED) {
755 old_volsize = zv->zv_volsize;
756 zv->zv_volsize = volsize;
757 if ((error = zvol_dumpify(zv)) != 0 ||
758 (error = dumpvp_resize()) != 0) {
759 (void) zvol_update_volsize(os, old_volsize);
760 zv->zv_volsize = old_volsize;
761 error = zvol_dumpify(zv);
762 }
763 }
764 if (error == 0) {
765 zv->zv_volsize = volsize;
766 zvol_size_changed(volsize, maj, zv->zv_minor);
767 }
768 }
769
770 /*
771 * Generate a LUN expansion event.
772 */
773 if (zv && error == 0) {
774 sysevent_id_t eid;
775 nvlist_t *attr;
776 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
777
778 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
779 zv->zv_minor);
780
781 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
782 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
783
784 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
785 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
786
787 nvlist_free(attr);
788 kmem_free(physpath, MAXPATHLEN);
789 }
790
791 out:
792 dmu_objset_rele(os, FTAG);
793
794 mutex_exit(&zfsdev_state_lock);
795
796 return (error);
797 }
798
799 /*ARGSUSED*/
800 int
801 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
802 {
803 zvol_state_t *zv;
804 int err = 0;
805
806 mutex_enter(&zfsdev_state_lock);
807
808 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
809 if (zv == NULL) {
810 mutex_exit(&zfsdev_state_lock);
811 return (ENXIO);
812 }
813
814 if (zv->zv_total_opens == 0)
815 err = zvol_first_open(zv);
816 if (err) {
817 mutex_exit(&zfsdev_state_lock);
818 return (err);
819 }
820 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
821 err = EROFS;
822 goto out;
823 }
824 if (zv->zv_flags & ZVOL_EXCL) {
825 err = EBUSY;
826 goto out;
827 }
828 if (flag & FEXCL) {
829 if (zv->zv_total_opens != 0) {
830 err = EBUSY;
831 goto out;
832 }
833 zv->zv_flags |= ZVOL_EXCL;
834 }
835
836 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
837 zv->zv_open_count[otyp]++;
838 zv->zv_total_opens++;
839 }
840 mutex_exit(&zfsdev_state_lock);
841
842 return (err);
843 out:
844 if (zv->zv_total_opens == 0)
845 zvol_last_close(zv);
846 mutex_exit(&zfsdev_state_lock);
847 return (err);
848 }
849
850 /*ARGSUSED*/
851 int
852 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
853 {
854 minor_t minor = getminor(dev);
855 zvol_state_t *zv;
856 int error = 0;
857
858 mutex_enter(&zfsdev_state_lock);
859
860 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
861 if (zv == NULL) {
862 mutex_exit(&zfsdev_state_lock);
863 return (ENXIO);
864 }
865
866 if (zv->zv_flags & ZVOL_EXCL) {
867 ASSERT(zv->zv_total_opens == 1);
868 zv->zv_flags &= ~ZVOL_EXCL;
869 }
870
871 /*
872 * If the open count is zero, this is a spurious close.
873 * That indicates a bug in the kernel / DDI framework.
874 */
875 ASSERT(zv->zv_open_count[otyp] != 0);
876 ASSERT(zv->zv_total_opens != 0);
877
878 /*
879 * You may get multiple opens, but only one close.
880 */
881 zv->zv_open_count[otyp]--;
882 zv->zv_total_opens--;
883
884 if (zv->zv_total_opens == 0)
885 zvol_last_close(zv);
886
887 mutex_exit(&zfsdev_state_lock);
888 return (error);
889 }
890
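/*
 * Completion callback for zvol_get_data(): release the held dbuf and the
 * range lock, and on success record the just-synced block with the ZIL.
 */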
891 static void
892 zvol_get_done(zgd_t *zgd, int error)
893 {
894 if (zgd->zgd_db)
895 dmu_buf_rele(zgd->zgd_db, zgd);
896
897 zfs_range_unlock(zgd->zgd_rl);
898
899 if (error == 0 && zgd->zgd_bp)
900 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
901
902 kmem_free(zgd, sizeof (zgd_t));
903 }
904
905 /*
906 * Get data to generate a TX_WRITE intent log record.
907 */
908 static int
909 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
910 {
911 zvol_state_t *zv = arg;
912 objset_t *os = zv->zv_objset;
913 uint64_t object = ZVOL_OBJ;
914 uint64_t offset = lr->lr_offset;
915 uint64_t size = lr->lr_length; /* length of user data */
916 blkptr_t *bp = &lr->lr_blkptr;
917 dmu_buf_t *db;
918 zgd_t *zgd;
919 int error;
920
921 ASSERT(zio != NULL);
922 ASSERT(size != 0);
923
924 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
925 zgd->zgd_zilog = zv->zv_zilog;
926 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
927
928 /*
929 * Write records come in two flavors: immediate and indirect.
930 * For small writes it's cheaper to store the data with the
931 * log record (immediate); for large writes it's cheaper to
932 * sync the data and get a pointer to it (indirect) so that
933 * we don't have to write the data twice.
934 */
935 if (buf != NULL) { /* immediate write */
936 error = dmu_read(os, object, offset, size, buf,
937 DMU_READ_NO_PREFETCH);
938 } else {
939 size = zv->zv_volblocksize;
940 offset = P2ALIGN(offset, size);
941 error = dmu_buf_hold(os, object, offset, zgd, &db,
942 DMU_READ_NO_PREFETCH);
943 if (error == 0) {
944 zgd->zgd_db = db;
945 zgd->zgd_bp = bp;
946
947 ASSERT(db->db_offset == offset);
948 ASSERT(db->db_size == size);
949
950 error = dmu_sync(zio, lr->lr_common.lrc_txg,
951 zvol_get_done, zgd);
952
953 if (error == 0)
954 return (0);
955 }
956 }
957
958 zvol_get_done(zgd, error);
959
960 return (error);
961 }
962
963 /*
964 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
965 *
966 * We store data in the log buffers if it's small enough.
967 * Otherwise we will later flush the data out via dmu_sync().
968 */
969 ssize_t zvol_immediate_write_sz = 32768;
970
971 static void
972 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
973 boolean_t sync)
974 {
975 uint32_t blocksize = zv->zv_volblocksize;
976 zilog_t *zilog = zv->zv_zilog;
977 boolean_t slogging;
978 ssize_t immediate_write_sz;
979
980 if (zil_replaying(zilog, tx))
981 return;
982
983 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
984 ? 0 : zvol_immediate_write_sz;
985
986 slogging = spa_has_slogs(zilog->zl_spa) &&
987 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
988
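	/*
	 * Carve the write up into log records.  A full, block-aligned chunk
	 * may be logged indirectly (the record will point at the block that
	 * dmu_sync() writes); synchronous writes copy the data into the
	 * record now; asynchronous writes defer the copy until the itx is
	 * actually committed.
	 */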
989 while (resid) {
990 itx_t *itx;
991 lr_write_t *lr;
992 ssize_t len;
993 itx_wr_state_t write_state;
994
995 /*
996 * Unlike zfs_log_write() we can be called with
997 		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
998 */
999 if (blocksize > immediate_write_sz && !slogging &&
1000 resid >= blocksize && off % blocksize == 0) {
1001 write_state = WR_INDIRECT; /* uses dmu_sync */
1002 len = blocksize;
1003 } else if (sync) {
1004 write_state = WR_COPIED;
1005 len = MIN(ZIL_MAX_LOG_DATA, resid);
1006 } else {
1007 write_state = WR_NEED_COPY;
1008 len = MIN(ZIL_MAX_LOG_DATA, resid);
1009 }
1010
1011 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1012 (write_state == WR_COPIED ? len : 0));
1013 lr = (lr_write_t *)&itx->itx_lr;
1014 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1015 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1016 zil_itx_destroy(itx);
1017 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1018 lr = (lr_write_t *)&itx->itx_lr;
1019 write_state = WR_NEED_COPY;
1020 }
1021
1022 itx->itx_wr_state = write_state;
1023 if (write_state == WR_NEED_COPY)
1024 itx->itx_sod += len;
1025 lr->lr_foid = ZVOL_OBJ;
1026 lr->lr_offset = off;
1027 lr->lr_length = len;
1028 lr->lr_blkoff = 0;
1029 BP_ZERO(&lr->lr_blkptr);
1030
1031 itx->itx_private = zv;
1032 itx->itx_sync = sync;
1033
1034 zil_itx_assign(zilog, itx, tx);
1035
1036 off += len;
1037 resid -= len;
1038 }
1039 }
1040
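/*
 * Perform raw physical I/O against the vdevs backing part of a dump extent.
 * Mirror, replacing, and spare vdevs are handled by recursing into each
 * child: a read stops at the first child that succeeds, a write is attempted
 * on every child, and the parent succeeds if any child did.  Leaf vdevs use
 * ldi_dump() in panic/dump context and normal physio otherwise.
 */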
1041 static int
1042 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
1043 boolean_t doread, boolean_t isdump)
1044 {
1045 vdev_disk_t *dvd;
1046 int c;
1047 int numerrors = 0;
1048
1049 for (c = 0; c < vd->vdev_children; c++) {
1050 ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
1051 vd->vdev_ops == &vdev_replacing_ops ||
1052 vd->vdev_ops == &vdev_spare_ops);
1053 int err = zvol_dumpio_vdev(vd->vdev_child[c],
1054 addr, offset, size, doread, isdump);
1055 if (err != 0) {
1056 numerrors++;
1057 } else if (doread) {
1058 break;
1059 }
1060 }
1061
1062 if (!vd->vdev_ops->vdev_op_leaf)
1063 return (numerrors < vd->vdev_children ? 0 : EIO);
1064
1065 if (doread && !vdev_readable(vd))
1066 return (EIO);
1067 else if (!doread && !vdev_writeable(vd))
1068 return (EIO);
1069
1070 dvd = vd->vdev_tsd;
1071 ASSERT3P(dvd, !=, NULL);
1072 offset += VDEV_LABEL_START_SIZE;
1073
1074 if (ddi_in_panic() || isdump) {
1075 ASSERT(!doread);
1076 if (doread)
1077 return (EIO);
1078 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1079 lbtodb(size)));
1080 } else {
1081 return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
1082 doread ? B_READ : B_WRITE));
1083 }
1084 }
1085
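/*
 * Translate a logical volume offset to a physical vdev offset using the
 * precomputed extent list, then issue the raw I/O.  This is the path used
 * for dumpified zvols, which must bypass the DMU entirely.
 */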
1086 static int
1087 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1088 boolean_t doread, boolean_t isdump)
1089 {
1090 vdev_t *vd;
1091 int error;
1092 zvol_extent_t *ze;
1093 spa_t *spa = dmu_objset_spa(zv->zv_objset);
1094
1095 	/* Must be sector aligned, and must not straddle a block boundary. */
1096 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1097 P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1098 return (EINVAL);
1099 }
1100 ASSERT(size <= zv->zv_volblocksize);
1101
1102 /* Locate the extent this belongs to */
1103 ze = list_head(&zv->zv_extents);
1104 while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1105 offset -= ze->ze_nblks * zv->zv_volblocksize;
1106 ze = list_next(&zv->zv_extents, ze);
1107 }
1108
1109 if (!ddi_in_panic())
1110 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1111
1112 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1113 offset += DVA_GET_OFFSET(&ze->ze_dva);
1114 error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);
1115
1116 if (!ddi_in_panic())
1117 spa_config_exit(spa, SCL_STATE, FTAG);
1118
1119 return (error);
1120 }
1121
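/*
 * Main block I/O entry point.  Dumpified volumes are serviced directly from
 * their physical extents; everything else goes through the DMU under a range
 * lock, and synchronous writes are committed to the ZIL before biodone().
 */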
1122 int
1123 zvol_strategy(buf_t *bp)
1124 {
1125 zfs_soft_state_t *zs = NULL;
1126 zvol_state_t *zv;
1127 uint64_t off, volsize;
1128 size_t resid;
1129 char *addr;
1130 objset_t *os;
1131 rl_t *rl;
1132 int error = 0;
1133 boolean_t doread = bp->b_flags & B_READ;
1134 boolean_t is_dump;
1135 boolean_t sync;
1136
1137 if (getminor(bp->b_edev) == 0) {
1138 error = EINVAL;
1139 } else {
1140 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1141 if (zs == NULL)
1142 error = ENXIO;
1143 else if (zs->zss_type != ZSST_ZVOL)
1144 error = EINVAL;
1145 }
1146
1147 if (error) {
1148 bioerror(bp, error);
1149 biodone(bp);
1150 return (0);
1151 }
1152
1153 zv = zs->zss_data;
1154
1155 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1156 bioerror(bp, EROFS);
1157 biodone(bp);
1158 return (0);
1159 }
1160
1161 off = ldbtob(bp->b_blkno);
1162 volsize = zv->zv_volsize;
1163
1164 os = zv->zv_objset;
1165 ASSERT(os != NULL);
1166
1167 bp_mapin(bp);
1168 addr = bp->b_un.b_addr;
1169 resid = bp->b_bcount;
1170
1171 if (resid > 0 && (off < 0 || off >= volsize)) {
1172 bioerror(bp, EIO);
1173 biodone(bp);
1174 return (0);
1175 }
1176
1177 is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
1178 sync = ((!(bp->b_flags & B_ASYNC) &&
1179 !(zv->zv_flags & ZVOL_WCE)) ||
1180 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1181 !doread && !is_dump;
1182
1183 /*
1184 * There must be no buffer changes when doing a dmu_sync() because
1185 * we can't change the data whilst calculating the checksum.
1186 */
1187 rl = zfs_range_lock(&zv->zv_znode, off, resid,
1188 doread ? RL_READER : RL_WRITER);
1189
1190 while (resid != 0 && off < volsize) {
1191 size_t size = MIN(resid, zvol_maxphys);
1192 if (is_dump) {
1193 size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1194 error = zvol_dumpio(zv, addr, off, size,
1195 doread, B_FALSE);
1196 } else if (doread) {
1197 error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1198 DMU_READ_PREFETCH);
1199 } else {
1200 dmu_tx_t *tx = dmu_tx_create(os);
1201 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1202 error = dmu_tx_assign(tx, TXG_WAIT);
1203 if (error) {
1204 dmu_tx_abort(tx);
1205 } else {
1206 dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1207 zvol_log_write(zv, tx, off, size, sync);
1208 dmu_tx_commit(tx);
1209 }
1210 }
1211 if (error) {
1212 /* convert checksum errors into IO errors */
1213 if (error == ECKSUM)
1214 error = EIO;
1215 break;
1216 }
1217 off += size;
1218 addr += size;
1219 resid -= size;
1220 }
1221 zfs_range_unlock(rl);
1222
1223 if ((bp->b_resid = resid) == bp->b_bcount)
1224 bioerror(bp, off > volsize ? EINVAL : error);
1225
1226 if (sync)
1227 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1228 biodone(bp);
1229
1230 return (0);
1231 }
1232
1233 /*
1234 * Set the buffer count to the zvol maximum transfer.
1235 * Using our own routine instead of the default minphys()
1236 * means that for larger writes we write bigger buffers on X86
1237 * (128K instead of 56K) and flush the disk write cache less often
1238 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1239 * 56K on X86 and 128K on sparc).
1240 */
1241 void
1242 zvol_minphys(struct buf *bp)
1243 {
1244 if (bp->b_bcount > zvol_maxphys)
1245 bp->b_bcount = zvol_maxphys;
1246 }
1247
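/*
 * dump(9E) entry point: called on the panic/dump path to write crash dump
 * data directly to the volume's preallocated extents.
 */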
1248 int
1249 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1250 {
1251 minor_t minor = getminor(dev);
1252 zvol_state_t *zv;
1253 int error = 0;
1254 uint64_t size;
1255 uint64_t boff;
1256 uint64_t resid;
1257
1258 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1259 if (zv == NULL)
1260 return (ENXIO);
1261
1262 boff = ldbtob(blkno);
1263 resid = ldbtob(nblocks);
1264
1265 VERIFY3U(boff + resid, <=, zv->zv_volsize);
1266
1267 while (resid) {
1268 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1269 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1270 if (error)
1271 break;
1272 boff += size;
1273 addr += size;
1274 resid -= size;
1275 }
1276
1277 return (error);
1278 }
1279
1280 /*ARGSUSED*/
1281 int
1282 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1283 {
1284 minor_t minor = getminor(dev);
1285 zvol_state_t *zv;
1286 uint64_t volsize;
1287 rl_t *rl;
1288 int error = 0;
1289
1290 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1291 if (zv == NULL)
1292 return (ENXIO);
1293
1294 volsize = zv->zv_volsize;
1295 if (uio->uio_resid > 0 &&
1296 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1297 return (EIO);
1298
1299 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1300 error = physio(zvol_strategy, NULL, dev, B_READ,
1301 zvol_minphys, uio);
1302 return (error);
1303 }
1304
1305 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1306 RL_READER);
1307 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1308 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1309
1310 /* don't read past the end */
1311 if (bytes > volsize - uio->uio_loffset)
1312 bytes = volsize - uio->uio_loffset;
1313
1314 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1315 if (error) {
1316 /* convert checksum errors into IO errors */
1317 if (error == ECKSUM)
1318 error = EIO;
1319 break;
1320 }
1321 }
1322 zfs_range_unlock(rl);
1323 return (error);
1324 }
1325
1326 /*ARGSUSED*/
1327 int
1328 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1329 {
1330 minor_t minor = getminor(dev);
1331 zvol_state_t *zv;
1332 uint64_t volsize;
1333 rl_t *rl;
1334 int error = 0;
1335 boolean_t sync;
1336
1337 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1338 if (zv == NULL)
1339 return (ENXIO);
1340
1341 volsize = zv->zv_volsize;
1342 if (uio->uio_resid > 0 &&
1343 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1344 return (EIO);
1345
1346 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1347 error = physio(zvol_strategy, NULL, dev, B_WRITE,
1348 zvol_minphys, uio);
1349 return (error);
1350 }
1351
1352 sync = !(zv->zv_flags & ZVOL_WCE) ||
1353 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1354
1355 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1356 RL_WRITER);
1357 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1358 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1359 uint64_t off = uio->uio_loffset;
1360 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1361
1362 if (bytes > volsize - off) /* don't write past the end */
1363 bytes = volsize - off;
1364
1365 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1366 error = dmu_tx_assign(tx, TXG_WAIT);
1367 if (error) {
1368 dmu_tx_abort(tx);
1369 break;
1370 }
1371 error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1372 if (error == 0)
1373 zvol_log_write(zv, tx, off, bytes, sync);
1374 dmu_tx_commit(tx);
1375
1376 if (error)
1377 break;
1378 }
1379 zfs_range_unlock(rl);
1380 if (sync)
1381 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1382 return (error);
1383 }
1384
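/*
 * Fabricate an EFI label (GPT header plus a single reserved partition entry
 * covering the whole volume) in response to DKIOCGETEFI.
 */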
1385 int
1386 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1387 {
1388 struct uuid uuid = EFI_RESERVED;
1389 efi_gpe_t gpe = { 0 };
1390 uint32_t crc;
1391 dk_efi_t efi;
1392 int length;
1393 char *ptr;
1394
1395 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1396 return (EFAULT);
1397 ptr = (char *)(uintptr_t)efi.dki_data_64;
1398 length = efi.dki_length;
1399 /*
1400 * Some clients may attempt to request a PMBR for the
1401 * zvol. Currently this interface will return EINVAL to
1402 * such requests. These requests could be supported by
1403 * adding a check for lba == 0 and consing up an appropriate
1404 * PMBR.
1405 */
1406 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1407 return (EINVAL);
1408
1409 gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1410 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1411 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1412
1413 if (efi.dki_lba == 1) {
1414 efi_gpt_t gpt = { 0 };
1415
1416 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1417 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1418 gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1419 gpt.efi_gpt_MyLBA = LE_64(1ULL);
1420 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1421 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1422 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1423 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1424 gpt.efi_gpt_SizeOfPartitionEntry =
1425 LE_32(sizeof (efi_gpe_t));
1426 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1427 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1428 CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1429 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1430 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1431 flag))
1432 return (EFAULT);
1433 ptr += sizeof (gpt);
1434 length -= sizeof (gpt);
1435 }
1436 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1437 length), flag))
1438 return (EFAULT);
1439 return (0);
1440 }
1441
1442 /*
1443 * BEGIN entry points to allow external callers access to the volume.
1444 */
1445 /*
1446 * Return the volume parameters needed for access from an external caller.
1447 * These values are invariant as long as the volume is held open.
1448 */
1449 int
1450 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1451 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1452 void **rl_hdl, void **bonus_hdl)
1453 {
1454 zvol_state_t *zv;
1455
1456 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1457 if (zv == NULL)
1458 return (ENXIO);
1459 if (zv->zv_flags & ZVOL_DUMPIFIED)
1460 return (ENXIO);
1461
1462 ASSERT(blksize && max_xfer_len && minor_hdl &&
1463 objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1464
1465 *blksize = zv->zv_volblocksize;
1466 *max_xfer_len = (uint64_t)zvol_maxphys;
1467 *minor_hdl = zv;
1468 *objset_hdl = zv->zv_objset;
1469 *zil_hdl = zv->zv_zilog;
1470 *rl_hdl = &zv->zv_znode;
1471 *bonus_hdl = zv->zv_dbuf;
1472 return (0);
1473 }
1474
1475 /*
1476 * Return the current volume size to an external caller.
1477 * The size can change while the volume is open.
1478 */
1479 uint64_t
1480 zvol_get_volume_size(void *minor_hdl)
1481 {
1482 zvol_state_t *zv = minor_hdl;
1483
1484 return (zv->zv_volsize);
1485 }
1486
1487 /*
1488 * Return the current WCE setting to an external caller.
1489 * The WCE setting can change while the volume is open.
1490 */
1491 int
1492 zvol_get_volume_wce(void *minor_hdl)
1493 {
1494 zvol_state_t *zv = minor_hdl;
1495
1496 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1497 }
1498
1499 /*
1500 * Entry point for external callers to zvol_log_write
1501 */
1502 void
1503 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1504 boolean_t sync)
1505 {
1506 zvol_state_t *zv = minor_hdl;
1507
1508 zvol_log_write(zv, tx, off, resid, sync);
1509 }
1510 /*
1511 * END entry points to allow external callers access to the volume.
1512 */
1513
1514 /*
1515 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1516 */
1517 /*ARGSUSED*/
1518 int
1519 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1520 {
1521 zvol_state_t *zv;
1522 struct dk_cinfo dki;
1523 struct dk_minfo dkm;
1524 struct dk_callback *dkc;
1525 int error = 0;
1526 rl_t *rl;
1527
1528 mutex_enter(&zfsdev_state_lock);
1529
1530 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1531
1532 if (zv == NULL) {
1533 mutex_exit(&zfsdev_state_lock);
1534 return (ENXIO);
1535 }
1536 ASSERT(zv->zv_total_opens > 0);
1537
1538 switch (cmd) {
1539
1540 case DKIOCINFO:
1541 bzero(&dki, sizeof (dki));
1542 (void) strcpy(dki.dki_cname, "zvol");
1543 (void) strcpy(dki.dki_dname, "zvol");
1544 dki.dki_ctype = DKC_UNKNOWN;
1545 dki.dki_unit = getminor(dev);
1546 dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
1547 mutex_exit(&zfsdev_state_lock);
1548 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1549 error = EFAULT;
1550 return (error);
1551
1552 case DKIOCGMEDIAINFO:
1553 bzero(&dkm, sizeof (dkm));
1554 dkm.dki_lbsize = 1U << zv->zv_min_bs;
1555 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1556 dkm.dki_media_type = DK_UNKNOWN;
1557 mutex_exit(&zfsdev_state_lock);
1558 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1559 error = EFAULT;
1560 return (error);
1561
1562 case DKIOCGETEFI:
1563 {
1564 uint64_t vs = zv->zv_volsize;
1565 uint8_t bs = zv->zv_min_bs;
1566
1567 mutex_exit(&zfsdev_state_lock);
1568 error = zvol_getefi((void *)arg, flag, vs, bs);
1569 return (error);
1570 }
1571
1572 case DKIOCFLUSHWRITECACHE:
1573 dkc = (struct dk_callback *)arg;
1574 mutex_exit(&zfsdev_state_lock);
1575 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1576 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1577 (*dkc->dkc_callback)(dkc->dkc_cookie, error);
1578 error = 0;
1579 }
1580 return (error);
1581
1582 case DKIOCGETWCE:
1583 {
1584 int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1585 if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1586 flag))
1587 error = EFAULT;
1588 break;
1589 }
1590 case DKIOCSETWCE:
1591 {
1592 int wce;
1593 if (ddi_copyin((void *)arg, &wce, sizeof (int),
1594 flag)) {
1595 error = EFAULT;
1596 break;
1597 }
1598 if (wce) {
1599 zv->zv_flags |= ZVOL_WCE;
1600 mutex_exit(&zfsdev_state_lock);
1601 } else {
1602 zv->zv_flags &= ~ZVOL_WCE;
1603 mutex_exit(&zfsdev_state_lock);
1604 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1605 }
1606 return (0);
1607 }
1608
1609 case DKIOCGGEOM:
1610 case DKIOCGVTOC:
1611 /*
1612 * commands using these (like prtvtoc) expect ENOTSUP
1613 * since we're emulating an EFI label
1614 */
1615 error = ENOTSUP;
1616 break;
1617
1618 case DKIOCDUMPINIT:
1619 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1620 RL_WRITER);
1621 error = zvol_dumpify(zv);
1622 zfs_range_unlock(rl);
1623 break;
1624
1625 case DKIOCDUMPFINI:
1626 if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1627 break;
1628 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1629 RL_WRITER);
1630 error = zvol_dump_fini(zv);
1631 zfs_range_unlock(rl);
1632 break;
1633
1634 default:
1635 error = ENOTTY;
1636 break;
1637
1638 }
1639 mutex_exit(&zfsdev_state_lock);
1640 return (error);
1641 }
1642
1643 int
1644 zvol_busy(void)
1645 {
1646 return (zvol_minors != 0);
1647 }
1648
1649 void
1650 zvol_init(void)
1651 {
1652 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1653 1) == 0);
1654 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1655 }
1656
1657 void
1658 zvol_fini(void)
1659 {
1660 mutex_destroy(&zfsdev_state_lock);
1661 ddi_soft_state_fini(&zfsdev_state);
1662 }
1663
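/*
 * Prepare a zvol to back a dump device.  On first use this frees the existing
 * data, forces checksum, compression, and dedup off, zeroes the
 * refreservation, switches to the maximum block size, and preallocates the
 * entire volume so every block has a fixed physical location; the original
 * property values are stashed in the ZAP so zvol_dump_fini() can restore
 * them.  On a resize, only the saved refreservation is brought up to date
 * before reallocating.
 */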
1664 static int
1665 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1666 {
1667 dmu_tx_t *tx;
1668 int error = 0;
1669 objset_t *os = zv->zv_objset;
1670 nvlist_t *nv = NULL;
1671 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
1672
1673 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1674 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1675 DMU_OBJECT_END);
1676 /* wait for dmu_free_long_range to actually free the blocks */
1677 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1678
1679 tx = dmu_tx_create(os);
1680 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1681 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1682 error = dmu_tx_assign(tx, TXG_WAIT);
1683 if (error) {
1684 dmu_tx_abort(tx);
1685 return (error);
1686 }
1687
1688 /*
1689 * If we are resizing the dump device then we only need to
1690 * update the refreservation to match the newly updated
1691 	 * zvol size.  Otherwise, we save off the original property state of the
1692 	 * zvol so that we can restore it if the zvol is ever undumpified.
1693 */
1694 if (resize) {
1695 error = zap_update(os, ZVOL_ZAP_OBJ,
1696 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1697 &zv->zv_volsize, tx);
1698 } else {
1699 uint64_t checksum, compress, refresrv, vbs, dedup;
1700
1701 error = dsl_prop_get_integer(zv->zv_name,
1702 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1703 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1704 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
1705 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1706 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
1707 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1708 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
1709 if (version >= SPA_VERSION_DEDUP) {
1710 error = error ? error :
1711 dsl_prop_get_integer(zv->zv_name,
1712 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1713 }
1714
1715 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1716 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1717 &compress, tx);
1718 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1719 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
1720 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1721 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1722 &refresrv, tx);
1723 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1724 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1725 &vbs, tx);
1726 error = error ? error : dmu_object_set_blocksize(
1727 os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
1728 if (version >= SPA_VERSION_DEDUP) {
1729 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1730 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1731 &dedup, tx);
1732 }
1733 if (error == 0)
1734 zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
1735 }
1736 dmu_tx_commit(tx);
1737
1738 /*
1739 	 * We only need to update the zvol's properties if we are initializing
1740 * the dump area for the first time.
1741 */
1742 if (!resize) {
1743 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1744 VERIFY(nvlist_add_uint64(nv,
1745 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
1746 VERIFY(nvlist_add_uint64(nv,
1747 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
1748 ZIO_COMPRESS_OFF) == 0);
1749 VERIFY(nvlist_add_uint64(nv,
1750 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
1751 ZIO_CHECKSUM_OFF) == 0);
1752 if (version >= SPA_VERSION_DEDUP) {
1753 VERIFY(nvlist_add_uint64(nv,
1754 zfs_prop_to_name(ZFS_PROP_DEDUP),
1755 ZIO_CHECKSUM_OFF) == 0);
1756 }
1757
1758 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
1759 nv, NULL);
1760 nvlist_free(nv);
1761
1762 if (error)
1763 return (error);
1764 }
1765
1766 /* Allocate the space for the dump */
1767 error = zvol_prealloc(zv);
1768 return (error);
1769 }
1770
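/*
 * Turn the zvol into a usable dump device: (re)initialize the on-disk layout
 * if the recorded dump size doesn't match the current volume size, build the
 * LBA-to-DVA extent map, and record the new dump size in the ZAP.
 */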
1771 static int
1772 zvol_dumpify(zvol_state_t *zv)
1773 {
1774 int error = 0;
1775 uint64_t dumpsize = 0;
1776 dmu_tx_t *tx;
1777 objset_t *os = zv->zv_objset;
1778
1779 if (zv->zv_flags & ZVOL_RDONLY)
1780 return (EROFS);
1781
1782 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
1783 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
1784 boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;
1785
1786 if ((error = zvol_dump_init(zv, resize)) != 0) {
1787 (void) zvol_dump_fini(zv);
1788 return (error);
1789 }
1790 }
1791
1792 /*
1793 * Build up our lba mapping.
1794 */
1795 error = zvol_get_lbas(zv);
1796 if (error) {
1797 (void) zvol_dump_fini(zv);
1798 return (error);
1799 }
1800
1801 tx = dmu_tx_create(os);
1802 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1803 error = dmu_tx_assign(tx, TXG_WAIT);
1804 if (error) {
1805 dmu_tx_abort(tx);
1806 (void) zvol_dump_fini(zv);
1807 return (error);
1808 }
1809
1810 zv->zv_flags |= ZVOL_DUMPIFIED;
1811 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
1812 &zv->zv_volsize, tx);
1813 dmu_tx_commit(tx);
1814
1815 if (error) {
1816 (void) zvol_dump_fini(zv);
1817 return (error);
1818 }
1819
1820 txg_wait_synced(dmu_objset_pool(os), 0);
1821 return (0);
1822 }
1823
1824 static int
1825 zvol_dump_fini(zvol_state_t *zv)
1826 {
1827 dmu_tx_t *tx;
1828 objset_t *os = zv->zv_objset;
1829 nvlist_t *nv;
1830 int error = 0;
1831 uint64_t checksum, compress, refresrv, vbs, dedup;
1832 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
1833
1834 /*
1835 * Attempt to restore the zvol back to its pre-dumpified state.
1836 * This is a best-effort attempt as it's possible that not all
1837 * of these properties were initialized during the dumpify process
1838 * (i.e. error during zvol_dump_init).
1839 */
1840
1841 tx = dmu_tx_create(os);
1842 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1843 error = dmu_tx_assign(tx, TXG_WAIT);
1844 if (error) {
1845 dmu_tx_abort(tx);
1846 return (error);
1847 }
1848 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
1849 dmu_tx_commit(tx);
1850
1851 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1852 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
1853 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1854 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
1855 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1856 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
1857 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1858 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
1859
1860 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1861 (void) nvlist_add_uint64(nv,
1862 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
1863 (void) nvlist_add_uint64(nv,
1864 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
1865 (void) nvlist_add_uint64(nv,
1866 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
1867 if (version >= SPA_VERSION_DEDUP &&
1868 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1869 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
1870 (void) nvlist_add_uint64(nv,
1871 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
1872 }
1873 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
1874 nv, NULL);
1875 nvlist_free(nv);
1876
1877 zvol_free_extents(zv);
1878 zv->zv_flags &= ~ZVOL_DUMPIFIED;
1879 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
1880 /* wait for dmu_free_long_range to actually free the blocks */
1881 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1882 tx = dmu_tx_create(os);
1883 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1884 error = dmu_tx_assign(tx, TXG_WAIT);
1885 if (error) {
1886 dmu_tx_abort(tx);
1887 return (error);
1888 }
1889 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
1890 zv->zv_volblocksize = vbs;
1891 dmu_tx_commit(tx);
1892
1893 return (0);
1894 }
1895