/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2024, Klara, Inc.
 */

/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.  Except when they're simply character devices (volmode=dev).
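 *
 * For example, a volume named "tank/vol0" is exposed to userland as
 * /dev/zvol/tank/vol0 in either mode.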
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/filio.h>
#include <sys/freebsd_event.h>

#include <geom/geom.h>
#include <sys/zvol_impl.h>

#include "zfs_namecheck.h"

#define	ZVOL_DUMPSIZE	"dumpsize"

#ifdef ZVOL_LOCK_DEBUG
#define	ZVOL_RW_READER		RW_WRITER
#define	ZVOL_RW_READ_HELD	RW_WRITE_HELD
#else
#define	ZVOL_RW_READER		RW_READER
#define	ZVOL_RW_READ_HELD	RW_READ_HELD
#endif

enum zvol_geom_state {
	ZVOL_GEOM_UNINIT,
	ZVOL_GEOM_STOPPED,
	ZVOL_GEOM_RUNNING,
};

struct zvol_state_os {
#define	zso_dev		_zso_state._zso_dev
#define	zso_geom	_zso_state._zso_geom
	union {
		/* volmode=dev */
		struct zvol_state_dev {
			struct cdev *zsd_cdev;
			struct selinfo zsd_selinfo;
		} _zso_dev;

		/* volmode=geom */
		struct zvol_state_geom {
			struct g_provider *zsg_provider;
			struct bio_queue_head zsg_queue;
			struct mtx zsg_queue_mtx;
			enum zvol_geom_state zsg_state;
		} _zso_geom;
	} _zso_state;
	int zso_dying;
};

static uint32_t zvol_minors;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &zvol_volmode, 0,
	"Expose as GEOM providers (1), device files (2) or neither");
static boolean_t zpool_on_zvol = B_FALSE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
	"Allow zpools to use zvols as vdevs (DANGEROUS)");

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;

SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
	&zvol_unmap_enabled, 0, "Enable UNMAP functionality");

/*
 * zvol maximum transfer in one DMU tx.
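 * DMU_MAX_ACCESS is the upper bound on what the DMU will accept in a
 * single call; presumably half of it is used here to leave headroom
 * for per-transaction overhead.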
 */
int zvol_maxphys = DMU_MAX_ACCESS / 2;

static void zvol_ensure_zilog(zvol_state_t *zv);

static d_open_t zvol_cdev_open;
static d_close_t zvol_cdev_close;
static d_ioctl_t zvol_cdev_ioctl;
static d_read_t zvol_cdev_read;
static d_write_t zvol_cdev_write;
static d_strategy_t zvol_geom_bio_strategy;
static d_kqfilter_t zvol_cdev_kqfilter;

static struct cdevsw zvol_cdevsw = {
	.d_name = "zvol",
	.d_version = D_VERSION,
	.d_flags = D_DISK | D_TRACKCLOSE,
	.d_open = zvol_cdev_open,
	.d_close = zvol_cdev_close,
	.d_ioctl = zvol_cdev_ioctl,
	.d_read = zvol_cdev_read,
	.d_write = zvol_cdev_write,
	.d_strategy = zvol_geom_bio_strategy,
	.d_kqfilter = zvol_cdev_kqfilter,
};

static void zvol_filter_detach(struct knote *kn);
static int zvol_filter_vnode(struct knote *kn, long hint);

static struct filterops zvol_filterops_vnode = {
	.f_isfd = 1,
	.f_detach = zvol_filter_detach,
	.f_event = zvol_filter_vnode,
};

extern uint_t zfs_geom_probe_vdev_key;

struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_worker(void *arg);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
/* static d_strategy_t zvol_geom_bio_strategy; (declared elsewhere) */

/*
 * GEOM mode implementation
 */

static int
zvol_geom_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t drop_suspend = B_FALSE;

	if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
		/*
		 * If zfs_geom_probe_vdev_key is set, that means that zfs is
		 * attempting to probe geom providers while looking for a
		 * replacement for a missing VDEV.  In this case, the
		 * spa_namespace_lock will not be held, but it is still illegal
		 * to use a zvol as a vdev.  Deadlocks can result if another
		 * thread has spa_namespace_lock.
		 */
		return (SET_ERROR(EOPNOTSUPP));
	}

retry:
	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
	/*
	 * Obtain a copy of private under zvol_state_lock to make sure either
	 * the result of zvol free code setting private to NULL is observed,
	 * or the zv is protected from being freed because of the positive
	 * zv_open_count.
	 */
	zv = pp->private;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		err = SET_ERROR(ENXIO);
		goto out_locked;
	}

	mutex_enter(&zv->zv_state_lock);
	if (zv->zv_zso->zso_dying || (zv->zv_flags & ZVOL_REMOVING)) {
		rw_exit(&zvol_state_lock);
		err = SET_ERROR(ENXIO);
		goto out_zv_locked;
	}
	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);

	/*
	 * Make sure zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
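	 *
	 * zv_state_lock is already held here, so try the suspend lock
	 * without blocking first; if that fails, drop zv_state_lock,
	 * take both locks in the proper order, and re-check
	 * zv_open_count in case another opener got in first.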
	 */
	if (zv->zv_open_count == 0) {
		drop_suspend = B_TRUE;
		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* Check to see if zv_suspend_lock is needed. */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		boolean_t drop_namespace = B_FALSE;

		ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));

		/*
		 * Take spa_namespace_lock to prevent lock inversion when
		 * zvols from one pool are opened as vdevs in another.
		 */
		if (!mutex_owned(&spa_namespace_lock)) {
			if (!mutex_tryenter(&spa_namespace_lock)) {
				mutex_exit(&zv->zv_state_lock);
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
				kern_yield(PRI_USER);
				goto retry;
			} else {
				drop_namespace = B_TRUE;
			}
		}
		err = zvol_first_open(zv, !(flag & FWRITE));
		if (drop_namespace)
			mutex_exit(&spa_namespace_lock);
		if (err)
			goto out_zv_locked;
		pp->mediasize = zv->zv_volsize;
		pp->stripeoffset = 0;
		pp->stripesize = zv->zv_volblocksize;
	}

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	/*
	 * Check for a bad on-disk format version now since we
	 * lied about owning the dataset readonly before.
	 */
	if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
	    dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
		err = SET_ERROR(EROFS);
		goto out_opened;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out_opened;
	}
	if (flag & O_EXCL) {
		if (zv->zv_open_count != 0) {
			err = SET_ERROR(EBUSY);
			goto out_opened;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	zv->zv_open_count += count;
out_opened:
	if (zv->zv_open_count == 0) {
		zvol_last_close(zv);
		wakeup(zv);
	}
out_zv_locked:
	mutex_exit(&zv->zv_state_lock);
out_locked:
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
	return (err);
}

static int
zvol_geom_close(struct g_provider *pp, int flag, int count)
{
	(void) flag;
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;
	int new_open_count;

	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
	zv = pp->private;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(ENXIO));
	}

	mutex_enter(&zv->zv_state_lock);
	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT3U(zv->zv_open_count, ==, 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT3U(zv->zv_open_count, >, 0);

	/*
	 * Make sure zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
	 */
	new_open_count = zv->zv_open_count - count;
	if (new_open_count == 0) {
		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* Check to see if zv_suspend_lock is needed.
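			 * Another closer may have raced in while the
			 * locks were dropped.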
			 */
			new_open_count = zv->zv_open_count - count;
			if (new_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count = new_open_count;
	if (zv->zv_open_count == 0) {
		ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
		wakeup(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
	return (0);
}

static void
zvol_geom_run(zvol_state_t *zv)
{
	struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
	struct g_provider *pp = zsg->zsg_provider;

	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);

	g_error_provider(pp, 0);

	kproc_kthread_add(zvol_geom_worker, zv, &system_proc, NULL, 0, 0,
	    "zfskern", "zvol %s", pp->name + sizeof (ZVOL_DRIVER));
}

static void
zvol_geom_destroy(zvol_state_t *zv)
{
	struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
	struct g_provider *pp = zsg->zsg_provider;

	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);

	g_topology_assert();

	mutex_enter(&zv->zv_state_lock);
	VERIFY3S(zsg->zsg_state, ==, ZVOL_GEOM_RUNNING);
	mutex_exit(&zv->zv_state_lock);
	zsg->zsg_provider = NULL;
	g_wither_geom(pp->geom, ENXIO);
}

void
zvol_wait_close(zvol_state_t *zv)
{
	if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
		return;
	mutex_enter(&zv->zv_state_lock);
	zv->zv_zso->zso_dying = B_TRUE;

	if (zv->zv_open_count)
		msleep(zv, &zv->zv_state_lock,
		    PRIBIO, "zvol:dying", 10*hz);
	mutex_exit(&zv->zv_state_lock);
}

static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
	int count, error, flags;

	g_topology_assert();

	/*
	 * To make it easier we expect either open or close, but not both
	 * at the same time.
	 */
	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
	    (acr <= 0 && acw <= 0 && ace <= 0),
	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
	    pp->name, acr, acw, ace));

	if (pp->private == NULL) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		return (pp->error);
	}

	/*
	 * We don't pass FEXCL flag to zvol_geom_open()/zvol_geom_close() if
	 * ace != 0, because GEOM already handles that and handles it a bit
	 * differently.  GEOM allows for multiple read/exclusive consumers and
	 * ZFS allows only one exclusive consumer, no matter if it is reader or
	 * writer.  I like better the way GEOM works so I'll leave it for GEOM
	 * to decide what to do.
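	 *
	 * Note that FEXCL is never added to "flags" below; exclusivity
	 * is expressed purely through GEOM's ace count.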
	 */

	count = acr + acw + ace;
	if (count == 0)
		return (0);

	flags = 0;
	if (acr != 0 || ace != 0)
		flags |= FREAD;
	if (acw != 0)
		flags |= FWRITE;

	g_topology_unlock();
	if (count > 0)
		error = zvol_geom_open(pp, flags, count);
	else
		error = zvol_geom_close(pp, flags, -count);
	g_topology_lock();
	return (error);
}

static void
zvol_geom_worker(void *arg)
{
	zvol_state_t *zv = arg;
	struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
	struct bio *bp;

	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	for (;;) {
		mtx_lock(&zsg->zsg_queue_mtx);
		bp = bioq_takefirst(&zsg->zsg_queue);
		if (bp == NULL) {
			if (zsg->zsg_state == ZVOL_GEOM_STOPPED) {
				zsg->zsg_state = ZVOL_GEOM_RUNNING;
				wakeup(&zsg->zsg_state);
				mtx_unlock(&zsg->zsg_queue_mtx);
				kthread_exit();
			}
			msleep(&zsg->zsg_queue, &zsg->zsg_queue_mtx,
			    PRIBIO | PDROP, "zvol:io", 0);
			continue;
		}
		mtx_unlock(&zsg->zsg_queue_mtx);
		zvol_geom_bio_strategy(bp);
	}
}

static void
zvol_geom_bio_start(struct bio *bp)
{
	zvol_state_t *zv = bp->bio_to->private;
	struct zvol_state_geom *zsg;
	boolean_t first;

	if (zv == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	if (bp->bio_cmd == BIO_GETATTR) {
		if (zvol_geom_bio_getattr(bp))
			g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	if (!THREAD_CAN_SLEEP()) {
		zsg = &zv->zv_zso->zso_geom;
		mtx_lock(&zsg->zsg_queue_mtx);
		first = (bioq_first(&zsg->zsg_queue) == NULL);
		bioq_insert_tail(&zsg->zsg_queue, bp);
		mtx_unlock(&zsg->zsg_queue_mtx);
		if (first)
			wakeup_one(&zsg->zsg_queue);
		return;
	}

	zvol_geom_bio_strategy(bp);
}

static int
zvol_geom_bio_getattr(struct bio *bp)
{
	zvol_state_t *zv;

	zv = bp->bio_to->private;
	ASSERT3P(zv, !=, NULL);

	spa_t *spa = dmu_objset_spa(zv->zv_objset);
	uint64_t refd, avail, usedobjs, availobjs;

	if (g_handleattr_int(bp, "GEOM::candelete", 1))
		return (0);
	if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
		dmu_objset_space(zv->zv_objset, &refd, &avail,
		    &usedobjs, &availobjs);
		if (g_handleattr_off_t(bp, "blocksavail", avail / DEV_BSIZE))
			return (0);
	} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
		dmu_objset_space(zv->zv_objset, &refd, &avail,
		    &usedobjs, &availobjs);
		if (g_handleattr_off_t(bp, "blocksused", refd / DEV_BSIZE))
			return (0);
	} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
		avail = metaslab_class_get_space(spa_normal_class(spa));
		avail -= metaslab_class_get_alloc(spa_normal_class(spa));
		if (g_handleattr_off_t(bp, "poolblocksavail",
		    avail / DEV_BSIZE))
			return (0);
	} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
		refd = metaslab_class_get_alloc(spa_normal_class(spa));
		if (g_handleattr_off_t(bp, "poolblocksused", refd / DEV_BSIZE))
			return (0);
	}
	return (1);
}

static void
zvol_filter_detach(struct knote *kn)
{
	zvol_state_t *zv;
	struct zvol_state_dev *zsd;

	zv = kn->kn_hook;
	zsd = &zv->zv_zso->zso_dev;

	knlist_remove(&zsd->zsd_selinfo.si_note, kn, 0);
}

static int
zvol_filter_vnode(struct knote *kn, long hint)
{
	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
zvol_cdev_kqfilter(struct cdev *dev, struct knote *kn)
{
	zvol_state_t *zv;
	struct zvol_state_dev *zsd;

	zv = dev->si_drv2;
	zsd = &zv->zv_zso->zso_dev;

	if (kn->kn_filter != EVFILT_VNODE)
		return (EINVAL);

	/* XXX: extend support for other NOTE_* events */
	if (kn->kn_sfflags != NOTE_ATTRIB)
		return (EINVAL);

	kn->kn_fop = &zvol_filterops_vnode;
	kn->kn_hook = zv;
	knlist_add(&zsd->zsd_selinfo.si_note, kn, 0);

	return (0);
}

static void
zvol_geom_bio_strategy(struct bio *bp)
{
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	zfs_locked_range_t *lr;
	int error = 0;
	boolean_t doread = B_FALSE;
	boolean_t is_dumpified;
	boolean_t commit;

	if (bp->bio_to)
		zv = bp->bio_to->private;
	else
		zv = bp->bio_dev->si_drv2;

	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);

	if (zv->zv_flags & ZVOL_REMOVING) {
		error = SET_ERROR(ENXIO);
		goto resume;
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		doread = B_TRUE;
		break;
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_DELETE:
		if (zv->zv_flags & ZVOL_RDONLY) {
			error = SET_ERROR(EROFS);
			goto resume;
		}
		zvol_ensure_zilog(zv);
		if (bp->bio_cmd == BIO_FLUSH)
			goto commit;
		break;
	default:
		error = SET_ERROR(EOPNOTSUPP);
		goto resume;
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT3P(os, !=, NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && off >= volsize) {
		error = SET_ERROR(EIO);
		goto resume;
	}

	is_dumpified = B_FALSE;
	commit = !doread && !is_dumpified &&
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	lr = zfs_rangelock_enter(&zv->zv_rangelock, off, resid,
	    doread ? RL_READER : RL_WRITER);

	if (bp->bio_cmd == BIO_DELETE) {
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, off, resid);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    off, resid);
			resid = 0;
		}
		goto unlock;
	}
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, commit);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* Convert checksum errors into IO errors.
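			 * Consumers above the block layer expect a
			 * generic EIO, not ZFS's internal ECKSUM.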
			 */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
unlock:
	zfs_rangelock_exit(lr);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length && off > volsize)
		error = SET_ERROR(EINVAL);

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		break;
	case BIO_READ:
		dataset_kstats_update_read_kstats(&zv->zv_kstat,
		    bp->bio_completed);
		break;
	case BIO_WRITE:
		dataset_kstats_update_write_kstats(&zv->zv_kstat,
		    bp->bio_completed);
		break;
	case BIO_DELETE:
		break;
	default:
		break;
	}

	if (commit) {
commit:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	}
resume:
	rw_exit(&zv->zv_suspend_lock);
out:
	if (bp->bio_to)
		g_io_deliver(bp, error);
	else
		biofinish(bp, NULL, error);
}

/*
 * Character device mode implementation
 */

static int
zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{
	zvol_state_t *zv;
	uint64_t volsize;
	zfs_locked_range_t *lr;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_init(&uio, uio_s);

	zv = dev->si_drv2;

	volsize = zv->zv_volsize;
	/*
	 * uio_loffset == volsize isn't an error as
	 * it's required for EOF processing.
	 */
	if (zfs_uio_resid(&uio) > 0 &&
	    (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
		return (SET_ERROR(EIO));

	rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
	ssize_t start_resid = zfs_uio_resid(&uio);
	lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
	    zfs_uio_resid(&uio), RL_READER);
	while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
		uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);

		/* Don't read past the end. */
		if (bytes > volsize - zfs_uio_offset(&uio))
			bytes = volsize - zfs_uio_offset(&uio);

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* Convert checksum errors into IO errors. */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);
	int64_t nread = start_resid - zfs_uio_resid(&uio);
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	rw_exit(&zv->zv_suspend_lock);

	return (error);
}

static int
zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
{
	zvol_state_t *zv;
	uint64_t volsize;
	zfs_locked_range_t *lr;
	int error = 0;
	boolean_t commit;
	zfs_uio_t uio;

	zv = dev->si_drv2;

	volsize = zv->zv_volsize;

	zfs_uio_init(&uio, uio_s);

	if (zfs_uio_resid(&uio) > 0 &&
	    (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
		return (SET_ERROR(EIO));

	ssize_t start_resid = zfs_uio_resid(&uio);
	commit = (ioflag & IO_SYNC) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
	zvol_ensure_zilog(zv);

	lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
	    zfs_uio_resid(&uio), RL_WRITER);
	while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
		uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
		uint64_t off = zfs_uio_offset(&uio);
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		/* Don't write past the end. */
		if (bytes > volsize - off)
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, commit);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);
	int64_t nwritten = start_resid - zfs_uio_resid(&uio);
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	if (commit)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	rw_exit(&zv->zv_suspend_lock);
	return (error);
}

static int
zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t drop_suspend = B_FALSE;

retry:
	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
	/*
	 * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
	 * the result of zvol free code setting si_drv2 to NULL is observed,
	 * or the zv is protected from being freed because of the positive
	 * zv_open_count.
	 */
	zv = dev->si_drv2;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		err = SET_ERROR(ENXIO);
		goto out_locked;
	}

	mutex_enter(&zv->zv_state_lock);
	if (zv->zv_zso->zso_dying) {
		rw_exit(&zvol_state_lock);
		err = SET_ERROR(ENXIO);
		goto out_zv_locked;
	}
	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);

	/*
	 * Make sure zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
	 */
	if (zv->zv_open_count == 0) {
		drop_suspend = B_TRUE;
		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* Check to see if zv_suspend_lock is needed. */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		boolean_t drop_namespace = B_FALSE;

		ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));

		/*
		 * Take spa_namespace_lock to prevent lock inversion when
		 * zvols from one pool are opened as vdevs in another.
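		 *
		 * Use a non-blocking tryenter and restart the whole open
		 * on failure; blocking here with the zv locks held could
		 * deadlock against the current holder of the lock.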
		 */
		if (!mutex_owned(&spa_namespace_lock)) {
			if (!mutex_tryenter(&spa_namespace_lock)) {
				mutex_exit(&zv->zv_state_lock);
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
				kern_yield(PRI_USER);
				goto retry;
			} else {
				drop_namespace = B_TRUE;
			}
		}
		err = zvol_first_open(zv, !(flags & FWRITE));
		if (drop_namespace)
			mutex_exit(&spa_namespace_lock);
		if (err)
			goto out_zv_locked;
	}

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out_opened;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out_opened;
	}
	if (flags & O_EXCL) {
		if (zv->zv_open_count != 0) {
			err = SET_ERROR(EBUSY);
			goto out_opened;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	zv->zv_open_count++;
out_opened:
	if (zv->zv_open_count == 0) {
		zvol_last_close(zv);
		wakeup(zv);
	}
out_zv_locked:
	mutex_exit(&zv->zv_state_lock);
out_locked:
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
	return (err);
}

static int
zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
	zv = dev->si_drv2;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(ENXIO));
	}

	mutex_enter(&zv->zv_state_lock);
	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT3U(zv->zv_open_count, ==, 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT3U(zv->zv_open_count, >, 0);
	/*
	 * Make sure zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
	 */
	if (zv->zv_open_count == 1) {
		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* Check to see if zv_suspend_lock is needed. */
			if (zv->zv_open_count != 1) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	/*
	 * You may get multiple opens, but only one close.
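	 * The cdevsw sets D_TRACKCLOSE, so devfs should call this
	 * handler once per close; a single decrement keeps the count
	 * balanced.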
	 */
	zv->zv_open_count--;

	if (zv->zv_open_count == 0) {
		ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
		wakeup(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
	return (0);
}

static int
zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
    int fflag, struct thread *td)
{
	zvol_state_t *zv;
	zfs_locked_range_t *lr;
	off_t offset, length;
	int error;
	boolean_t sync;

	zv = dev->si_drv2;

	error = 0;
	KASSERT(zv->zv_open_count > 0,
	    ("Device with zero access count in %s", __func__));

	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(uint32_t *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = zv->zv_volsize;
		break;
	case DIOCGFLUSH:
		rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
		if (zv->zv_zilog != NULL)
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		rw_exit(&zv->zv_suspend_lock);
		break;
	case DIOCGDELETE:
		if (!zvol_unmap_enabled)
			break;

		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
		    offset < 0 || offset >= zv->zv_volsize ||
		    length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = SET_ERROR(EINVAL);
			break;
		}
		rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
		zvol_ensure_zilog(zv);
		lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
		    RL_WRITER);
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			sync = FALSE;
			dmu_tx_abort(tx);
		} else {
			sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
			zvol_log_truncate(zv, tx, offset, length);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    offset, length);
		}
		zfs_rangelock_exit(lr);
		if (sync)
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		rw_exit(&zv->zv_suspend_lock);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = zv->zv_volblocksize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = 0;
		break;
	case DIOCGATTR: {
		spa_t *spa = dmu_objset_spa(zv->zv_objset);
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
		uint64_t refd, avail, usedobjs, availobjs;

		if (strcmp(arg->name, "GEOM::candelete") == 0)
			arg->value.i = 1;
		else if (strcmp(arg->name, "blocksavail") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			arg->value.off = avail / DEV_BSIZE;
		} else if (strcmp(arg->name, "blocksused") == 0) {
			dmu_objset_space(zv->zv_objset, &refd, &avail,
			    &usedobjs, &availobjs);
			arg->value.off = refd / DEV_BSIZE;
		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
			avail = metaslab_class_get_space(spa_normal_class(spa));
			avail -= metaslab_class_get_alloc(
			    spa_normal_class(spa));
			arg->value.off = avail / DEV_BSIZE;
		} else if (strcmp(arg->name, "poolblocksused") == 0) {
			refd = metaslab_class_get_alloc(spa_normal_class(spa));
			arg->value.off = refd / DEV_BSIZE;
		} else
			error = SET_ERROR(ENOIOCTL);
		break;
	}
	case FIOSEEKHOLE:
	case FIOSEEKDATA: {
		off_t *off = (off_t *)data;
		uint64_t noff;
		boolean_t hole;

		hole = (cmd == FIOSEEKHOLE);
		noff = *off;
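		/*
		 * Lock the entire object so the hole/data scan sees a
		 * stable view of the volume.
		 */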
		lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
		    RL_READER);
		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
		zfs_rangelock_exit(lr);
		*off = noff;
		break;
	}
	default:
		error = SET_ERROR(ENOIOCTL);
	}

	return (error);
}

/*
 * Misc. helpers
 */

static void
zvol_ensure_zilog(zvol_state_t *zv)
{
	ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));

	/*
	 * Open a ZIL if this is the first time we have written to this
	 * zvol.  We protect zv->zv_zilog with zv_suspend_lock rather
	 * than zv_state_lock so that we don't need to acquire an
	 * additional lock in this path.
	 */
	if (zv->zv_zilog == NULL) {
		if (!rw_tryupgrade(&zv->zv_suspend_lock)) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
		}
		if (zv->zv_zilog == NULL) {
			zv->zv_zilog = zil_open(zv->zv_objset,
			    zvol_get_data, &zv->zv_kstat.dk_zil_sums);
			zv->zv_flags |= ZVOL_WRITTEN_TO;
			/* replay / destroy done in zvol_os_create_minor() */
			VERIFY0(zv->zv_zilog->zl_header->zh_flags &
			    ZIL_REPLAY_NEEDED);
		}
		rw_downgrade(&zv->zv_suspend_lock);
	}
}

boolean_t
zvol_os_is_zvol(const char *device)
{
	return (device && strncmp(device, ZVOL_DIR, strlen(ZVOL_DIR)) == 0);
}

void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	/* Move to a new hashtable entry. */
	zv->zv_hash = zvol_name_hash(newname);
	hlist_del(&zv->zv_hlink);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));

	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
		struct g_provider *pp = zsg->zsg_provider;
		struct g_geom *gp;

		g_topology_lock();
		gp = pp->geom;
		ASSERT3P(gp, !=, NULL);

		zsg->zsg_provider = NULL;
		g_wither_provider(pp, ENXIO);

		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = zv->zv_volsize;
		pp->private = zv;
		zsg->zsg_provider = pp;
		g_error_provider(pp, 0);
		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
		struct cdev *dev;
		struct make_dev_args args;

		dev = zsd->zsd_cdev;
		if (dev != NULL) {
			destroy_dev(dev);
			dev = zsd->zsd_cdev = NULL;
			if (zv->zv_open_count > 0) {
				zv->zv_flags &= ~ZVOL_EXCL;
				zv->zv_open_count = 0;
				/* XXX need suspend lock but lock order */
				zvol_last_close(zv);
			}
		}

		make_dev_args_init(&args);
		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
		args.mda_devsw = &zvol_cdevsw;
		args.mda_cr = NULL;
		args.mda_uid = UID_ROOT;
		args.mda_gid = GID_OPERATOR;
		args.mda_mode = 0640;
		args.mda_si_drv2 = zv;
		if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname)
		    == 0) {
			dev->si_iosize_max = maxphys;
			zsd->zsd_cdev = dev;
		}
	}
	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
	dataset_kstats_rename(&zv->zv_kstat, newname);
}

/*
 * Remove minor node for the specified volume.
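 * The zvol must already be closed and unreachable from userland;
 * the asserts below enforce that.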
 */
void
zvol_os_free(zvol_state_t *zv)
{
	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
	ASSERT0(zv->zv_open_count);

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	rw_destroy(&zv->zv_suspend_lock);
	zfs_rangelock_fini(&zv->zv_rangelock);

	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
		struct g_provider *pp __maybe_unused = zsg->zsg_provider;

		ASSERT3P(pp->private, ==, NULL);

		g_topology_lock();
		zvol_geom_destroy(zv);
		g_topology_unlock();
		mtx_destroy(&zsg->zsg_queue_mtx);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
		struct cdev *dev = zsd->zsd_cdev;

		if (dev != NULL) {
			ASSERT3P(dev->si_drv2, ==, NULL);
			destroy_dev(dev);
			knlist_clear(&zsd->zsd_selinfo.si_note, 0);
			knlist_destroy(&zsd->zsd_selinfo.si_note);
		}
	}

	mutex_destroy(&zv->zv_state_lock);
	cv_destroy(&zv->zv_removing_cv);
	dataset_kstats_destroy(&zv->zv_kstat);
	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
	zvol_minors--;
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_os_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t volmode, hash;
	int error;
	bool replayed_zil = B_FALSE;

	ZFS_LOG(1, "Creating ZVOL %s...", name);
	hash = zvol_name_hash(name);
	if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
		mutex_exit(&zv->zv_state_lock);
		return (SET_ERROR(EEXIST));
	}

	DROP_GIANT();

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	/* Lie and say we're read-only. */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
	if (error || volmode == ZFS_VOLMODE_DEFAULT)
		volmode = zvol_volmode;
	error = 0;

	/*
	 * zvol_alloc equivalent ...
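	 * (set up the zvol_state and the volmode-specific OS state
	 * in-line here, rather than in a separate helper)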
	 */
	zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
	zv->zv_hash = hash;
	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
	zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
	zv->zv_volmode = volmode;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
		struct g_provider *pp;
		struct g_geom *gp;

		zsg->zsg_state = ZVOL_GEOM_UNINIT;
		mtx_init(&zsg->zsg_queue_mtx, "zvol", NULL, MTX_DEF);

		g_topology_lock();
		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
		gp->start = zvol_geom_bio_start;
		gp->access = zvol_geom_access;
		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
		pp->sectorsize = DEV_BSIZE;
		pp->mediasize = 0;
		pp->private = zv;

		zsg->zsg_provider = pp;
		bioq_init(&zsg->zsg_queue);
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
		struct cdev *dev;
		struct make_dev_args args;

		make_dev_args_init(&args);
		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
		args.mda_devsw = &zvol_cdevsw;
		args.mda_cr = NULL;
		args.mda_uid = UID_ROOT;
		args.mda_gid = GID_OPERATOR;
		args.mda_mode = 0640;
		args.mda_si_drv2 = zv;
		if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name)
		    == 0) {
			dev->si_iosize_max = maxphys;
			zsd->zsd_cdev = dev;
			knlist_init_sx(&zsd->zsd_selinfo.si_note,
			    &zv->zv_state_lock);
		}
	}
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);

	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
	error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
	if (error)
		goto out_dmu_objset_disown;
	ASSERT3P(zv->zv_zilog, ==, NULL);
	zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
		else
			replayed_zil = zil_replay(os, zv, zvol_replay_vector);
	}
	if (replayed_zil)
		zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	/* TODO: prefetch for geom tasting */

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, B_TRUE, FTAG);

	if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
		zvol_geom_run(zv);
		g_topology_unlock();
	}
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
	if (error == 0) {
		rw_enter(&zvol_state_lock, RW_WRITER);
		zvol_insert(zv);
		zvol_minors++;
		rw_exit(&zvol_state_lock);
		ZFS_LOG(1, "ZVOL %s created.", name);
	}
	PICKUP_GIANT();
	return (error);
}

void
zvol_os_clear_private(zvol_state_t *zv)
{
	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
		struct g_provider *pp = zsg->zsg_provider;

		if (pp->private == NULL) /* already cleared */
			return;

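		/*
		 * Tell the GEOM worker to stop: mark the queue STOPPED
		 * and wake the worker, then wait for it to flip the
		 * state back to RUNNING just before it exits.
		 */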
		mtx_lock(&zsg->zsg_queue_mtx);
		zsg->zsg_state = ZVOL_GEOM_STOPPED;
		pp->private = NULL;
		wakeup_one(&zsg->zsg_queue);
		while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
			msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
			    0, "zvol:w", 0);
		mtx_unlock(&zsg->zsg_queue_mtx);
		ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
		struct cdev *dev = zsd->zsd_cdev;

		if (dev != NULL)
			dev->si_drv2 = NULL;
	}
}

int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	zv->zv_volsize = volsize;
	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
		struct g_provider *pp = zsg->zsg_provider;

		g_topology_lock();

		if (pp->private == NULL) {
			g_topology_unlock();
			return (SET_ERROR(ENXIO));
		}

		/*
		 * Do not invoke resize event when initial size was zero.
		 * ZVOL initializes the size on first open, this is not
		 * real resizing.
		 */
		if (pp->mediasize == 0)
			pp->mediasize = zv->zv_volsize;
		else
			g_resize_provider(pp, zv->zv_volsize);

		g_topology_unlock();
	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;

		KNOTE_UNLOCKED(&zsd->zsd_selinfo.si_note, NOTE_ATTRIB);
	}
	return (0);
}

void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
	// XXX? set_disk_ro(zv->zv_zso->zvo_disk, flags);
}

void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
	// XXX? set_capacity(zv->zv_zso->zvo_disk, capacity);
}

/*
 * Public interfaces
 */

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

int
zvol_init(void)
{
	zvol_init_impl();
	return (0);
}

void
zvol_fini(void)
{
	zvol_fini_impl();
}