/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in the
 * future.  The elements are built using the GFS primitives, as the hierarchy
 * does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *                                             |
 *                                             |
 *                                             V
 *                                         mounted fs
 *
 * The 'snapshot' node contains just enough information to look up '..' and act
 * as a mountpoint for the snapshot.  Whenever we look up a specific snapshot,
 * we perform an automount of the underlying filesystem and return the
 * corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (i.e., '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (i.e., snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted filesystems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */
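
/*
 * Illustrative example: for a head filesystem mounted at /tank/fs with a
 * snapshot named "monday" (both names hypothetical), a lookup of
 * /tank/fs/.zfs/snapshot/monday makes zfsctl_snapdir_lookup() build the
 * mountpoint string "/tank/fs/.zfs/snapshot/monday" and mount the dataset
 * "tank/fs@monday" on top of the snapshot's GFS vnode, roughly what a
 * manual "mount -t zfs tank/fs@monday /tank/fs/.zfs/snapshot/monday"
 * would do.
 */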

#include <sys/types.h>
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/dirent.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/zap.h>
#include <sys/sysproto.h>

#include "zfs_namecheck.h"

#include <sys/kernel.h>
#include <sys/ccompat.h>

/* Common access mode for all virtual directories under the ctldir */
const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
    S_IROTH | S_IXOTH;

/*
 * "Synthetic" filesystem implementation.
 */

/*
 * Assert that A implies B.
 */
#define	KASSERT_IMPLY(A, B, msg)	KASSERT(!(A) || (B), (msg));

static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");

typedef struct sfs_node {
	char		sn_name[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t	sn_parent_id;
	uint64_t	sn_id;
} sfs_node_t;

/*
 * Check the parent's ID as well as the node's to account for a chance
 * that IDs originating from different domains (snapshot IDs, artificial
 * IDs, znode IDs) may clash.
 */
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
	sfs_node_t *n1 = vp->v_data;
	sfs_node_t *n2 = arg;
	bool equal;

	equal = n1->sn_id == n2->sn_id &&
	    n1->sn_parent_id == n2->sn_parent_id;

	/* Zero means equality. */
	return (!equal);
}

static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	sfs_node_t search;
	int err;

	search.sn_id = id;
	search.sn_parent_id = parent_id;
	err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp,
	    sfs_compare_ids, &search);
	return (err);
}

static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	int err;

	KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
	err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp,
	    sfs_compare_ids, vp->v_data);
	return (err);
}

static void
sfs_vnode_remove(struct vnode *vp)
{
	vfs_hash_remove(vp);
}

typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);
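
/*
 * Get or create a synthetic-fs vnode identified by (parent_id, id).
 * An existing vnode is looked up in the vfs hash; otherwise a new one is
 * allocated with the given vnode operations, attached to the mount,
 * initialized by the setup callback and inserted into the hash, handling
 * the race where another thread inserts an equivalent vnode first.
 * A newly created vnode is returned exclusively locked.
 */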
static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
    const char *tag, struct vop_vector *vops,
    sfs_vnode_setup_fn setup, void *arg,
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	/* Allocate a new vnode/inode. */
	error = getnewvnode(tag, mp, vops, &vp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Exclusively lock the vnode while it's being constructed.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	setup(vp, arg);

	error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	*vpp = vp;
	return (0);
}

static void
sfs_print_node(sfs_node_t *node)
{
	printf("\tname = %s\n", node->sn_name);
	printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
	printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}

static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
	struct sfs_node *node;

	KASSERT(strlen(name) < sizeof (node->sn_name),
	    ("sfs node name is too long"));
	KASSERT(size >= sizeof (*node), ("sfs node size is too small"));
	node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
	strlcpy(node->sn_name, name, sizeof (node->sn_name));
	node->sn_parent_id = parent_id;
	node->sn_id = id;

	return (node);
}

static void
sfs_destroy_node(sfs_node_t *node)
{
	free(node, M_SFSNODES);
}

static void *
sfs_reclaim_vnode(vnode_t *vp)
{
	void *data;

	sfs_vnode_remove(vp);
	data = vp->v_data;
	vp->v_data = NULL;
	return (data);
}

static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
    uio_t *uio, off_t *offp)
{
	struct dirent entry;
	int error;

	/* Reset ncookies for subsequent use of vfs_read_dirent. */
	if (ap->a_ncookies != NULL)
		*ap->a_ncookies = 0;

	if (uio->uio_resid < sizeof (entry))
		return (SET_ERROR(EINVAL));

	if (uio->uio_offset < 0)
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == 0) {
		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '\0';
		entry.d_namlen = 1;
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (uio->uio_offset < sizeof (entry))
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == sizeof (entry)) {
		entry.d_fileno = parent_id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '.';
		entry.d_name[2] = '\0';
		entry.d_namlen = 2;
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (offp != NULL)
		*offp = 2 * sizeof (entry);
	return (0);
}


/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */
#define	ZFSCTL_INO_SNAP(id)	(id)

static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
static struct vop_vector zfsctl_ops_shares_dir;

void
zfsctl_init(void)
{
}

void
zfsctl_fini(void)
{
}

boolean_t
zfsctl_is_node(vnode_t *vp)
{
	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot) ||
	    vn_matchops(vp, zfsctl_ops_shares_dir));

}

typedef struct zfsctl_root {
	sfs_node_t	node;
	sfs_node_t	*snapdir;
	timestruc_t	cmtime;
} zfsctl_root_t;


/*
 * Create the '.zfs' directory.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	zfsctl_root_t *dot_zfs;
	sfs_node_t *snapdir;
	vnode_t *rvp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
	    ZFSCTL_INO_SNAPDIR);
	dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0,
	    ZFSCTL_INO_ROOT);
	dot_zfs->snapdir = snapdir;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
	vput(rvp);

	zfsvfs->z_ctldir = dot_zfs;
}

/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * The nodes must not have any associated vnodes by now as they should be
 * vflush-ed.
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
	sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}

static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	return (VFS_ROOT(mp, flags, vpp));
}

static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
	ASSERT_VOP_ELOCKED(vp, __func__);

	/* We support shared locking. */
	VN_LOCK_ASHARE(vp);
	vp->v_type = VDIR;
	vp->v_data = arg;
}

static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
	err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
	    zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
	err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
	    &zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
	int error;

	error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
	return (error);
}

/*
 * Common open routine.  Disallow any write access.
 */
static int
zfsctl_common_open(struct vop_open_args *ap)
{
	int flags = ap->a_mode;

	if (flags & FWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}

/*
 * Common close routine.  Nothing to do here.
 */
/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Common access routine.  Disallow writes.
 */
static int
zfsctl_common_access(struct vop_access_args *ap)
{
	accmode_t accmode = ap->a_accmode;

	if (accmode & VWRITE)
		return (SET_ERROR(EACCES));
	return (0);
}

/*
 * Common getattr function.  Fill in basic information.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	timestruc_t now;
	sfs_node_t *node;

	node = vp->v_data;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	vn_fsid(vp, vap);
	vap->va_mode = zfsctl_ctldir_mode;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;

	vap->va_nodeid = node->sn_id;

	/* At least '.' and '..'. */
	vap->va_nlink = 2;
}

#ifndef _OPENSOLARIS_SYS_VNODE_H_
struct vop_fid_args {
	struct vnode *a_vp;
	struct fid *a_fid;
};
#endif

static int
zfsctl_common_fid(struct vop_fid_args *ap)
{
	vnode_t *vp = ap->a_vp;
	fid_t *fidp = (void *)ap->a_fid;
	sfs_node_t *node = vp->v_data;
	uint64_t object = node->sn_id;
	zfid_short_t *zfid;
	int i;

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs nodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
	struct vnode *a_vp;
	struct thread *a_td;
};
#endif

static int
zfsctl_common_reclaim(struct vop_reclaim_args *ap)
{
	vnode_t *vp = ap->a_vp;

	(void) sfs_reclaim_vnode(vp);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_print_args {
	struct vnode *a_vp;
};
#endif

static int
zfsctl_common_print(struct vop_print_args *ap)
{
	sfs_print_node(ap->a_vp->v_data);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
};
#endif

/*
 * Get root directory attributes.
 */
static int
zfsctl_root_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsctl_root_t *node = vp->v_data;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = node->cmtime;
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	vap->va_nlink += 1; /* snapdir */
	vap->va_size = vap->va_nlink;
	return (0);
}

/*
 * Even when we look up ".", we can still be asked to lock it
 * with a different lock type than the one currently held.
 */
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
	vref(dvp);
	if (ltype != VOP_ISLOCKED(dvp)) {
		if (ltype == LK_EXCLUSIVE)
			vn_lock(dvp, LK_UPGRADE | LK_RETRY);
		else /* if (ltype == LK_SHARED) */
			vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);

		/* Relocking "." may have left us with a reclaimed vnode. */
		if (VN_IS_DOOMED(dvp)) {
			vrele(dvp);
			return (SET_ERROR(ENOENT));
		}
	}
	return (0);
}

/*
 * Special case the handling of "..".
 */
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	int flags = ap->a_cnp->cn_flags;
	int lkflags = ap->a_cnp->cn_lkflags;
	int nameiop = ap->a_cnp->cn_nameiop;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
	} else if ((flags & ISDOTDOT) != 0) {
		err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
		    lkflags, vpp);
	} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
		err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
	} else {
		err = SET_ERROR(ENOENT);
	}
	if (err != 0)
		*vpp = NULL;
	return (err);
}

static int
zfsctl_root_readdir(struct vop_readdir_args *ap)
{
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_root_t *node = vp->v_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}
	if (uio->uio_offset != dots_offset)
		return (SET_ERROR(EINVAL));

	CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name));
	entry.d_fileno = node->snapdir->sn_id;
	entry.d_type = DT_DIR;
	strcpy(entry.d_name, node->snapdir->sn_name);
	entry.d_namlen = strlen(entry.d_name);
	entry.d_reclen = sizeof (entry);
	error = vfs_read_dirent(ap, &entry, uio->uio_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG)
			error = 0;
		return (SET_ERROR(error));
	}
	if (eofp != NULL)
		*eofp = 1;
	return (0);
}

static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
	static const char dotzfs_name[4] = ".zfs";
	vnode_t *dvp;
	int error;

	if (*ap->a_buflen < sizeof (dotzfs_name))
		return (SET_ERROR(ENOMEM));

	error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
	    LK_SHARED, &dvp);
	if (error != 0)
		return (SET_ERROR(error));

	VOP_UNLOCK1(dvp);
	*ap->a_vpp = dvp;
	*ap->a_buflen -= sizeof (dotzfs_name);
	bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
	return (0);
}

static int
zfsctl_common_pathconf(struct vop_pathconf_args *ap)
{
	/*
	 * We care about ACL variables so that user land utilities like ls
	 * can display them correctly.  Since the ctldir's st_dev is set to be
	 * the same as the parent dataset, we must support all variables that
	 * it supports.
	 */
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX);
		return (0);

	case _PC_FILESIZEBITS:
		*ap->a_retval = 64;
		return (0);

	case _PC_MIN_HOLE_SIZE:
		*ap->a_retval = (int)SPA_MINBLOCKSIZE;
		return (0);

	case _PC_ACL_EXTENDED:
		*ap->a_retval = 0;
		return (0);

	case _PC_ACL_NFS4:
		*ap->a_retval = 1;
		return (0);

	case _PC_ACL_PATH_MAX:
		*ap->a_retval = ACL_MAX_ENTRIES;
		return (0);

	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);

	default:
		return (vop_stdpathconf(ap));
	}
}

/*
 * Returns a trivial ACL.
 */
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
	int i;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0);
	/*
	 * acl_nfs4_sync_acl_from_mode assumes that the owner can always modify
	 * attributes.  That is not the case for the ctldir, so we must clear
	 * those bits.  We also must clear ACL_READ_NAMED_ATTRS, because xattrs
	 * aren't supported by the ctldir.
	 */
	for (i = 0; i < ap->a_aclp->acl_cnt; i++) {
		struct acl_entry *entry;
		entry = &(ap->a_aclp->acl_entry[i]);
		entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER |
		    ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS |
		    ACL_READ_NAMED_ATTRS);
	}

	return (0);
}

static struct vop_vector zfsctl_ops_root = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_root_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	zfsctl_root_readdir,
	.vop_lookup =	zfsctl_root_lookup,
	.vop_inactive =	VOP_NULL,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
	.vop_print =	zfsctl_common_print,
	.vop_vptocnp =	zfsctl_root_vptocnp,
	.vop_pathconf =	zfsctl_common_pathconf,
	.vop_getacl =	zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root);

static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}

static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
	int err;

	err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
	return (err);
}

/*
 * Given a vnode, get the root vnode of the filesystem mounted on top of
 * it, if any.  The root vnode is referenced and locked.
 * If no filesystem is mounted, then the original vnode remains referenced
 * and locked.  If any error happens, the original vnode is unlocked and
 * released.
 */
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
	struct mount *mp;
	int err;

	ASSERT_VOP_LOCKED(*vpp, __func__);
	ASSERT3S((*vpp)->v_type, ==, VDIR);

	if ((mp = (*vpp)->v_mountedhere) != NULL) {
		err = vfs_busy(mp, 0);
		KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
		KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
		vput(*vpp);
		err = VFS_ROOT(mp, flags, vpp);
		vfs_unbusy(mp);
		return (err);
	}
	return (EJUSTRETURN);
}

typedef struct {
	const char *snap_name;
	uint64_t    snap_id;
} snapshot_setup_arg_t;

static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
	snapshot_setup_arg_t *ssa = arg;
	sfs_node_t *node;

	ASSERT_VOP_ELOCKED(vp, __func__);

	node = sfs_alloc_node(sizeof (sfs_node_t),
	    ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
	zfsctl_common_vnode_setup(vp, node);

	/* We have to support recursive locking. */
	VN_LOCK_AREC(vp);
}

/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 * There are four possibilities:
 * - the snapshot node and vnode do not exist
 * - the snapshot vnode is covered by the mounted snapshot
 * - the snapshot vnode is not covered yet, the mount operation is in progress
 * - the snapshot vnode is not covered, because the snapshot has been unmounted
 * The last two states are transient and should be relatively short-lived.
 */
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char name[NAME_MAX + 1];
	char fullname[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	uint64_t snap_id;
	int nameiop = cnp->cn_nameiop;
	int lkflags = cnp->cn_lkflags;
	int flags = cnp->cn_flags;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
		return (err);
	}
	if (flags & ISDOTDOT) {
		err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
		    vpp);
		return (err);
	}

	if (cnp->cn_namelen >= sizeof (name))
		return (SET_ERROR(ENAMETOOLONG));

	strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
	if (err != 0)
		return (SET_ERROR(ENOENT));

	for (;;) {
		snapshot_setup_arg_t ssa;

		ssa.snap_name = name;
		ssa.snap_id = snap_id;
		err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
		    snap_id, "zfs", &zfsctl_ops_snapshot,
		    zfsctl_snapshot_vnode_setup, &ssa, vpp);
		if (err != 0)
			return (err);

		/* Check if a new vnode has just been created. */
		if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
			break;

		/*
		 * Check if a snapshot is already mounted on top of the vnode.
		 */
		err = zfsctl_mounted_here(vpp, lkflags);
		if (err != EJUSTRETURN)
			return (err);

		/*
		 * If the vnode is not covered, then either the mount operation
		 * is in progress or the snapshot has already been unmounted
		 * but the vnode hasn't been inactivated and reclaimed yet.
		 * We can try to re-use the vnode in the latter case.
		 */
		VI_LOCK(*vpp);
		if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
			/*
			 * Upgrade to exclusive lock in order to:
			 * - avoid race conditions
			 * - satisfy the contract of mount_snapshot()
			 */
			err = VOP_LOCK(*vpp, LK_TRYUPGRADE | LK_INTERLOCK);
			if (err == 0)
				break;
		} else {
			VI_UNLOCK(*vpp);
		}

		/*
		 * In this state we can loop on uncontested locks and starve
		 * the thread doing the lengthy, non-trivial mount operation.
		 * So, yield to prevent that from happening.
		 */
		vput(*vpp);
		kern_yield(PRI_USER);
	}

	VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));

	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, name);

	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;

		/* Clear the root flag (set via VFS_ROOT) as well. */
		(*vpp)->v_vflag &= ~VV_ROOT;
	}

	if (err != 0)
		*vpp = NULL;
	return (err);
}

static int
zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}

	ZFS_ENTER(zfsvfs);
	for (;;) {
		uint64_t cookie;
		uint64_t id;

		cookie = uio->uio_offset - dots_offset;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT) {
				if (eofp != NULL)
					*eofp = 1;
				error = 0;
			}
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		strcpy(entry.d_name, snapname);
		entry.d_namlen = strlen(entry.d_name);
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0) {
			if (error == ENAMETOOLONG)
				error = 0;
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(error));
		}
		uio->uio_offset = cookie + dots_offset;
	}
	/* NOTREACHED */
}

static int
zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	dsl_dataset_t *ds;
	uint64_t snap_count;
	int err;

	ZFS_ENTER(zfsvfs);
	ds = dmu_objset_ds(zfsvfs->z_os);
	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (err != 0) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		vap->va_nlink += snap_count;
	}
	vap->va_size = vap->va_nlink;

	ZFS_EXIT(zfsvfs);
	return (0);
}

static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_getattr =	zfsctl_snapdir_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	zfsctl_snapdir_readdir,
	.vop_lookup =	zfsctl_snapdir_lookup,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
	.vop_print =	zfsctl_common_print,
	.vop_pathconf =	zfsctl_common_pathconf,
	.vop_getacl =	zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir);


static int
zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
{
	vnode_t *vp = ap->a_vp;

	VERIFY(vrecycle(vp) == 1);
	return (0);
}

static int
zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap)
{
	vnode_t *vp = ap->a_vp;
	void *data = vp->v_data;

	sfs_reclaim_vnode(vp);
	sfs_destroy_node(data);
	return (0);
}

static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	struct mount *mp;
	vnode_t *dvp;
	vnode_t *vp;
	sfs_node_t *node;
	size_t len;
	int locked;
	int error;

	vp = ap->a_vp;
	node = vp->v_data;
	len = strlen(node->sn_name);
	if (*ap->a_buflen < len)
		return (SET_ERROR(ENOMEM));

	/*
	 * Prevent unmounting of the snapshot while the vnode lock
	 * is not held.  That is not strictly required, but allows
	 * us to assert that an uncovered snapshot vnode is never
	 * "leaked".
	 */
	mp = vp->v_mountedhere;
	if (mp == NULL)
		return (SET_ERROR(ENOENT));
	error = vfs_busy(mp, 0);
	KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));

	/*
	 * We can vput the vnode as we can now depend on the reference owned
	 * by the busied mp.  But we also need to hold the vnode, because
	 * the reference may go away after vfs_unbusy(), which has to be
	 * called before we can lock the vnode again.
	 */
	locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
	enum vgetstate vs = vget_prep(vp);
#else
	vhold(vp);
#endif
	vput(vp);

	/* Look up .zfs/snapshot, our parent. */
	error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
	if (error == 0) {
		VOP_UNLOCK1(dvp);
		*ap->a_vpp = dvp;
		*ap->a_buflen -= len;
		bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
	}
	vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
	vget_finish(vp, locked | LK_RETRY, vs);
#else
	vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
	return (error);
}

/*
 * These vnodes should never see the light of day.  They should always
 * be covered.
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default =		NULL, /* ensure very restricted access */
	.vop_inactive =		zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
	.vop_need_inactive =	vop_stdneed_inactive,
#endif
	.vop_reclaim =		zfsctl_snapshot_reclaim,
	.vop_vptocnp =		zfsctl_snapshot_vptocnp,
	.vop_lock1 =		vop_stdlock,
	.vop_unlock =		vop_stdunlock,
	.vop_islocked =		vop_stdislocked,
	.vop_advlockpurge =	vop_stdadvlockpurge, /* called by vgone */
	.vop_print =		zfsctl_common_print,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot);

int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs __unused = vfsp->vfs_data;
	vnode_t *vp;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	*zfsvfsp = NULL;
	error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
	    ZFSCTL_INO_SNAPDIR, objsetid, &vp);
	if (error == 0 && vp != NULL) {
		/*
		 * XXX Probably need to at least reference, if not busy, the mp.
		 */
		if (vp->v_mountedhere != NULL)
			*zfsvfsp = vp->v_mountedhere->mnt_data;
		vput(vp);
	}
	if (*zfsvfsp == NULL)
		return (SET_ERROR(EINVAL));
	return (0);
}

/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	struct mount *mp;
	vnode_t *vp;
	uint64_t cookie;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);

	cookie = 0;
	for (;;) {
		uint64_t id;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		for (;;) {
			error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
			    ZFSCTL_INO_SNAPDIR, id, &vp);
			if (error != 0 || vp == NULL)
				break;

			mp = vp->v_mountedhere;

			/*
			 * v_mountedhere being NULL means that the
			 * (uncovered) vnode is in a transient state
			 * (mounting or unmounting), so loop until it
			 * settles down.
			 */
			if (mp != NULL)
				break;
			vput(vp);
		}
		if (error != 0)
			break;
		if (vp == NULL)
			continue;	/* no mountpoint, nothing to do */

		/*
		 * The mount-point vnode is kept locked to avoid spurious EBUSY
		 * from a concurrent umount.
		 * The vnode lock must have recursive locking enabled.
		 */
		vfs_ref(mp);
		error = dounmount(mp, fflags, curthread);
		KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
		    ("extra references after unmount"));
		vput(vp);
		if (error != 0)
			break;
	}
	KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
	    ("force unmounting failed"));
	return (error);
}

int
zfsctl_snapshot_unmount(char *snapname, int flags __unused)
{
	vfs_t *vfsp = NULL;
	zfsvfs_t *zfsvfs = NULL;

	if (strchr(snapname, '@') == NULL)
		return (0);

	int err = getzfsvfs(snapname, &zfsvfs);
	if (err != 0) {
		ASSERT3P(zfsvfs, ==, NULL);
		return (0);
	}
	vfsp = zfsvfs->z_vfs;

	ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));

	vfs_ref(vfsp);
	vfs_unbusy(vfsp);
	return (dounmount(vfsp, MS_FORCE, curthread));
}