/*	$NetBSD: vfs_mount.c,v 1.13 2012/03/13 18:40:55 elad Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.13 2012/03/13 18:40:55 elad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/device.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>
#include <miscfs/specfs/specdev.h>

/* Root filesystem and device. */
vnode_t *		rootvnode;
struct device *		root_device;

/* Mounted filesystem list. */
struct mntlist		mountlist;
kmutex_t		mountlist_lock;

kmutex_t		mntvnode_lock;
kmutex_t		vfs_list_lock;

static specificdata_domain_t mount_specificdata_domain;
static kmutex_t		mntid_lock;

static kmutex_t		mountgen_lock;
static uint64_t		mountgen;

void
vfs_mount_sysinit(void)
{

	CIRCLEQ_INIT(&mountlist);
	mutex_init(&mountlist_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mntvnode_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&vfs_list_lock, MUTEX_DEFAULT, IPL_NONE);

	mount_specificdata_domain = specificdata_domain_create();
	mutex_init(&mntid_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mountgen_lock, MUTEX_DEFAULT, IPL_NONE);
	mountgen = 0;
}

struct mount *
vfs_mountalloc(struct vfsops *vfsops, vnode_t *vp)
{
	struct mount *mp;
	int error;

	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
	if (mp == NULL)
		return NULL;

	mp->mnt_op = vfsops;
	mp->mnt_refcnt = 1;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	error = vfs_busy(mp, NULL);
	KASSERT(error == 0);
	mp->mnt_vnodecovered = vp;
	mount_initspecific(mp);

	mutex_enter(&mountgen_lock);
	mp->mnt_gen = mountgen++;
	mutex_exit(&mountgen_lock);

	return mp;
}
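
/*
 * Illustrative sketch, not part of the original file: the intended
 * allocation/teardown pairing for vfs_mountalloc().  The mount returned
 * is busied (mnt_unmounting held as reader) and carries two references,
 * so an error path releases it with vfs_unbusy() followed by
 * vfs_destroy(), exactly as mount_domount() does later in this file.
 * The function name example_alloc_and_discard() is hypothetical.
 */
#if 0
static int
example_alloc_and_discard(struct vfsops *vfsops, vnode_t *vp)
{
	struct mount *mp;

	if ((mp = vfs_mountalloc(vfsops, vp)) == NULL)
		return ENOMEM;

	/* ... decide that the mount cannot proceed ... */

	vfs_unbusy(mp, false, NULL);	/* drop busy state and one reference */
	vfs_destroy(mp);		/* drop the initial reference */
	return 0;
}
#endif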

/*
 * vfs_rootmountalloc: lookup a filesystem type, and if found allocate and
 * initialize a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(const char *fstypename, const char *devname,
    struct mount **mpp)
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
		if (!strncmp(vfsp->vfs_name, fstypename,
		    sizeof(mp->mnt_stat.f_fstypename)))
			break;
	if (vfsp == NULL) {
		mutex_exit(&vfs_list_lock);
		return (ENODEV);
	}
	vfsp->vfs_refcount++;
	mutex_exit(&vfs_list_lock);

	if ((mp = vfs_mountalloc(vfsp, NULL)) == NULL)
		return ENOMEM;
	mp->mnt_flag = MNT_RDONLY;
	(void)strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name,
	    sizeof(mp->mnt_stat.f_fstypename));
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = '\0';
	mp->mnt_stat.f_mntfromname[sizeof(mp->mnt_stat.f_mntfromname) - 1] =
	    '\0';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname,
	    sizeof(mp->mnt_stat.f_mntfromname) - 1, 0);
	*mpp = mp;
	return 0;
}

/*
 * vfs_getnewfsid: get a new unique fsid.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	mutex_enter(&mntid_lock);
	mtype = makefstype(mp->mnt_op->vfs_name);
	mp->mnt_stat.f_fsidx.__fsid_val[0] = makedev(mtype, 0);
	mp->mnt_stat.f_fsidx.__fsid_val[1] = mtype;
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.__fsid_val[0] = makedev(mtype & 0xff, xxxfs_mntid);
	tfsid.__fsid_val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.__fsid_val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsidx.__fsid_val[0] = tfsid.__fsid_val[0];
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mutex_exit(&mntid_lock);
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * XXX Needs to add a reference to the mount point.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	mutex_enter(&mountlist_lock);
	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsidx.__fsid_val[0] == fsid->__fsid_val[0] &&
		    mp->mnt_stat.f_fsidx.__fsid_val[1] == fsid->__fsid_val[1]) {
			mutex_exit(&mountlist_lock);
			return (mp);
		}
	}
	mutex_exit(&mountlist_lock);
	return NULL;
}

/*
 * Drop a reference to a mount structure, freeing if the last reference.
 */
void
vfs_destroy(struct mount *mp)
{

	if (__predict_true((int)atomic_dec_uint_nv(&mp->mnt_refcnt) > 0)) {
		return;
	}

	/*
	 * Nothing else has visibility of the mount: we can now
	 * free the data structures.
	 */
	KASSERT(mp->mnt_refcnt == 0);
	specificdata_fini(mount_specificdata_domain, &mp->mnt_specdataref);
	rw_destroy(&mp->mnt_unmounting);
	mutex_destroy(&mp->mnt_updating);
	mutex_destroy(&mp->mnt_renamelock);
	if (mp->mnt_op != NULL) {
		vfs_delref(mp->mnt_op);
	}
	kmem_free(mp, sizeof(*mp));
}
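
/*
 * Illustrative sketch, not part of the original file: the reference
 * protocol around vfs_destroy().  A holder of an existing reference may
 * gain another with atomic_inc_uint() on mnt_refcnt and must balance it
 * with vfs_destroy(), which frees the mount only when the last reference
 * goes away.  This mirrors vfs_unmount_forceone() and vfs_unmountall1()
 * later in this file; example_grab()/example_drop() are hypothetical.
 */
#if 0
static void
example_grab(struct mount *mp)
{

	KASSERT(mp->mnt_refcnt > 0);	/* caller already holds a reference */
	atomic_inc_uint(&mp->mnt_refcnt);
}

static void
example_drop(struct mount *mp)
{

	vfs_destroy(mp);		/* may free mp */
}
#endif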

/*
 * Mark a mount point as busy, and gain a new reference to it.  Used to
 * prevent the file system from being unmounted during critical sections.
 *
 * => The caller must hold a pre-existing reference to the mount.
 * => Will fail if the file system is being unmounted, or is unmounted.
 */
int
vfs_busy(struct mount *mp, struct mount **nextp)
{

	KASSERT(mp->mnt_refcnt > 0);

	if (__predict_false(!rw_tryenter(&mp->mnt_unmounting, RW_READER))) {
		if (nextp != NULL) {
			KASSERT(mutex_owned(&mountlist_lock));
			*nextp = CIRCLEQ_NEXT(mp, mnt_list);
		}
		return EBUSY;
	}
	if (__predict_false((mp->mnt_iflag & IMNT_GONE) != 0)) {
		rw_exit(&mp->mnt_unmounting);
		if (nextp != NULL) {
			KASSERT(mutex_owned(&mountlist_lock));
			*nextp = CIRCLEQ_NEXT(mp, mnt_list);
		}
		return ENOENT;
	}
	if (nextp != NULL) {
		mutex_exit(&mountlist_lock);
	}
	atomic_inc_uint(&mp->mnt_refcnt);
	return 0;
}

/*
 * Unbusy a busy filesystem.
 *
 * => If keepref is true, preserve reference added by vfs_busy().
 * => If nextp != NULL, acquire mountlist_lock.
 */
void
vfs_unbusy(struct mount *mp, bool keepref, struct mount **nextp)
{

	KASSERT(mp->mnt_refcnt > 0);

	if (nextp != NULL) {
		mutex_enter(&mountlist_lock);
	}
	rw_exit(&mp->mnt_unmounting);
	if (!keepref) {
		vfs_destroy(mp);
	}
	if (nextp != NULL) {
		KASSERT(mutex_owned(&mountlist_lock));
		*nextp = CIRCLEQ_NEXT(mp, mnt_list);
	}
}

/*
 * Insert a marker vnode into a mount's vnode list, after the
 * specified vnode.  mntvnode_lock must be held.
 */
void
vmark(vnode_t *mvp, vnode_t *vp)
{
	struct mount *mp = mvp->v_mount;

	KASSERT(mutex_owned(&mntvnode_lock));
	KASSERT((mvp->v_iflag & VI_MARKER) != 0);
	KASSERT(vp->v_mount == mp);

	TAILQ_INSERT_AFTER(&mp->mnt_vnodelist, vp, mvp, v_mntvnodes);
}

/*
 * Remove a marker vnode from a mount's vnode list, and return
 * a pointer to the next vnode in the list.  mntvnode_lock must
 * be held.
 */
vnode_t *
vunmark(vnode_t *mvp)
{
	struct mount *mp = mvp->v_mount;
	vnode_t *vp;

	KASSERT(mutex_owned(&mntvnode_lock));
	KASSERT((mvp->v_iflag & VI_MARKER) != 0);

	vp = TAILQ_NEXT(mvp, v_mntvnodes);
	TAILQ_REMOVE(&mp->mnt_vnodelist, mvp, v_mntvnodes);

	KASSERT(vp == NULL || vp->v_mount == mp);

	return vp;
}

/*
 * Move a vnode from one mount queue to another.
 */
void
vfs_insmntque(vnode_t *vp, struct mount *mp)
{
	struct mount *omp;

	KASSERT(mp == NULL || (mp->mnt_iflag & IMNT_UNMOUNT) == 0 ||
	    vp->v_tag == VT_VFS);

	mutex_enter(&mntvnode_lock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL)
		TAILQ_REMOVE(&vp->v_mount->mnt_vnodelist, vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if
	 * available.  The caller must take a reference on the mount
	 * structure and donate it to the vnode.
	 */
	if ((vp->v_mount = mp) != NULL)
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
	mutex_exit(&mntvnode_lock);

	if (omp != NULL) {
		/* Release reference to old mount. */
		vfs_destroy(omp);
	}
}
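
/*
 * Illustrative sketch, not part of the original file: the iteration
 * pattern vfs_busy()/vfs_unbusy() are designed for.  The caller walks
 * the global mountlist with mountlist_lock held; both routines hand
 * back the next list entry through their nextp argument, so the walk
 * survives the lock being dropped while a mount is busy.  The helper
 * name example_foreach_mount() is hypothetical.
 */
#if 0
static void
example_foreach_mount(void (*func)(struct mount *))
{
	struct mount *mp, *nmp;

	mutex_enter(&mountlist_lock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	    mp = nmp) {
		/* On failure, vfs_busy() stores the next entry in nmp. */
		if (vfs_busy(mp, &nmp) != 0)
			continue;
		/* mountlist_lock was dropped; the mount cannot unmount. */
		(*func)(mp);
		/* Re-takes mountlist_lock, drops our reference, sets nmp. */
		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
}
#endif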

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

static vnode_t *
vflushnext(vnode_t *mvp, int *when)
{

	if (hardclock_ticks > *when) {
		mutex_exit(&mntvnode_lock);
		yield();
		mutex_enter(&mntvnode_lock);
		*when = hardclock_ticks + hz / 10;
	}
	return vunmark(mvp);
}

int
vflush(struct mount *mp, vnode_t *skipvp, int flags)
{
	vnode_t *vp, *mvp;
	int busy = 0, when = 0;

	/* First, flush out any vnode references from vrele_list. */
	vrele_flush();

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);

	/*
	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
	 * and vclean() are called.
	 */
	mutex_enter(&mntvnode_lock);
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp != NULL;
	    vp = vflushnext(mvp, &when)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		mutex_enter(vp->v_interlock);
		/*
		 * Ignore clean but still referenced vnodes.
		 */
		if ((vp->v_iflag & VI_CLEAN) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		/*
		 * Skip over vnodes marked VV_SYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			mutex_exit(&mntvnode_lock);
			vremfree(vp);
			vp->v_usecount = 1;
			vclean(vp, DOCLOSE);
			vrelel(vp, 0);
			mutex_enter(&mntvnode_lock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device.  For all other files, just
		 * kill them.
		 */
		if (flags & FORCECLOSE) {
			mutex_exit(&mntvnode_lock);
			atomic_inc_uint(&vp->v_usecount);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vclean(vp, DOCLOSE);
				vrelel(vp, 0);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p; /* XXXSMP */
				mutex_exit(vp->v_interlock);
				/*
				 * The vnode isn't clean, but still resides
				 * on the mount list.  Remove it.  XXX This
				 * is a bit dodgy.
				 */
				vfs_insmntque(vp, NULL);
				vrele(vp);
			}
			mutex_enter(&mntvnode_lock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		mutex_exit(vp->v_interlock);
		busy++;
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	if (busy)
		return (EBUSY);
	return (0);
}
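
/*
 * Illustrative sketch, not part of the original file: the marker-vnode
 * walk that vflush() above performs via vmark()/vunmark().  The marker
 * keeps the iterator's place in mnt_vnodelist, so mntvnode_lock may be
 * dropped and re-acquired between steps without losing position.  The
 * helper name example_walk_vnodes() is hypothetical.
 */
#if 0
static void
example_walk_vnodes(struct mount *mp)
{
	vnode_t *vp, *mvp;

	mvp = vnalloc(mp);		/* marker vnode (VI_MARKER set) */
	mutex_enter(&mntvnode_lock);
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp != NULL;
	    vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		/*
		 * Examine vp here.  mntvnode_lock may be released and
		 * re-taken, because the marker preserves our position.
		 */
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
}
#endif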

/*
 * Remove clean vnodes from a mountpoint's vnode list.
 */
void
vfs_scrubvnlist(struct mount *mp)
{
	vnode_t *vp, *nvp;

retry:
	mutex_enter(&mntvnode_lock);
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_mntvnodes);
		mutex_enter(vp->v_interlock);
		if ((vp->v_iflag & VI_CLEAN) != 0) {
			TAILQ_REMOVE(&mp->mnt_vnodelist, vp, v_mntvnodes);
			vp->v_mount = NULL;
			mutex_exit(&mntvnode_lock);
			mutex_exit(vp->v_interlock);
			vfs_destroy(mp);
			goto retry;
		}
		mutex_exit(vp->v_interlock);
	}
	mutex_exit(&mntvnode_lock);
}

/*
 * Mount a file system.
 */

/*
 * Scan all active processes to see if any of them have a current or root
 * directory onto which the new filesystem has just been mounted.  If so,
 * replace them with the new mount point.
 */
static void
mount_checkdirs(vnode_t *olddp)
{
	vnode_t *newdp, *rele1, *rele2;
	struct cwdinfo *cwdi;
	struct proc *p;
	bool retry;

	if (olddp->v_usecount == 1) {
		return;
	}
	if (VFS_ROOT(olddp->v_mountedhere, &newdp))
		panic("mount: lost mount");

	do {
		retry = false;
		mutex_enter(proc_lock);
		PROCLIST_FOREACH(p, &allproc) {
			if ((cwdi = p->p_cwdi) == NULL)
				continue;
			/*
			 * Cannot change to the old directory any more,
			 * so even if we see a stale value it is not a
			 * problem.
			 */
			if (cwdi->cwdi_cdir != olddp &&
			    cwdi->cwdi_rdir != olddp)
				continue;
			retry = true;
			rele1 = NULL;
			rele2 = NULL;
			atomic_inc_uint(&cwdi->cwdi_refcnt);
			mutex_exit(proc_lock);
			rw_enter(&cwdi->cwdi_lock, RW_WRITER);
			if (cwdi->cwdi_cdir == olddp) {
				rele1 = cwdi->cwdi_cdir;
				vref(newdp);
				cwdi->cwdi_cdir = newdp;
			}
			if (cwdi->cwdi_rdir == olddp) {
				rele2 = cwdi->cwdi_rdir;
				vref(newdp);
				cwdi->cwdi_rdir = newdp;
			}
			rw_exit(&cwdi->cwdi_lock);
			cwdfree(cwdi);
			if (rele1 != NULL)
				vrele(rele1);
			if (rele2 != NULL)
				vrele(rele2);
			mutex_enter(proc_lock);
			break;
		}
		mutex_exit(proc_lock);
	} while (retry);

	if (rootvnode == olddp) {
		vrele(rootvnode);
		vref(newdp);
		rootvnode = newdp;
	}
	vput(newdp);
}

int
mount_domount(struct lwp *l, vnode_t **vpp, struct vfsops *vfsops,
    const char *path, int flags, void *data, size_t *data_len)
{
	vnode_t *vp = *vpp;
	struct mount *mp;
	struct pathbuf *pb;
	struct nameidata nd;
	int error;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
	if (error) {
		vfs_delref(vfsops);
		return error;
	}

	/* Cannot make a non-dir a mount-point (from here anyway). */
	if (vp->v_type != VDIR) {
		vfs_delref(vfsops);
		return ENOTDIR;
	}

	if (flags & MNT_EXPORTED) {
		vfs_delref(vfsops);
		return EINVAL;
	}

	if ((mp = vfs_mountalloc(vfsops, vp)) == NULL) {
		vfs_delref(vfsops);
		return ENOMEM;
	}

	mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);

	/*
	 * The underlying file system may refuse the mount for
	 * various reasons.  Allow the user to force it to happen.
	 *
	 * Set the mount level flags.
	 */
	mp->mnt_flag = flags & (MNT_BASIC_FLAGS | MNT_FORCE | MNT_IGNORE);

	mutex_enter(&mp->mnt_updating);
	error = VFS_MOUNT(mp, path, data, data_len);
	mp->mnt_flag &= ~MNT_OP_FLAGS;

	if (error != 0)
		goto err_unmounted;

	/*
	 * Validate and prepare the mount point.
	 */
	error = pathbuf_copyin(path, &pb);
	if (error != 0) {
		goto err_mounted;
	}
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);
	error = namei(&nd);
	pathbuf_destroy(pb);
	if (error != 0) {
		goto err_mounted;
	}
	if (nd.ni_vp != vp) {
		vput(nd.ni_vp);
		error = EINVAL;
		goto err_mounted;
	}
	if (vp->v_mountedhere != NULL) {
		vput(nd.ni_vp);
		error = EBUSY;
		goto err_mounted;
	}
	error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0);
	if (error != 0) {
		vput(nd.ni_vp);
		goto err_mounted;
	}

	/*
	 * Put the new filesystem on the mount list after root.
	 */
	cache_purge(vp);
	mp->mnt_iflag &= ~IMNT_WANTRDWR;

	mutex_enter(&mountlist_lock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mutex_exit(&mountlist_lock);
	if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
		error = vfs_allocate_syncvnode(mp);
	if (error == 0)
		vp->v_mountedhere = mp;
	vput(nd.ni_vp);
	if (error != 0)
		goto err_onmountlist;

	mount_checkdirs(vp);
	mutex_exit(&mp->mnt_updating);

	/* Hold an additional reference to the mount across VFS_START(). */
	vfs_unbusy(mp, true, NULL);
	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	error = VFS_START(mp, 0);
	if (error)
		vrele(vp);
	/* Drop reference held for VFS_START(). */
	vfs_destroy(mp);
	*vpp = NULL;
	return error;

err_onmountlist:
	mutex_enter(&mountlist_lock);
	CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
	mp->mnt_iflag |= IMNT_GONE;
	mutex_exit(&mountlist_lock);

err_mounted:
	if (VFS_UNMOUNT(mp, MNT_FORCE) != 0)
		panic("Unmounting fresh file system failed");

err_unmounted:
	vp->v_mountedhere = NULL;
	mutex_exit(&mp->mnt_updating);
	vfs_unbusy(mp, false, NULL);
	vfs_destroy(mp);

	return error;
}
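
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * reference protocol for dounmount() below.  The caller gains a
 * reference beforehand and drops it itself only if dounmount() fails,
 * as vfs_unmount_forceone() and vfs_unmountall1() do later in this
 * file.  The function name example_unmount() is hypothetical.
 */
#if 0
static int
example_unmount(struct mount *mp, int flags, struct lwp *l)
{
	int error;

	atomic_inc_uint(&mp->mnt_refcnt);	/* reference for dounmount() */
	if ((error = dounmount(mp, flags, l)) != 0)
		vfs_destroy(mp);		/* drop it again on failure */
	return error;
}
#endif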

/*
 * Do the actual file system unmount.  File system is assumed to have
 * been locked by the caller.
 *
 * => Caller holds a reference to the mount, explicitly for dounmount().
 */
int
dounmount(struct mount *mp, int flags, struct lwp *l)
{
	vnode_t *coveredvp;
	int error, async, used_syncer;

#if NVERIEXEC > 0
	error = veriexec_unmountchk(mp);
	if (error)
		return (error);
#endif /* NVERIEXEC > 0 */

	/*
	 * XXX Freeze syncer.  Must do this before locking the
	 * mount point.  See dounmount() for details.
	 */
	mutex_enter(&syncer_mutex);
	rw_enter(&mp->mnt_unmounting, RW_WRITER);
	if ((mp->mnt_iflag & IMNT_GONE) != 0) {
		rw_exit(&mp->mnt_unmounting);
		mutex_exit(&syncer_mutex);
		return ENOENT;
	}

	used_syncer = (mp->mnt_syncer != NULL);

	/*
	 * XXX Syncer must be frozen when we get here.  This should really
	 * be done on a per-mountpoint basis, but the syncer doesn't work
	 * like that.
	 *
	 * The caller of dounmount() must acquire syncer_mutex because
	 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
	 * order, and we must preserve that order to avoid deadlock.
	 *
	 * So, if the file system did not use the syncer, now is
	 * the time to release the syncer_mutex.
	 */
	if (used_syncer == 0) {
		mutex_exit(&syncer_mutex);
	}
	mp->mnt_iflag |= IMNT_UNMOUNT;
	async = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	cache_purgevfs(mp);	/* remove cache entries for this file sys */
	if (mp->mnt_syncer != NULL)
		vfs_deallocate_syncvnode(mp);
	error = 0;
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
	}
	vfs_scrubvnlist(mp);
	if (error == 0 || (flags & MNT_FORCE)) {
		error = VFS_UNMOUNT(mp, flags);
	}
	if (error) {
		if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
			(void) vfs_allocate_syncvnode(mp);
		mp->mnt_iflag &= ~IMNT_UNMOUNT;
		mp->mnt_flag |= async;
		rw_exit(&mp->mnt_unmounting);
		if (used_syncer)
			mutex_exit(&syncer_mutex);
		return (error);
	}
	vfs_scrubvnlist(mp);
	mutex_enter(&mountlist_lock);
	if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
		coveredvp->v_mountedhere = NULL;
	CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
	mp->mnt_iflag |= IMNT_GONE;
	mutex_exit(&mountlist_lock);
	if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
		panic("unmount: dangling vnode");
	if (used_syncer)
		mutex_exit(&syncer_mutex);
	vfs_hooks_unmount(mp);
	rw_exit(&mp->mnt_unmounting);
	vfs_destroy(mp);	/* reference from mount() */
	if (coveredvp != NULLVP) {
		vrele(coveredvp);
	}
	return (0);
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
bool
vfs_unmountall(struct lwp *l)
{

	printf("unmounting file systems...");
	return vfs_unmountall1(l, true, true);
}

static void
vfs_unmount_print(struct mount *mp, const char *pfx)
{

	aprint_verbose("%sunmounted %s on %s type %s\n", pfx,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_fstypename);
}

bool
vfs_unmount_forceone(struct lwp *l)
{
	struct mount *mp, *nmp;
	int error;

	nmp = NULL;

	CIRCLEQ_FOREACH_REVERSE(mp, &mountlist, mnt_list) {
		if (nmp == NULL || mp->mnt_gen > nmp->mnt_gen) {
			nmp = mp;
		}
	}
	if (nmp == NULL) {
		return false;
	}

#ifdef DEBUG
	printf("\nforcefully unmounting %s (%s)...",
	    nmp->mnt_stat.f_mntonname, nmp->mnt_stat.f_mntfromname);
#endif
	atomic_inc_uint(&nmp->mnt_refcnt);
	if ((error = dounmount(nmp, MNT_FORCE, l)) == 0) {
		vfs_unmount_print(nmp, "forcefully ");
		return true;
	} else {
		vfs_destroy(nmp);
	}

#ifdef DEBUG
	printf("forceful unmount of %s failed with error %d\n",
	    nmp->mnt_stat.f_mntonname, error);
#endif

	return false;
}

bool
vfs_unmountall1(struct lwp *l, bool force, bool verbose)
{
	struct mount *mp, *nmp;
	bool any_error = false, progress = false;
	int error;

	for (mp = CIRCLEQ_LAST(&mountlist);
	     mp != (void *)&mountlist;
	     mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
#ifdef DEBUG
		printf("\nunmounting %p %s (%s)...",
		    (void *)mp, mp->mnt_stat.f_mntonname,
		    mp->mnt_stat.f_mntfromname);
#endif
		atomic_inc_uint(&mp->mnt_refcnt);
		if ((error = dounmount(mp, force ? MNT_FORCE : 0, l)) == 0) {
			vfs_unmount_print(mp, "");
			progress = true;
		} else {
			vfs_destroy(mp);
			if (verbose) {
				printf("unmount of %s failed with error %d\n",
				    mp->mnt_stat.f_mntonname, error);
			}
			any_error = true;
		}
	}
	if (verbose) {
		printf(" done\n");
	}
	if (any_error && verbose) {
		printf("WARNING: some file systems would not unmount\n");
	}
	return progress;
}

void
vfs_sync_all(struct lwp *l)
{
	printf("syncing disks... ");

	/* remove user processes from run queue */
	suspendsched();
	(void)spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	do_sys_sync(l);

	/* Wait for sync to finish. */
	if (buf_syncwait() != 0) {
#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif
		printf("giving up\n");
		return;
	} else
		printf("done\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
	lwp_t *l = curlwp;

	vfs_sync_all(l);

	/*
	 * If we have paniced - do not make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL) {
		return;
	}

	/* Unmount file systems. */
	vfs_unmountall(l);
}

/*
 * Print a list of supported file system types (used by vfs_mountroot)
 */
static void
vfs_print_fstypes(void)
{
	struct vfsops *v;
	int cnt = 0;

	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list)
		++cnt;
	mutex_exit(&vfs_list_lock);

	if (cnt == 0) {
		printf("WARNING: No file system modules have been loaded.\n");
		return;
	}

	printf("Supported file systems:");
	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		printf(" %s", v->vfs_name);
	}
	mutex_exit(&vfs_list_lock);
	printf("\n");
}

/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot(void)
{
	struct vfsops *v;
	int error = ENODEV;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (device_class(root_device)) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET "
			    "(0x%llx -> %llu,%llu)",
			    (unsigned long long)rootdev,
			    (unsigned long long)major(rootdev),
			    (unsigned long long)minor(rootdev));
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		if (bdevvp(rootdev, &rootvp))
			panic("vfs_mountroot: can't get vnode for rootdev");
		error = VOP_OPEN(rootvp, FREAD, FSCRED);
		if (error) {
			printf("vfs_mountroot: can't open root device\n");
			return (error);
		}
		break;

	case DV_VIRTUAL:
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    device_xname(root_device));
		return (ENODEV);
	}

	/*
	 * If user specified a root fs type, use it.  Make sure the
	 * specified type exists and has a mount_root()
	 */
	if (strcmp(rootfstype, ROOT_FSTYPE_ANY) != 0) {
		v = vfs_getopsbyname(rootfstype);
		error = EFTYPE;
		if (v != NULL) {
			if (v->vfs_mountroot != NULL) {
				error = (v->vfs_mountroot)();
			}
			v->vfs_refcount--;
		}
		goto done;
	}

	/*
	 * Try each file system currently configured into the kernel.
	 */
	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		aprint_normal("mountroot: trying %s...\n", v->vfs_name);
#endif
		v->vfs_refcount++;
		mutex_exit(&vfs_list_lock);
		error = (*v->vfs_mountroot)();
		mutex_enter(&vfs_list_lock);
		v->vfs_refcount--;
		if (!error) {
			aprint_normal("root file system type: %s\n",
			    v->vfs_name);
			break;
		}
	}
	mutex_exit(&vfs_list_lock);

	if (v == NULL) {
		vfs_print_fstypes();
		printf("no file system for %s", device_xname(root_device));
		if (device_class(root_device) == DV_DISK)
			printf(" (dev 0x%llx)", (unsigned long long)rootdev);
		printf("\n");
		error = EFTYPE;
	}

done:
	if (error && device_class(root_device) == DV_DISK) {
		VOP_CLOSE(rootvp, FREAD, FSCRED);
		vrele(rootvp);
	}
	if (error == 0) {
		extern struct cwdinfo cwdi0;

		CIRCLEQ_FIRST(&mountlist)->mnt_flag |= MNT_ROOTFS;
		CIRCLEQ_FIRST(&mountlist)->mnt_op->vfs_refcount++;

		/*
		 * Get the vnode for '/'.  Set cwdi0.cwdi_cdir to
		 * reference it.
		 */
		error = VFS_ROOT(CIRCLEQ_FIRST(&mountlist), &rootvnode);
		if (error)
			panic("cannot find root vnode, error=%d", error);
		cwdi0.cwdi_cdir = rootvnode;
		vref(cwdi0.cwdi_cdir);
		VOP_UNLOCK(rootvnode);
		cwdi0.cwdi_rdir = NULL;

		/*
		 * Now that root is mounted, we can fixup initproc's CWD
		 * info.  All other processes are kthreads, which merely
		 * share proc0's CWD info.
		 */
		initproc->p_cwdi->cwdi_cdir = rootvnode;
		vref(initproc->p_cwdi->cwdi_cdir);
		initproc->p_cwdi->cwdi_rdir = NULL;
		/*
		 * Enable loading of modules from the filesystem
		 */
		module_load_vfs_init();

	}
	return (error);
}

/*
 * mount_specific_key_create --
 *	Create a key for subsystem mount-specific data.
 */
int
mount_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return specificdata_key_create(mount_specificdata_domain, keyp, dtor);
}

/*
 * mount_specific_key_delete --
 *	Delete a key for subsystem mount-specific data.
 */
void
mount_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(mount_specificdata_domain, key);
}

/*
 * mount_initspecific --
 *	Initialize a mount's specificdata container.
 */
void
mount_initspecific(struct mount *mp)
{
	int error;

	error = specificdata_init(mount_specificdata_domain,
	    &mp->mnt_specdataref);
	KASSERT(error == 0);
}

/*
 * mount_finispecific --
 *	Finalize a mount's specificdata container.
 */
void
mount_finispecific(struct mount *mp)
{

	specificdata_fini(mount_specificdata_domain, &mp->mnt_specdataref);
}

/*
 * mount_getspecific --
 *	Return mount-specific data corresponding to the specified key.
 */
void *
mount_getspecific(struct mount *mp, specificdata_key_t key)
{

	return specificdata_getspecific(mount_specificdata_domain,
	    &mp->mnt_specdataref, key);
}

/*
 * mount_setspecific --
 *	Set mount-specific data corresponding to the specified key.
 */
void
mount_setspecific(struct mount *mp, specificdata_key_t key, void *data)
{

	specificdata_setspecific(mount_specificdata_domain,
	    &mp->mnt_specdataref, key, data);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vnode_t *vp)
{
	vnode_t *vq;
	int error = 0;

	if (vp->v_type != VBLK)
		return ENOTBLK;
	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	mutex_enter(&device_lock);
	for (vq = specfs_hash[SPECHASH(vp->v_rdev)]; vq != NULL;
	    vq = vq->v_specnext) {
		if (vq->v_type != vp->v_type || vq->v_rdev != vp->v_rdev)
			continue;
		if (vq->v_specmountpoint != NULL) {
			error = EBUSY;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (error);
}

/*
 * Check if a device pointed to by vp is mounted.
 *
 * Returns:
 *   EINVAL	if it's not a disk
 *   EBUSY	if it's a disk and mounted
 *   0		if it's a disk and not mounted
 */
int
rawdev_mounted(vnode_t *vp, vnode_t **bvpp)
{
	vnode_t *bvp;
	dev_t dev;
	int d_type;

	bvp = NULL;
	d_type = D_OTHER;

	if (iskmemvp(vp))
		return EINVAL;

	switch (vp->v_type) {
	case VCHR: {
		const struct cdevsw *cdev;

		dev = vp->v_rdev;
		cdev = cdevsw_lookup(dev);
		if (cdev != NULL) {
			dev_t blkdev;

			blkdev = devsw_chr2blk(dev);
			if (blkdev != NODEV) {
				if (vfinddev(blkdev, VBLK, &bvp) != 0) {
					d_type = (cdev->d_flag & D_TYPEMASK);
					/* XXX: what if bvp disappears? */
					vrele(bvp);
				}
			}
		}

		break;
	}

	case VBLK: {
		const struct bdevsw *bdev;

		dev = vp->v_rdev;
		bdev = bdevsw_lookup(dev);
		if (bdev != NULL)
			d_type = (bdev->d_flag & D_TYPEMASK);

		bvp = vp;

		break;
	}

	default:
		break;
	}

	if (d_type != D_DISK)
		return EINVAL;

	if (bvpp != NULL)
		*bvpp = bvp;

	/*
	 * XXX: This is bogus.  We should be failing the request
	 * XXX: not only if this specific slice is mounted, but
	 * XXX: if it's on a disk with any other mounted slice.
	 */
	if (vfs_mountedon(bvp))
		return EBUSY;

	return 0;
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(const char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
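
/*
 * Illustrative sketch, not part of the original file: how a subsystem
 * might use the mount_*specific() interfaces above to attach private,
 * per-mount data.  The key variable and the example_* function names
 * are hypothetical.
 */
#if 0
static specificdata_key_t example_mount_key;

static int
example_subsys_init(void)
{

	/* NULL destructor: the subsystem frees its data explicitly. */
	return mount_specific_key_create(&example_mount_key, NULL);
}

static void
example_attach_data(struct mount *mp, void *data)
{

	mount_setspecific(mp, example_mount_key, data);
}

static void *
example_get_data(struct mount *mp)
{

	return mount_getspecific(mp, example_mount_key);
}
#endif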