/*	$NetBSD: vfs_subr.c,v 1.194 2003/04/22 13:11:23 christos Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.194 2003/04/22 13:11:23 christos Exp $");

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>
#include <sys/filedesc.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

const enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
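 * bufinsvn() puts a buffer at the head of a vnode's clean or dirty
 * buffer list; bufremvn() removes it and marks it off-list by
 * pointing b_vnbufs.le_next at NOLIST.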
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list = vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/* XXX - gross; single global lock to protect v_numoutput */
struct simplelock global_v_numoutput_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;			/* root device */

struct pool vnode_pool;				/* memory pool for vnodes */

MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");

/*
 * Local declarations.
 */
void insmntque __P((struct vnode *, struct mount *));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
void vgoneall __P((struct vnode *));

void vclean(struct vnode *, int, struct proc *);

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
    struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
	    &pool_allocator_nointr);

	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
{
	int lkflags;

	while (mp->mnt_flag & MNT_UNMOUNT) {
		int gone;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curproc)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * XXX MP: add spinlock protecting mnt_wcnt here once you
		 * can atomically unlock-and-sleep.
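		 *
		 * Each waiter below bumps mnt_wcnt around its sleep;
		 * the last one out does a wakeup on &mp->mnt_wcnt so
		 * that dounmount() can wait for all waiters to leave
		 * the mount structure before it is freed.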
		 */
		mp->mnt_wcnt++;
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		mp->mnt_wcnt--;
		gone = mp->mnt_flag & MNT_GONE;

		if (mp->mnt_wcnt == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
	struct mount *mp;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	struct mount *mp;

	simple_lock(&mountlist_slock);
	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = makefstype(mp->mnt_op->vfs_name);
	mp->mnt_stat.f_fsid.val[0] = makedev(mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(mtype & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	const char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}


/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	struct vattr *vap;
{

	vap->va_type = VNON;

	/*
	 * Assign individually so that it is safe even if size and
	 * sign of each member vary.
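	 * (A single memset() fill would not do here: the members
	 * differ in width and signedness, and va_type and va_vaflags,
	 * set above and below, must not be VNOVAL.)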
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec =
	    vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec =
	    vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	extern struct uvm_pagerops uvm_vnodeops;
	struct uvm_object *uobj;
	struct proc *p = curproc;	/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int error = 0, tryalloc;

 try_again:
	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock.)
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
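	 *
	 * The target total here, desiredvnodes, is the tunable that
	 * the kern.maxvnodes sysctl (mentioned in the tablefull()
	 * message below) adjusts.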
	 */

	vp = NULL;

	simple_lock(&vnode_free_list_slock);

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	tryalloc = numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
	     (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));

	if (tryalloc &&
	    (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
		simple_unlock(&vnode_free_list_slock);
		memset(vp, 0, sizeof(*vp));
		simple_lock_init(&vp->v_interlock);
		uobj = &vp->v_uobj;
		uobj->pgops = &uvm_vnodeops;
		uobj->uo_npages = 0;
		TAILQ_INIT(&uobj->memq);
		numvnodes++;
	} else {
		if ((vp = TAILQ_FIRST(listhd = &vnode_free_list)) == NULL)
			vp = TAILQ_FIRST(listhd = &vnode_hold_list);
		for (; vp != NULL; vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0) {
					break;
				}
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			simple_unlock(&vnode_free_list_slock);
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			if (tryalloc) {
				printf("WARNING: unable to allocate new "
				    "vnode, retrying...\n");
				(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
				goto try_again;
			}
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't, vp %p", vp);
		TAILQ_REMOVE(listhd, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		simple_unlock(&vnode_free_list_slock);
		vp->v_lease = NULL;

		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data || vp->v_uobj.uo_npages ||
		    TAILQ_FIRST(&vp->v_uobj.memq))
			panic("cleaned vnode isn't, vp %p", vp);
		if (vp->v_numoutput)
			panic("clean vnode has pending I/O's, vp %p", vp);
#endif
		KASSERT((vp->v_flag & VONWORKLST) == 0);
		vp->v_flag = 0;
		vp->v_socket = NULL;
#ifdef VERIFIED_EXEC
		vp->fp_status = FINGERPRINT_INVALID;
#endif
	}
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uobj.vmobjlock);

	/*
	 * initialize uvm_object within vnode.
	 */

	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}

/*
 * This is really just the reverse of getnewvnode().  Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
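 *
 * The vnode passed back must be in the state getnewvnode() left it:
 * in particular with a use count of 1, as the DIAGNOSTIC check
 * below insists.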
 */
void
ungetnewvnode(vp)
	struct vnode *vp;
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 1)
		panic("ungetnewvnode: busy vnode");
#endif
	vp->v_usecount--;
	insmntque(vp, NULL);
	vp->v_type = VBAD;

	simple_lock(&vp->v_interlock);
	/*
	 * Insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		/* XXX global lock hack
		 * can't use v_interlock here since this is called
		 * in interrupt context from biodone().
		 */
		simple_lock(&global_v_numoutput_slock);
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput, vp %p", vp);
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
		simple_unlock(&global_v_numoutput_slock);
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	struct buf *bp, *nbp;
	int s, error;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT : 0);
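
	/*
	 * V_SAVE means modified data must be written back rather than
	 * discarded: flushflags then includes PGO_CLEANIT for the page
	 * flush, and a waiting VOP_FSYNC is issued below before the
	 * buffer lists are emptied.
	 */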

	/* XXXUBC this doesn't look at flags or slp* */
	simple_lock(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
	if (error) {
		return error;
	}

	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
		if (error)
			return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
			panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = ltsleep((caddr_t)bp,
			    slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vinvalbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = ltsleep((caddr_t)bp,
			    slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vinvalbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			simple_unlock(&bp->b_interlock);
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	splx(s);

	return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
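 *
 * Buffers whose logical block number is below lbn are left alone;
 * everything at or beyond it is invalidated, along with the
 * corresponding pages.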
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t lbn;
	int slpflag, slptimeo;
{
	struct buf *bp, *nbp;
	int s, error;
	voff_t off;

	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
	simple_lock(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
	if (error) {
		return error;
	}

	s = splbio();

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vtruncbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vtruncbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

	splx(s);

	return (0);
}

void
vflushbuf(vp, sync)
	struct vnode *vp;
	int sync;
{
	struct buf *bp, *nbp;
	int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
	int s;

	simple_lock(&vp->v_interlock);
	(void) VOP_PUTPAGES(vp, 0, 0, flags);

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		simple_lock(&bp->b_interlock);
		if ((bp->b_flags & B_BUSY)) {
			simple_unlock(&bp->b_interlock);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_flags |= B_BUSY | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	simple_lock(&global_v_numoutput_slock);
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		ltsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0,
		    &global_v_numoutput_slock);
	}
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	struct vnode *vp;
	struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free, bp %p", bp);
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
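 * Drops the hold reference taken by bgetvp() and, if the vnode no
 * longer has dirty buffers or pages, takes it off the syncer
 * worklist.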
 */
void
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == NULL)
		panic("brelvp: vp NULL, bp %p", bp);

	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}

	bp->b_vp = NULL;
	HOLDRELE(vp);
	splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
		    (newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device).
 * If such an alias exists, deallocate the existing contents
 * and return the aliased vnode.  The caller is responsible for
 * filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_NOWAIT);
		/* XXX Erg. */
		if (nvp->v_specinfo == NULL) {
			simple_unlock(&spechash_slock);
			uvm_wait("checkalias");
			goto loop;
		}

		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set the
 * vnode is being eliminated in vgone. In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
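	 * With LK_NOWAIT we return EBUSY right away rather than
	 * sleeping on the transition.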
	 */

	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			simple_unlock(&vp->v_interlock);
			return EBUSY;
		}
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow, vp %p", vp);
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VEXECMAP) {
		uvmexp.execpages -= vp->v_uobj.uo_npages;
		uvmexp.filepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~(VTEXT|VEXECMAP);
	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
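 * Unlike vput(), the vnode is expected to be unlocked on entry:
 * vrele() acquires the vnode lock itself before calling VOP_INACTIVE.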
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt vp %p", vp);
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VEXECMAP) {
		uvmexp.execpages -= vp->v_uobj.uo_npages;
		uvmexp.filepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~(VTEXT|VEXECMAP);
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt vp %p", vp);
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */

	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required, vp %p", vp);
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow, vp %p", vp);
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
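 * The vnode interlock must be held on entry and is released before
 * return.  With DOCLOSE, cached data is flushed and, for an active
 * vnode, the file is closed before the vnode is reclaimed.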
 */
void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */

	if ((active = vp->v_usecount) != 0) {
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	if (vp->v_flag & VEXECMAP) {
		uvmexp.execpages -= vp->v_uobj.uo_npages;
		uvmexp.filepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~(VTEXT|VEXECMAP);

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 */
	if (flags & DOCLOSE) {
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
		KASSERT((vp->v_flag & VONWORKLST) == 0);
	}
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */

			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	KASSERT(vp->v_uobj.uo_npages == 0);
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
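	 * The ops vector is switched to the dead filesystem's so that
	 * any remaining accesses see well-defined behavior, and anyone
	 * sleeping on the VXLOCK transition is woken below.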
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	VN_KNOTE(vp, NOTE_REVOKE);	/* FreeBSD has this in vn_pollgone() */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */

	vclean(vp, DOCLOSE, p);
	KASSERT((vp->v_flag & VONWORKLST) == 0);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */

	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);

	/*
	 * If special device, remove it from special device alias list,
	 * if it is on one.
	 */

	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (vp->v_hashchain != NULL) {
			if (*vp->v_hashchain == vp) {
				*vp->v_hashchain = vp->v_specnext;
			} else {
				for (vq = *vp->v_hashchain; vq;
				    vq = vq->v_specnext) {
					if (vq->v_specnext != vp)
						continue;
					vq->v_specnext = vp->v_specnext;
					break;
				}
				if (vq == NULL)
					panic("missing bdev");
			}
			if (vp->v_flag & VALIASED) {
				vx = NULL;
				for (vq = *vp->v_hashchain; vq;
				    vq = vq->v_specnext) {
					if (vq->v_rdev != vp->v_rdev ||
					    vq->v_type != vp->v_type)
						continue;
					if (vx)
						break;
					vx = vq;
				}
				if (vx == NULL)
					panic("missing alias");
				if (vq == NULL)
					vx->v_flag &= ~VALIASED;
				vp->v_flag &= ~VALIASED;
			}
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */

	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean, vp %p", vp);
		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
		    TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp &&
		    (vq->v_flag & VXLOCK) == 0) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
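 * Used by the DIAGNOSTIC checks above and by printlockedvnodes()
 * below.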
 */
const char * const vnode_types[] = {
	"VNON",
	"VREG",
	"VDIR",
	"VBLK",
	"VCHR",
	"VLNK",
	"VSOCK",
	"VFIFO",
	"VBAD"
};

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[96];

	if (label != NULL)
		printf("%s: ", label);
	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
	    vp->v_tag, vnode_types[vp->v_type],
	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VEXECMAP)
		strcat(buf, "|VEXECMAP");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	struct vfsconf vfc;
	extern const char * const mountcompatnames[];
	extern int nmountcompatnames;
#endif
	struct vfsops *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	/* Not generic: goes to file system. */
	if (name[0] != VFS_GENERIC) {
		static const struct ctlname vfsnames[VFS_MAXID+1]=CTL_VFS_NAMES;
		const char *vfsname;

		if (name[0] < 0 || name[0] > VFS_MAXID
		    || (vfsname = vfsnames[name[0]].ctl_name) == NULL)
			return (EOPNOTSUPP);

		vfsp = vfs_getopsbyname(vfsname);
		if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	/* The rest are generic vfs sysctls. */
	switch (name[1]) {
	case VFS_USERMOUNT:
		return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	case VFS_MAXTYPENUM:
		/*
		 * Provided for 4.4BSD-Lite2 compatibility.
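		 * Reports the number of entries in the compatibility
		 * table of file system type numbers below.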
		 */
		return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
	case VFS_CONF:
		/*
		 * Special: a node, next is a file system name.
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		if (name[2] >= nmountcompatnames || name[2] < 0 ||
		    mountcompatnames[name[2]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfc.vfc_vfsops = vfsp;
		strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
		vfc.vfc_typenum = name[2];
		vfc.vfc_refcount = vfsp->vfs_refcount;
		vfc.vfc_flags = 0;
		vfc.vfc_mountroot = vfsp->vfs_mountroot;
		vfc.vfc_next = NULL;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
		    sizeof(struct vfsconf)));
#endif
	default:
		break;
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		    vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				return (ENOMEM);
			}
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
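 * Returns EBUSY if the vnode, or any alias of it, has a filesystem
 * mounted on it (i.e. its v_specmountpoint is set).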
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	     mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				vfs_unbusy(mp);	/* drop the mount busied above */
				return (ENOMEM);
			}
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ,
			    VNODESZ))) {
				vfs_unbusy(mp);	/* drop the mount busied above */
				return (error);
			}
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}
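/*
 * For reference: sysctl_vnode() is the backend for the kern.vnode
 * sysctl, and the NULL-"where" case exists so a caller can size its
 * buffer first (with KINFO_VNODESLOP worth of slack for vnodes created
 * in between).  A minimal pstat(8)-style userland sketch of that
 * two-step pattern (illustrative only; real consumers need the kernel
 * structure layouts to match):
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_VNODE };
	size_t len = 0;
	char *buf, *p;

	/* First call: oldp == NULL, the kernel reports an estimate. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		return 1;
	if ((buf = malloc(len)) == NULL)
		return 1;
	/* Second call: fetch (struct vnode *, struct vnode) pairs. */
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return 1;
	for (p = buf; p < buf + len;
	    p += sizeof(struct vnode *) + sizeof(struct vnode))
		printf("vnode at %p\n", (void *)*(struct vnode **)p);
	free(buf);
	return 0;
}
#endif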
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	struct netcred *np, *enp;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		crcvt(&np->netc_anon, &argp->ex_anon);
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	memset((caddr_t)np, 0, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It seems silly to initialize every address family
		 * when most are never used; do it on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		if (rn == 0) {
			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
			    smask, rnh);
			if (enp == 0) {
				error = EPERM;
				goto out;
			}
		} else
			enp = (struct netcred *)rn;

		if (enp->netc_exflags != argp->ex_flags ||
		    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
		    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
		    enp->netc_anon.cr_ngroups !=
		    (uint32_t) argp->ex_anon.cr_ngroups ||
		    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
		    enp->netc_anon.cr_ngroups))
			error = EPERM;
		else
			error = 0;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	crcvt(&np->netc_anon, &argp->ex_anon);
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
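/*
 * For context: vfs_export() is driven from a file system's mount entry
 * point when mountd(8) pushes export updates via mount(2) with
 * MNT_UPDATE.  A rough sketch of that call pattern, modeled on the
 * classic ffs_mount() update path (the args/ump field names here are
 * illustrative assumptions, not a verbatim quote):
 */
#if 0
	/* Inside an fs-specific mount routine, on an MNT_UPDATE call: */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (args.fspec == NULL)
			/*
			 * No device given: this update only changes the
			 * export lists hanging off the mount point.
			 */
			return (vfs_export(mp, &ump->um_export,
			    &args.export));
	}
#endif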
/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);	/* don't leak the root vnode on error */
		return (error);
	}

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Look up in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
	enum vtype type;
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/*
	 * The super-user always gets read/write access, but execute
	 * access depends on at least one execute bit being set.
	 */
	if (cred->cr_uid == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
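/*
 * Worked example of the mask logic above, as a userland re-implementation
 * for illustration only.  The kernel request bits VREAD/VWRITE/VEXEC from
 * <sys/vnode.h> happen to line up with the owner permission bits, so the
 * per-class masks reduce to plain S_I* arithmetic:
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>

int
main(void)
{
	mode_t file_mode = 0640;	/* rw-r----- */

	/* Owner asking for read+write: mask = S_IRUSR|S_IWUSR = 0600. */
	assert((file_mode & (S_IRUSR|S_IWUSR)) == (S_IRUSR|S_IWUSR));

	/* "Other" asking for read: mask = S_IROTH = 0004, not granted. */
	assert((file_mode & S_IROTH) != S_IROTH);
	return 0;
}
#endif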
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
	struct proc *p;
{
	struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		/*
		 * XXX Freeze syncer.  Must do this before locking the
		 * mount point.  See dounmount() for details.
		 */
		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
		if (vfs_busy(mp, 0, 0)) {
			lockmgr(&syncer_lock, LK_RELEASE, NULL);
			continue;
		}
		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s;
	struct lwp *l = curlwp;
	struct proc *p;

	/* XXX we're certainly not running in proc0's context! */
	if (l == NULL || (p = l->l_proc) == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* Remove user processes from the run queue. */
	suspendsched();
	(void) spl0();

	/* Avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(l, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20;) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}
	if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif

#else /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		return;
	} else
		printf("done\n");

	/*
	 * If we've panicked, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(p);
}
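/*
 * For context: vfs_shutdown() is normally invoked from the
 * machine-dependent reboot path.  A hedged sketch of the usual shape
 * (details vary per port; this fragment is illustrative, not any
 * port's verbatim cpu_reboot()):
 */
#if 0
	/* In a port's cpu_reboot(int howto, char *bootstr): */
	if ((howto & RB_NOSYNC) == 0 && !cold)
		vfs_shutdown();		/* sync and unmount, as above */
#endif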
/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
	struct vfsops *v;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET "
			    "(0x%08x -> %d,%d)", rootdev,
			    major(rootdev), minor(rootdev));
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If the user specified a file system, use it.
	 */
	if (mountroot != NULL)
		return ((*mountroot)());

	/*
	 * Try each file system currently configured into the kernel.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		printf("mountroot: trying %s...\n", v->vfs_name);
#endif
		if ((*v->vfs_mountroot)() == 0) {
			printf("root file system type: %s\n", v->vfs_name);
			break;
		}
	}

	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		return (EFTYPE);
	}
	return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
	const char *name;
{
	struct vfsops *v;

	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}

	return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

out:
	return (error);
}

/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}
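/*
 * For context: vfs_attach()/vfs_detach() are the registration hooks a
 * loadable file system would use at load/unload time.  A hedged sketch
 * (the "examplefs" names are hypothetical placeholders, not a real
 * module):
 */
#if 0
extern struct vfsops examplefs_vfsops;	/* hypothetical fs ops vector */

int
examplefs_modload(void)
{

	/* EEXIST if a file system by this name is already attached. */
	return (vfs_attach(&examplefs_vfsops));
}

int
examplefs_modunload(void)
{

	/* Fails with EBUSY while any instance is still mounted. */
	return (vfs_detach(&examplefs_vfsops));
}
#endif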
void
vfs_reinit(void)
{
	struct vfsops *vfs;

	LIST_FOREACH(vfs, &vfs_list, vfs_list) {
		if (vfs->vfs_reinit) {
			(*vfs->vfs_reinit)();
		}
	}
}

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_oflags = mbp->f_oflags;
	sbp->f_type = mbp->f_type;
	(void)memcpy(&sbp->f_fsid, &mbp->f_fsid, sizeof(sbp->f_fsid));
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_spare[0] = mbp->f_spare[0];
	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
	    sizeof(sbp->f_fstypename));
	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
	    sizeof(sbp->f_mntonname));
	(void)memcpy(sbp->f_mntfromname, mbp->f_mntfromname,
	    sizeof(sbp->f_mntfromname));
}

int
set_statfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
    struct mount *mp, struct proc *p)
{
	int error;
	size_t size;
	struct statfs *sfs = &mp->mnt_stat;
	int (*fun)(const void *, void *, size_t, size_t *);

	(void)strncpy(mp->mnt_stat.f_fstypename, mp->mnt_op->vfs_name,
	    sizeof(mp->mnt_stat.f_fstypename));

	if (onp) {
		struct cwdinfo *cwdi = p->p_cwdi;
		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
		if (cwdi->cwdi_rdir != NULL) {
			size_t len;
			char *bp;
			char *path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);

			if (!path)
				return ENOMEM;

			bp = path + MAXPATHLEN;
			*--bp = '\0';
			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
			    path, MAXPATHLEN / 2, 0, p);
			if (error) {
				free(path, M_TEMP);
				return error;
			}

			len = strlen(bp);
			if (len > sizeof(sfs->f_mntonname) - 1)
				len = sizeof(sfs->f_mntonname) - 1;
			(void)strncpy(sfs->f_mntonname, bp, len);
			free(path, M_TEMP);

			if (len < sizeof(sfs->f_mntonname) - 1) {
				error = (*fun)(onp, &sfs->f_mntonname[len],
				    sizeof(sfs->f_mntonname) - len - 1, &size);
				if (error)
					return error;
				size += len;
			} else {
				size = len;
			}
		} else {
			error = (*fun)(onp, &sfs->f_mntonname,
			    sizeof(sfs->f_mntonname) - 1, &size);
			if (error)
				return error;
		}
		(void)memset(sfs->f_mntonname + size, 0,
		    sizeof(sfs->f_mntonname) - size);
	}

	if (fromp) {
		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
		error = (*fun)(fromp, sfs->f_mntfromname,
		    sizeof(sfs->f_mntfromname) - 1, &size);
		if (error)
			return error;
		(void)memset(sfs->f_mntfromname + size, 0,
		    sizeof(sfs->f_mntfromname) - size);
	}
	return 0;
}
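/*
 * For context: copy_statfs_info() above is the helper a file system's
 * statfs implementation uses to fill in the generic mount fields after
 * setting its own numbers.  A hedged sketch of the usual shape, modeled
 * on the ffs statfs path (the "xxxfs" name and the values are
 * hypothetical placeholders):
 */
#if 0
int
xxxfs_statfs(struct mount *mp, struct statfs *sbp, struct proc *p)
{

	/* File-system-specific numbers first (illustrative values)... */
	sbp->f_bsize = 8192;
	sbp->f_blocks = 1024;
	sbp->f_bfree = 512;
	/* ...then the generic mount information. */
	copy_statfs_info(sbp, mp);
	return (0);
}
#endif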
#ifdef DDB
const char buf_flagbits[] =
	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
	"\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
	struct buf *bp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[1024];

	(*pr)(" vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
	(*pr)(" error %d flags 0x%s\n", bp->b_error, buf);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
	    bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)(" data %p saveaddr %p dep %p\n",
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)(" iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\5EXECMAP"
	"\11XLOCK\12XWANT\13BWAIT\14ALIASED"
	"\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";

const char * const vnode_tags[] = {
	"VT_NON",
	"VT_UFS",
	"VT_NFS",
	"VT_MFS",
	"VT_MSDOSFS",
	"VT_LFS",
	"VT_LOFS",
	"VT_FDESC",
	"VT_PORTAL",
	"VT_NULL",
	"VT_UMAP",
	"VT_KERNFS",
	"VT_PROCFS",
	"VT_AFS",
	"VT_ISOFS",
	"VT_UNION",
	"VT_ADOSFS",
	"VT_EXT2FS",
	"VT_CODA",
	"VT_FILECORE",
	"VT_NTFS",
	"VT_VFS",
	"VT_OVERLAY",
	"VT_SMBFS"
};

void
vfs_vnode_print(vp, full, pr)
	struct vnode *vp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[256];
	const char *vtype, *vtag;

	uvm_object_printit(&vp->v_uobj, full, pr);
	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
	(*pr)("\nVNODE flags %s\n", buf);
	(*pr)("mp %p numoutput %d size 0x%llx\n",
	    vp->v_mount, vp->v_numoutput, vp->v_size);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	vtype = (vp->v_type >= 0 &&
	    vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
	    vnode_types[vp->v_type] : "UNKNOWN";
	vtag = (vp->v_tag >= 0 &&
	    vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
	    vnode_tags[vp->v_tag] : "UNKNOWN";

	(*pr)("type %s(%d) tag %s(%d) id 0x%lx mount %p typedata %p\n",
	    vtype, vp->v_type, vtag, vp->v_tag,
	    vp->v_id, vp->v_mount, vp->v_mountedhere);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
#endif
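/*
 * For reference: the "\20\1AGE..." strings above use the historic
 * bitmask_snprintf(9) template encoding: the first byte is the numeral
 * base (\20 == 16, i.e. hex), then each entry is a one-byte bit number
 * (1-origin) followed by that bit's name.  A minimal userland sketch of
 * a decoder for that encoding (illustrative; the kernel routine's exact
 * output formatting may differ):
 */
#if 0
#include <stdio.h>

static void
decode_bits(unsigned int v, const char *tmpl)
{
	int bit, sep = '<';
	const char *name;

	printf("%x", v);		/* tmpl[0] == 020 selects base 16 */
	for (tmpl++; (bit = *tmpl++) != '\0';) {
		name = tmpl;
		while (*tmpl > ' ')	/* names end at the next control byte */
			tmpl++;
		if (v & (1U << (bit - 1))) {
			printf("%c%.*s", sep, (int)(tmpl - name), name);
			sep = ',';
		}
	}
	if (sep == ',')
		printf(">");
	printf("\n");
}

int
main(void)
{
	/* B_BUSY|B_READ against buf_flagbits: prints "100010<BUSY,READ>" */
	decode_bits(0x100010, "\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY"
	    "\6SCANNED\7CALL\10DELWRI\11DIRTY\12DONE\13EINTR\14ERROR"
	    "\15GATHERED\16INVAL\17LOCKED\20NOCACHE\21ORDERED\22CACHE"
	    "\23PHYS\24RAW\25READ\26TAPE\30WANTED\32XXX\33VFLUSH");
	return 0;
}
#endif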