/*	$NetBSD: vfs_subr.c,v 1.158 2001/09/15 20:36:37 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list = vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;			/* root device */

struct pool vnode_pool;				/* memory pool for vnodes */

/*
 * Local declarations.
 */
void	insmntque __P((struct vnode *, struct mount *));
int	getdevvp __P((dev_t, struct vnode **, enum vtype));
void	vgoneall __P((struct vnode *));

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
    struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);

	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
{
	int lkflags;

	while (mp->mnt_flag & MNT_UNMOUNT) {
		int gone;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curproc)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * XXX MP: add spinlock protecting mnt_wcnt here once you
		 * can atomically unlock-and-sleep.
		 */
		mp->mnt_wcnt++;
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		mp->mnt_wcnt--;
		gone = mp->mnt_flag & MNT_GONE;

		if (mp->mnt_wcnt == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
	struct mount *mp;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}
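
/*
 * Example usage (a sketch; the same pattern appears in
 * printlockedvnodes() and sysctl_vnode() below): callers bracket
 * access to a mount point with vfs_busy()/vfs_unbusy(), passing an
 * interlock that vfs_busy() takes care of once the mount is busied:
 *
 *	simple_lock(&mountlist_slock);
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock) == 0) {
 *		... operate on mp ...
 *		vfs_unbusy(mp);
 *	}
 */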

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = makefstype(mp->mnt_op->vfs_name);
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	const char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
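
/*
 * Worked example (not from the original source): for the type name
 * "ffs" ('f' == 102, 's' == 115), the loop computes
 *
 *	rv = (0 << 2) ^ 102   = 102
 *	rv = (102 << 2) ^ 102 = 408 ^ 102  = 510
 *	rv = (510 << 2) ^ 115 = 2040 ^ 115 = 1931
 *
 * so makefstype("ffs") == 1931.  Distinct names can collide; the
 * number is only 'unique' in the informal sense noted above.
 */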

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	struct vattr *vap;
{

	vap->va_type = VNON;

	/*
	 * Assign individually so that it is safe even if size and
	 * sign of each member are varied.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	extern struct uvm_pagerops uvm_vnodeops;
	struct uvm_object *uobj;
	struct proc *p = curproc;	/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int error = 0, tryalloc;

	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock).
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one.  The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list.  Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list.  If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list.  The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size.  We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */

try_again:
	vp = NULL;

	simple_lock(&vnode_free_list_slock);

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	tryalloc = numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(listhd = &vnode_free_list) == NULL &&
	     (TAILQ_FIRST(listhd = &vnode_hold_list) == NULL || toggle));

	if (tryalloc &&
	    (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
		simple_unlock(&vnode_free_list_slock);
		memset(vp, 0, sizeof(*vp));
		simple_lock_init(&vp->v_interlock);
		uobj = &vp->v_uobj;
		uobj->pgops = &uvm_vnodeops;
		uobj->uo_npages = 0;
		TAILQ_INIT(&uobj->memq);
		numvnodes++;
	} else {
		TAILQ_FOREACH(vp, listhd, v_freelist) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0) {
					break;
				}
				if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT |
				    LK_INTERLOCK)) {
					continue;
				}
				VOP_UNLOCK(vp, 0);
				break;
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			simple_unlock(&vnode_free_list_slock);
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			if (tryalloc) {
				printf("WARNING: unable to allocate new "
				    "vnode, retrying...\n");
				(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
				goto try_again;
			}
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't, vp %p", vp);
		TAILQ_REMOVE(listhd, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		simple_unlock(&vnode_free_list_slock);
		vp->v_lease = NULL;

		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data || vp->v_uobj.uo_npages ||
		    TAILQ_FIRST(&vp->v_uobj.memq))
			panic("cleaned vnode isn't, vp %p", vp);
		if (vp->v_numoutput)
			panic("clean vnode has pending I/O's, vp %p", vp);
#endif
		vp->v_flag = 0;
		vp->v_socket = NULL;
	}
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uobj.vmobjlock);

	/*
	 * initialize uvm_object within vnode.
	 */

	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}
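
/*
 * Example (a sketch only, not part of the original file): a
 * filesystem's VFS_VGET routine typically obtains a fresh vnode
 * with getnewvnode() and then attaches its private data, e.g.:
 *
 *	error = getnewvnode(VT_UFS, mp, ufs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 *
 * getdevvp() below shows the same pattern for special devices.
 */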

/*
 * This is really just the reverse of getnewvnode().  Needed for
 * VFS_VGET functions who may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vp)
	struct vnode *vp;
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 1)
		panic("ungetnewvnode: busy vnode");
#endif
	vp->v_usecount--;
	insmntque(vp, NULL);
	vp->v_type = VBAD;

	simple_lock(&vp->v_interlock);
	/*
	 * Insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput, vp %p", vp);
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct buf *bp, *nbp;
	int s, error;
	int flushflags = PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO|
	    (flags & V_SAVE ? PGO_CLEANIT : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	if (TAILQ_FIRST(&uobj->memq)) {
		simple_lock(&uobj->vmobjlock);
		error = (uobj->pgops->pgo_put)(uobj, 0, 0, flushflags);
		if (error) {
			return error;
		}
	}
	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
		if (error)
			return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
			panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	splx(s);

	return (0);
}
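
/*
 * Example (a sketch): vclean() below discards a vnode's cached data
 * with
 *
 *	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
 *
 * V_SAVE writes dirty buffers back first; a caller that wants the
 * data thrown away unconditionally would pass 0 for flags instead.
 */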

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t lbn;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct buf *bp, *nbp;
	int s, error;

	s = splbio();
	if (TAILQ_FIRST(&uobj->memq)) {
		simple_lock(&uobj->vmobjlock);
		error = (uobj->pgops->pgo_put)(uobj,
		    round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift), 0,
		    PGO_FREE|PGO_SYNCIO);
		if (error) {
			splx(s);
			return error;
		}
	}

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	splx(s);

	return (0);
}

/*
 * Flush out all dirty buffers associated with a vnode;
 * if "sync" is set, wait for the writes to complete.
 */
void
vflushbuf(vp, sync)
	struct vnode *vp;
	int sync;
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct buf *bp, *nbp;
	int s;

	if (TAILQ_FIRST(&uobj->memq)) {
		int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);

		simple_lock(&uobj->vmobjlock);
		(void) (uobj->pgops->pgo_put)(uobj, 0, 0, flags);
	}

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_flags |= B_BUSY | B_VFLUSH;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	struct vnode *vp;
	struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free, bp %p", bp);
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == NULL)
		panic("brelvp: vp NULL, bp %p", bp);

	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);

	if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}

	bp->b_vp = NULL;
	HOLDRELE(vp);
	splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file-specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
		    (newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
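
/*
 * Example (a sketch): when the root filesystem is mounted, the kernel
 * obtains a vnode for the root device roughly like this:
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot obtain root vnode");
 *
 * rootdev/rootvp here stand for the machine-dependent root device
 * variables; the exact call sites live outside this file.
 */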

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_NOWAIT);
		/* XXX Erg. */
		if (nvp->v_specinfo == NULL) {
			simple_unlock(&spechash_slock);
			uvm_wait("checkalias");
			goto loop;
		}

		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set the
 * vnode is being eliminated in vgone. In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			simple_unlock(&vp->v_interlock);
			return EBUSY;
		}
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow, vp %p", vp);
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uobj.uo_npages;
		uvmexp.vnodepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;
	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt vp %p", vp);
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uobj.uo_npages;
		uvmexp.vnodepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}
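
/*
 * Reference-count lifecycle (a summary sketch, not from the original
 * file): a caller that wants a locked, referenced vnode does
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... use vp ...
 *		vput(vp);		unlock and drop the reference
 *	}
 *
 * whereas a caller holding an unlocked reference drops it with
 * vrele(vp).  VHOLD()/HOLDRELE() (vhold()/holdrele() below) manage
 * the separate buffer/page hold count, not the use count.
 */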

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt vp %p", vp);
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */

	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required, vp %p", vp);
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow, vp %p", vp);
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so, we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		/* We have the vnode interlock. */
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	if (vp->v_flag & VTEXT) {
		uvmexp.vtextpages -= vp->v_uobj.uo_npages;
		uvmexp.vnodepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~VTEXT;

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */

			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}
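
/*
 * Example (a sketch): a filesystem's inactive routine can use
 * vrecycle() to throw away a vnode whose file has been deleted:
 *
 *	if (ip->i_mode == 0)
 *		vrecycle(vp, NULL, p);
 *
 * (ip here stands for the filesystem's private per-vnode data;
 * the test is illustrative, borrowed from the ufs code.)
 */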

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD | PNORELOCK,
		    "vgone", 0, &vp->v_interlock);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * If special device, remove it from the special device alias list,
	 * if it is on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (vp->v_hashchain != NULL) {
			if (*vp->v_hashchain == vp) {
				*vp->v_hashchain = vp->v_specnext;
			} else {
				for (vq = *vp->v_hashchain; vq;
				     vq = vq->v_specnext) {
					if (vq->v_specnext != vp)
						continue;
					vq->v_specnext = vp->v_specnext;
					break;
				}
				if (vq == NULL)
					panic("missing bdev");
			}
			if (vp->v_flag & VALIASED) {
				vx = NULL;
				for (vq = *vp->v_hashchain; vq;
				     vq = vq->v_specnext) {
					if (vq->v_rdev != vp->v_rdev ||
					    vq->v_type != vp->v_type)
						continue;
					if (vx)
						break;
					vx = vq;
				}
				if (vx == NULL)
					panic("missing alias");
				if (vq == NULL)
					vx->v_flag &= ~VALIASED;
				vp->v_flag &= ~VALIASED;
			}
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean, vp %p", vp);
		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
		    TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp &&
		    (vq->v_flag & VXLOCK) == 0) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static const char * const typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
	    vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	struct vfsconf vfc;
	extern const char * const mountcompatnames[];
	extern int nmountcompatnames;
#endif
	struct vfsops *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	/* Not generic: goes to file system. */
	if (name[0] != VFS_GENERIC) {
		static const struct ctlname vfsnames[VFS_MAXID+1]=CTL_VFS_NAMES;
		const char *vfsname;

		if (name[0] < 0 || name[0] > VFS_MAXID
		    || (vfsname = vfsnames[name[0]].ctl_name) == NULL)
			return (EOPNOTSUPP);

		vfsp = vfs_getopsbyname(vfsname);
		if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	/* The rest are generic vfs sysctls. */
	switch (name[1]) {
	case VFS_USERMOUNT:
		return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	case VFS_MAXTYPENUM:
		/*
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
	case VFS_CONF:
		/*
		 * Special: a node, next is a file system name.
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		if (name[2] >= nmountcompatnames || name[2] < 0 ||
		    mountcompatnames[name[2]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfc.vfc_vfsops = vfsp;
		strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
		vfc.vfc_typenum = name[2];
		vfc.vfc_refcount = vfsp->vfs_refcount;
		vfc.vfc_flags = 0;
		vfc.vfc_mountroot = vfsp->vfs_mountroot;
		vfc.vfc_next = NULL;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
		    sizeof(struct vfsconf)));
#endif
	default:
		break;
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				return (ENOMEM);
			}
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}
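
/*
 * Example (a sketch): a disk filesystem's mount routine typically
 * rejects a device that already has a filesystem mounted on it:
 *
 *	if ((error = vfs_mountedon(devvp)) != 0)
 *		return (error);
 *
 * (devvp being the vnode for the block device to be mounted;
 * cf. the ffs mount code.)
 */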

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
        struct mount *mp;
        struct netexport *nep;
        struct export_args *argp;
{
        struct netcred *np, *enp;
        struct radix_node_head *rnh;
        int i;
        struct radix_node *rn;
        struct sockaddr *saddr, *smask = 0;
        struct domain *dom;
        int error;

        if (argp->ex_addrlen == 0) {
                if (mp->mnt_flag & MNT_DEFEXPORTED)
                        return (EPERM);
                np = &nep->ne_defexported;
                np->netc_exflags = argp->ex_flags;
                np->netc_anon = argp->ex_anon;
                np->netc_anon.cr_ref = 1;
                mp->mnt_flag |= MNT_DEFEXPORTED;
                return (0);
        }

        if (argp->ex_addrlen > MLEN)
                return (EINVAL);

        i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
        np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
        memset((caddr_t)np, 0, i);
        saddr = (struct sockaddr *)(np + 1);
        error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
        if (error)
                goto out;
        if (saddr->sa_len > argp->ex_addrlen)
                saddr->sa_len = argp->ex_addrlen;
        if (argp->ex_masklen) {
                smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
                error = copyin(argp->ex_mask, (caddr_t)smask,
                    argp->ex_masklen);
                if (error)
                        goto out;
                if (smask->sa_len > argp->ex_masklen)
                        smask->sa_len = argp->ex_masklen;
        }
        i = saddr->sa_family;
        if ((rnh = nep->ne_rtable[i]) == 0) {
                /*
                 * Seems silly to initialize every AF when most are not
                 * used; do so on demand here.
                 */
                for (dom = domains; dom; dom = dom->dom_next)
                        if (dom->dom_family == i && dom->dom_rtattach) {
                                dom->dom_rtattach((void **)&nep->ne_rtable[i],
                                    dom->dom_rtoffset);
                                break;
                        }
                if ((rnh = nep->ne_rtable[i]) == 0) {
                        error = ENOBUFS;
                        goto out;
                }
        }
        rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
            np->netc_rnodes);
        if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
                if (rn == 0) {
                        enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
                            smask, rnh);
                        if (enp == 0) {
                                error = EPERM;
                                goto out;
                        }
                } else
                        enp = (struct netcred *)rn;

                if (enp->netc_exflags != argp->ex_flags ||
                    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
                    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
                    enp->netc_anon.cr_ngroups != argp->ex_anon.cr_ngroups ||
                    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
                        enp->netc_anon.cr_ngroups))
                        error = EPERM;
                else
                        error = 0;
                goto out;
        }
        np->netc_exflags = argp->ex_flags;
        np->netc_anon = argp->ex_anon;
        np->netc_anon.cr_ref = 1;
        return (0);
out:
        free(np, M_NETADDR);
        return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
        struct radix_node *rn;
        void *w;
{
        struct radix_node_head *rnh = (struct radix_node_head *)w;

        (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
        free((caddr_t)rn, M_NETADDR);
        return (0);
}
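
/*
 * Layout note (added; derived from vfs_hang_addrlist() above): each
 * non-default export entry lives in a single M_NETADDR allocation, with
 * the address and mask packed behind the netcred so that the one
 * free(np, M_NETADDR) in vfs_free_netcred() releases everything:
 *
 *	+----------------+--------------------+--------------------+
 *	| struct netcred | struct sockaddr    | struct sockaddr    |
 *	| netc_rnodes[]  | (export address)   | (optional mask)    |
 *	+----------------+--------------------+--------------------+
 *	np               saddr == (np + 1)    (caddr_t)saddr + ex_addrlen
 *
 * netc_rnodes is what rnh_addaddr() links into the per-AF radix tree,
 * which is why the radix node and the netcred are freed together.
 */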

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
        struct netexport *nep;
{
        int i;
        struct radix_node_head *rnh;

        for (i = 0; i <= AF_MAX; i++)
                if ((rnh = nep->ne_rtable[i]) != NULL) {
                        (*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
                        free((caddr_t)rnh, M_RTABLE);
                        nep->ne_rtable[i] = 0;
                }
}

int
vfs_export(mp, nep, argp)
        struct mount *mp;
        struct netexport *nep;
        struct export_args *argp;
{
        int error;

        if (argp->ex_flags & MNT_DELEXPORT) {
                if (mp->mnt_flag & MNT_EXPUBLIC) {
                        vfs_setpublicfs(NULL, NULL, NULL);
                        mp->mnt_flag &= ~MNT_EXPUBLIC;
                }
                vfs_free_addrlist(nep);
                mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
        }
        if (argp->ex_flags & MNT_EXPORTED) {
                if (argp->ex_flags & MNT_EXPUBLIC) {
                        if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
                                return (error);
                        mp->mnt_flag |= MNT_EXPUBLIC;
                }
                if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
                        return (error);
                mp->mnt_flag |= MNT_EXPORTED;
        }
        return (0);
}

/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
        struct mount *mp;
        struct netexport *nep;
        struct export_args *argp;
{
        int error;
        struct vnode *rvp;
        char *cp;

        /*
         * mp == NULL -> invalidate the current info; the FS is
         * no longer exported.  May be called from either vfs_export
         * or unmount, so check if it hasn't already been done.
         */
        if (mp == NULL) {
                if (nfs_pub.np_valid) {
                        nfs_pub.np_valid = 0;
                        if (nfs_pub.np_index != NULL) {
                                FREE(nfs_pub.np_index, M_TEMP);
                                nfs_pub.np_index = NULL;
                        }
                }
                return (0);
        }

        /*
         * Only one allowed at a time.
         */
        if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
                return (EBUSY);

        /*
         * Get real filehandle for root of exported FS.
         */
        memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
        nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

        if ((error = VFS_ROOT(mp, &rvp)))
                return (error);

        if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
                vput(rvp);      /* don't leak the locked root vnode */
                return (error);
        }

        vput(rvp);

        /*
         * If an indexfile was specified, pull it in.
         */
        if (argp->ex_indexfile != NULL) {
                MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
                    M_WAITOK);
                error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
                    MAXNAMLEN, (size_t *)0);
                if (!error) {
                        /*
                         * Check for illegal filenames.
                         */
                        for (cp = nfs_pub.np_index; *cp; cp++) {
                                if (*cp == '/') {
                                        error = EINVAL;
                                        break;
                                }
                        }
                }
                if (error) {
                        FREE(nfs_pub.np_index, M_TEMP);
                        return (error);
                }
        }

        nfs_pub.np_mount = mp;
        nfs_pub.np_valid = 1;
        return (0);
}
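
/*
 * Illustrative sketch (not part of the original source): the NFS server
 * side resolves a client's credentials against a mount point roughly
 * like this, where nam is an mbuf holding the client's sockaddr and nep
 * points at the file system's struct netexport:
 *
 *	struct netcred *np;
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);	(client is not exported to)
 *
 * and then uses np->netc_exflags and np->netc_anon when serving the
 * request.
 */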

struct netcred *
vfs_export_lookup(mp, nep, nam)
        struct mount *mp;
        struct netexport *nep;
        struct mbuf *nam;
{
        struct netcred *np;
        struct radix_node_head *rnh;
        struct sockaddr *saddr;

        np = NULL;
        if (mp->mnt_flag & MNT_EXPORTED) {
                /*
                 * Lookup in the export list first.
                 */
                if (nam != NULL) {
                        saddr = mtod(nam, struct sockaddr *);
                        rnh = nep->ne_rtable[saddr->sa_family];
                        if (rnh != NULL) {
                                np = (struct netcred *)
                                    (*rnh->rnh_matchaddr)((caddr_t)saddr,
                                    rnh);
                                if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
                                        np = NULL;
                        }
                }
                /*
                 * If no address match, use the default if it exists.
                 */
                if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
                        np = &nep->ne_defexported;
        }
        return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
        enum vtype type;
        mode_t file_mode;
        uid_t uid;
        gid_t gid;
        mode_t acc_mode;
        struct ucred *cred;
{
        mode_t mask;

        /*
         * Super-user always gets read/write access, but execute access
         * depends on at least one execute bit being set.
         */
        if (cred->cr_uid == 0) {
                if ((acc_mode & VEXEC) && type != VDIR &&
                    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
                        return (EACCES);
                return (0);
        }

        mask = 0;

        /* Otherwise, check the owner. */
        if (cred->cr_uid == uid) {
                if (acc_mode & VEXEC)
                        mask |= S_IXUSR;
                if (acc_mode & VREAD)
                        mask |= S_IRUSR;
                if (acc_mode & VWRITE)
                        mask |= S_IWUSR;
                return ((file_mode & mask) == mask ? 0 : EACCES);
        }

        /* Otherwise, check the groups. */
        if (cred->cr_gid == gid || groupmember(gid, cred)) {
                if (acc_mode & VEXEC)
                        mask |= S_IXGRP;
                if (acc_mode & VREAD)
                        mask |= S_IRGRP;
                if (acc_mode & VWRITE)
                        mask |= S_IWGRP;
                return ((file_mode & mask) == mask ? 0 : EACCES);
        }

        /* Otherwise, check everyone else. */
        if (acc_mode & VEXEC)
                mask |= S_IXOTH;
        if (acc_mode & VREAD)
                mask |= S_IROTH;
        if (acc_mode & VWRITE)
                mask |= S_IWOTH;
        return ((file_mode & mask) == mask ? 0 : EACCES);
}
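
/*
 * Illustrative sketch (not part of the original source): a VOP_ACCESS
 * implementation normally finishes by handing its permission bits to
 * vaccess(); in ufs terms, roughly:
 *
 *	return (vaccess(vp->v_type, ip->i_ffs_mode & ALLPERMS,
 *	    ip->i_ffs_uid, ip->i_ffs_gid, mode, ap->a_cred));
 *
 * The field names here are ufs's; any file system supplies its own
 * notion of file mode, owner and group.
 */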

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
        struct proc *p;
{
        struct mount *mp, *nmp;
        int allerror, error;

        for (allerror = 0,
            mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
                nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
                printf("unmounting %s (%s)...\n",
                    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
                /*
                 * XXX Freeze syncer.  Must do this before locking the
                 * mount point.  See dounmount() for details.
                 */
                lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
                if (vfs_busy(mp, 0, 0)) {
                        lockmgr(&syncer_lock, LK_RELEASE, NULL);
                        continue;
                }
                if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
                        printf("unmount of %s failed with error %d\n",
                            mp->mnt_stat.f_mntonname, error);
                        allerror = 1;
                }
        }
        if (allerror)
                printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
        struct buf *bp;
        int iter, nbusy, nbusy_prev = 0, dcount, s;
        struct proc *p = curproc;

        /* XXX we're certainly not running in proc0's context! */
        if (p == NULL)
                p = &proc0;

        printf("syncing disks... ");

        /* remove user processes from run queue */
        suspendsched();
        (void) spl0();

        /* avoid coming back this way again if we panic. */
        doing_shutdown = 1;

        sys_sync(p, NULL, NULL);

        /* Wait for sync to finish. */
        dcount = 10000;
        for (iter = 0; iter < 20;) {
                nbusy = 0;
                for (bp = &buf[nbuf]; --bp >= buf; ) {
                        if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
                                nbusy++;
                        /*
                         * With soft updates, some buffers that are
                         * written will be remarked as dirty until other
                         * buffers are written.
                         */
                        if (bp->b_vp && bp->b_vp->v_mount
                            && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
                            && (bp->b_flags & B_DELWRI)) {
                                s = splbio();
                                bremfree(bp);
                                bp->b_flags |= B_BUSY;
                                splx(s);
                                nbusy++;
                                bawrite(bp);
                                if (dcount-- <= 0) {
                                        printf("softdep ");
                                        goto fail;
                                }
                        }
                }
                if (nbusy == 0)
                        break;
                if (nbusy_prev == 0)
                        nbusy_prev = nbusy;
                printf("%d ", nbusy);
                tsleep(&nbusy, PRIBIO, "bflush",
                    (iter == 0) ? 1 : hz / 25 * iter);
                if (nbusy >= nbusy_prev) /* we didn't flush anything */
                        iter++;
                else
                        nbusy_prev = nbusy;
        }
        if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
                printf("giving up\nPrinting vnodes for busy buffers\n");
                for (bp = &buf[nbuf]; --bp >= buf; )
                        if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
                                vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
                Debugger();
#endif

#else /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
                printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
                return;
        } else
                printf("done\n");

        /*
         * If we've panic'd, don't make the situation potentially
         * worse by unmounting the file systems.
         */
        if (panicstr != NULL)
                return;

        /* Release inodes held by texts before update. */
#ifdef notdef
        vnshutdown();
#endif
        /* Unmount file systems. */
        vfs_unmountall(p);
}
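
/*
 * Illustrative call site (an assumption about the machine-dependent
 * code, which is not part of this file): each port's reboot path is
 * expected to run vfs_shutdown() before halting, along the lines of:
 *
 *	void
 *	cpu_reboot(howto, bootstr)
 *		int howto;
 *		char *bootstr;
 *	{
 *		if ((howto & RB_NOSYNC) == 0)
 *			vfs_shutdown();
 *		...
 *	}
 */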

/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
        extern int (*mountroot) __P((void));
        struct vfsops *v;

        if (root_device == NULL)
                panic("vfs_mountroot: root device unknown");

        switch (root_device->dv_class) {
        case DV_IFNET:
                if (rootdev != NODEV)
                        panic("vfs_mountroot: rootdev set for DV_IFNET");
                break;

        case DV_DISK:
                if (rootdev == NODEV)
                        panic("vfs_mountroot: rootdev not set for DV_DISK");
                break;

        default:
                printf("%s: inappropriate for root file system\n",
                    root_device->dv_xname);
                return (ENODEV);
        }

        /*
         * If the user specified a file system, use it.
         */
        if (mountroot != NULL)
                return ((*mountroot)());

        /*
         * Try each file system currently configured into the kernel.
         */
        for (v = LIST_FIRST(&vfs_list); v != NULL;
            v = LIST_NEXT(v, vfs_list)) {
                if (v->vfs_mountroot == NULL)
                        continue;
#ifdef DEBUG
                printf("mountroot: trying %s...\n", v->vfs_name);
#endif
                if ((*v->vfs_mountroot)() == 0) {
                        printf("root file system type: %s\n", v->vfs_name);
                        break;
                }
        }

        if (v == NULL) {
                printf("no file system for %s", root_device->dv_xname);
                if (root_device->dv_class == DV_DISK)
                        printf(" (dev 0x%x)", rootdev);
                printf("\n");
                return (EFTYPE);
        }
        return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
        const char *name;
{
        struct vfsops *v;

        for (v = LIST_FIRST(&vfs_list); v != NULL;
            v = LIST_NEXT(v, vfs_list)) {
                if (strcmp(v->vfs_name, name) == 0)
                        break;
        }

        return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
        struct vfsops *vfs;
{
        struct vfsops *v;
        int error = 0;

        /*
         * Make sure this file system doesn't already exist.
         */
        LIST_FOREACH(v, &vfs_list, vfs_list) {
                if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
                        error = EEXIST;
                        goto out;
                }
        }

        /*
         * Initialize the vnode operations for this file system.
         */
        vfs_opv_init(vfs->vfs_opv_descs);

        /*
         * Now initialize the file system itself.
         */
        (*vfs->vfs_init)();

        /*
         * ...and link it into the kernel's list.
         */
        LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

        /*
         * Sanity: make sure the reference count is 0.
         */
        vfs->vfs_refcount = 0;

out:
        return (error);
}
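
/*
 * Illustrative sketch (not part of the original source): a file system
 * built as a loadable module would register itself with vfs_attach()
 * and withdraw with vfs_detach(); examplefs_vfsops is hypothetical:
 *
 *	extern struct vfsops examplefs_vfsops;
 *
 *	if ((error = vfs_attach(&examplefs_vfsops)) != 0)
 *		return (error);
 *	...
 *	error = vfs_detach(&examplefs_vfsops);
 *
 * vfs_detach() (below) fails with EBUSY while any instance is still
 * mounted, since the reference count is then nonzero.
 */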

/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
        struct vfsops *vfs;
{
        struct vfsops *v;

        /*
         * Make sure no one is using the filesystem.
         */
        if (vfs->vfs_refcount != 0)
                return (EBUSY);

        /*
         * ...and remove it from the kernel's list.
         */
        LIST_FOREACH(v, &vfs_list, vfs_list) {
                if (v == vfs) {
                        LIST_REMOVE(v, vfs_list);
                        break;
                }
        }

        if (v == NULL)
                return (ESRCH);

        /*
         * Now run the file system-specific cleanups.
         */
        (*vfs->vfs_done)();

        /*
         * Free the vnode operations vector.
         */
        vfs_opv_free(vfs->vfs_opv_descs);
        return (0);
}

void
vfs_reinit(void)
{
        struct vfsops *vfs;

        LIST_FOREACH(vfs, &vfs_list, vfs_list) {
                if (vfs->vfs_reinit) {
                        (*vfs->vfs_reinit)();
                }
        }
}

#ifdef DDB
const char buf_flagbits[] =
        "\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
        "\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
        "\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
        "\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
        struct buf *bp;
        int full;
        void (*pr) __P((const char *, ...));
{
        char buf[1024];

        (*pr)(" vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
            bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

        bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
        (*pr)(" error %d flags 0x%s\n", bp->b_error, buf);

        (*pr)(" bufsize 0x%x bcount 0x%x resid 0x%x\n",
            bp->b_bufsize, bp->b_bcount, bp->b_resid);
        (*pr)(" data %p saveaddr %p dep %p\n",
            bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
        (*pr)(" iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
        "\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\11XLOCK\12XWANT\13BWAIT\14ALIASED"
        "\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";

const char *vnode_types[] = {
        "VNON",
        "VREG",
        "VDIR",
        "VBLK",
        "VCHR",
        "VLNK",
        "VSOCK",
        "VFIFO",
        "VBAD",
};

const char *vnode_tags[] = {
        "VT_NON",
        "VT_UFS",
        "VT_NFS",
        "VT_MFS",
        "VT_MSDOSFS",
        "VT_LFS",
        "VT_LOFS",
        "VT_FDESC",
        "VT_PORTAL",
        "VT_NULL",
        "VT_UMAP",
        "VT_KERNFS",
        "VT_PROCFS",
        "VT_AFS",
        "VT_ISOFS",
        "VT_UNION",
        "VT_ADOSFS",
        "VT_EXT2FS",
        "VT_CODA",
        "VT_FILECORE",
        "VT_NTFS",
        "VT_VFS",
        "VT_OVERLAY"
};

void
vfs_vnode_print(vp, full, pr)
        struct vnode *vp;
        int full;
        void (*pr) __P((const char *, ...));
{
        char buf[256];
        const char *vtype, *vtag;

        uvm_object_printit(&vp->v_uobj, full, pr);
        bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
        (*pr)("\nVNODE flags %s\n", buf);
        (*pr)("mp %p numoutput %d size 0x%llx\n",
            vp->v_mount, vp->v_numoutput, vp->v_size);

        (*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
            vp->v_data, vp->v_usecount, vp->v_writecount,
            vp->v_holdcnt, vp->v_numoutput);

        vtype = (vp->v_type >= 0 &&
            vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
            vnode_types[vp->v_type] : "UNKNOWN";
        vtag = (vp->v_tag >= 0 &&
            vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
            vnode_tags[vp->v_tag] : "UNKNOWN";

        (*pr)("type %s(%d) tag %s(%d) id 0x%x mount %p typedata %p\n",
            vtype, vp->v_type, vtag, vp->v_tag,
            vp->v_id, vp->v_mount, vp->v_mountedhere);

        if (full) {
                struct buf *bp;

                (*pr)("clean bufs:\n");
                LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
                        (*pr)(" bp %p\n", bp);
                        vfs_buf_print(bp, full, pr);
                }

                (*pr)("dirty bufs:\n");
                LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
                        (*pr)(" bp %p\n", bp);
                        vfs_buf_print(bp, full, pr);
                }
        }
}
#endif
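
/*
 * Illustrative note (added, an assumption about the DDB glue, which
 * lives outside this file): these printers take DDB's db_printf as the
 * output callback, e.g.
 *
 *	vfs_vnode_print(vp, 1, db_printf);
 *
 * dumps a vnode together with its clean and dirty buffer lists; whether
 * a given kernel wires them to commands such as "show vnode" depends on
 * its DDB configuration.
 */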