/*	$NetBSD: vfs_subr.c,v 1.142 2000/11/27 08:39:44 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
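
/*
 * Note that bufremvn() leaves b_vnbufs.le_next set to the NOLIST
 * sentinel; brelvp() and reassignbuf() below test that field against
 * NOLIST to decide whether a buffer is still on a vnode's clean or
 * dirty list.
 */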
/* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;		/* root device */

struct pool vnode_pool;			/* memory pool for vnodes */

/*
 * Local declarations.
 */
void insmntque __P((struct vnode *, struct mount *));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
void vgoneall __P((struct vnode *));

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
    struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);

	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
{
	int lkflags;

	while (mp->mnt_flag & MNT_UNMOUNT) {
		int gone;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curproc)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * XXX MP: add spinlock protecting mnt_wcnt here once you
		 * can atomically unlock-and-sleep.
		 */
		mp->mnt_wcnt++;
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		mp->mnt_wcnt--;
		gone = mp->mnt_flag & MNT_GONE;

		if (mp->mnt_wcnt == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
	struct mount *mp;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}
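
/*
 * Illustrative caller pattern (getnewvnode() below is one real example;
 * assumes no interlock is held):
 *
 *	error = vfs_busy(mp, LK_RECURSEFAIL, NULL);
 *	if (error && error != EDEADLK)
 *		return (error);
 *	... work on the mount ...
 *	if (error != EDEADLK)
 *		vfs_unbusy(mp);
 *
 * EDEADLK means the current process is itself the unmounter, in which
 * case no busy reference is held but the operation may proceed.
 */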

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	for (vfsp = LIST_FIRST(&vfs_list); vfsp != NULL;
	     vfsp = LIST_NEXT(vfsp, vfs_list))
		if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = makefstype(mp->mnt_op->vfs_name);
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	const char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
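
/*
 * Example: makefstype("ffs") folds the name to
 *	rv = 0x66				('f')
 *	rv = (0x66 << 2) ^ 0x66  = 0x1fe	('f')
 *	rv = (0x1fe << 2) ^ 0x73 = 0x78b	('s')
 * Distinct names can collide under this shift-and-xor hash, hence the
 * quotes around 'unique' above.
 */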

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	struct vattr *vap;
{

	vap->va_type = VNON;

	/*
	 * Assign individually so that it is safe even if size and
	 * sign of each member are varied.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_vaflags = 0;
}
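
/*
 * Callers preparing a VOP_SETATTR() conventionally start from a null
 * vattr, e.g. (sketch):
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 *
 * so that every field still holding VNOVAL is left alone by the file
 * system.
 */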

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	extern struct uvm_pagerops uvm_vnodeops;
	struct uvm_object *uobj;
	struct proc *p = curproc;	/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int error = 0;
#ifdef DIAGNOSTIC
	int s;
#endif
	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock).
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will recycle a vnode from the vnode_hold_list, and half the
	 * time we will allocate a new one, unless the list has grown to
	 * twice the desired size. We are reluctant to recycle vnodes
	 * from the vnode_hold_list because we will lose the identity of
	 * all its referencing buffers.
	 */

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	if (numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(listhd = &vnode_free_list) == NULL &&
	     (TAILQ_FIRST(listhd = &vnode_hold_list) == NULL || toggle))) {
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		memset(vp, 0, sizeof(*vp));
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		     vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0) {
					break;
				}
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			simple_unlock(&vnode_free_list_slock);
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't, vp %p", vp);
		TAILQ_REMOVE(listhd, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		simple_unlock(&vnode_free_list_slock);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data)
			panic("cleaned vnode isn't, vp %p", vp);
		s = splbio();
		if (vp->v_numoutput)
			panic("clean vnode has pending I/O's, vp %p", vp);
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	lockinit(&vp->v_glock, PVFS, "glock", 0, 0);
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);

	/*
	 * initialize uvm_object within vnode.
	 */

	uobj = &vp->v_uvm.u_obj;
	uobj->pgops = &uvm_vnodeops;
	TAILQ_INIT(&uobj->memq);
	vp->v_uvm.u_size = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}

/*
 * This is really just the reverse of getnewvnode(). Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vp)
	struct vnode *vp;
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 1)
		panic("ungetnewvnode: busy vnode");
#endif
	vp->v_usecount--;
	insmntque(vp, NULL);
	vp->v_type = VBAD;

	simple_lock(&vp->v_interlock);
	/*
	 * Insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput, vp %p", vp);
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj, 0, 0, flushflags);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			return EIO;
		}
	}
	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
		if (error)
			return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
			panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	splx(s);

	return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t lbn;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;

	s = splbio();
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj,
		    round_page(lbn << vp->v_mount->mnt_fs_bshift),
		    vp->v_uvm.u_size, PGO_FREE);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			splx(s);
			return EIO;
		}
	}

restart:
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	splx(s);

	return (0);
}

void
vflushbuf(vp, sync)
	struct vnode *vp;
	int sync;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s;

	if (vp->v_type == VREG) {
		int flags = PGO_CLEANIT | PGO_ALLPAGES |
		    (sync ? PGO_SYNCIO : 0);

		simple_lock(&uobj->vmobjlock);
		(uobj->pgops->pgo_flush)(uobj, 0, 0, flags);
		simple_unlock(&uobj->vmobjlock);
	}

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_flags |= B_BUSY | B_VFLUSH;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	struct vnode *vp;
	struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free, bp %p", bp);
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == NULL)
		panic("brelvp: vp NULL, bp %p", bp);

	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);

	if (vp->v_type != VREG && (vp->v_flag & VONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}

	bp->b_vp = NULL;
	HOLDRELE(vp);
	splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		if (newvp->v_type != VREG &&
		    (newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
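
/*
 * For instance, mountroot code conventionally attaches the root block
 * device with (sketch; rootvp is the usual global for it):
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't set up root block device vnode");
 */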

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set the
 * vnode is being eliminated in vgone. In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return EBUSY;
		}
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD|PNORELOCK,
		    "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow, vp %p", vp);
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~VTEXT;
	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt vp %p", vp);
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~VTEXT;
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt vp %p", vp);
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list. The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */

	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required, vp %p", vp);
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow, vp %p", vp);
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}
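
/*
 * A file system's unmount path typically drives this as (sketch):
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
 *		return (error);
 *
 * passing any vnode it must keep (e.g. a quota or device vnode) as
 * skipvp, or marking it VSYSTEM so it survives the flush.
 */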

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		/* We have the vnode interlock. */
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	vp->v_flag &= ~VTEXT;

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */

			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_vnlock) {
				if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
					vprint("vclean: lock not drained", vp);
			}
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD | PNORELOCK,
		    "vgone", 0, &vp->v_interlock);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from the special device alias list,
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (vp->v_hashchain != NULL) {
			if (*vp->v_hashchain == vp) {
				*vp->v_hashchain = vp->v_specnext;
			} else {
				for (vq = *vp->v_hashchain; vq;
				     vq = vq->v_specnext) {
					if (vq->v_specnext != vp)
						continue;
					vq->v_specnext = vp->v_specnext;
					break;
				}
				if (vq == NULL)
					panic("missing bdev");
			}
			if (vp->v_flag & VALIASED) {
				vx = NULL;
				for (vq = *vp->v_hashchain; vq;
				     vq = vq->v_specnext) {
					if (vq->v_rdev != vp->v_rdev ||
					    vq->v_type != vp->v_type)
						continue;
					if (vx)
						break;
					vx = vq;
				}
				if (vx == NULL)
					panic("missing alias");
				if (vq == NULL)
					vx->v_flag &= ~VALIASED;
				vp->v_flag &= ~VALIASED;
			}
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean, vp %p", vp);
		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
		    TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO",
      "VBAD" };

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
	    vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
}
#endif

extern const char *mountcompatnames[];
extern const int nmountcompatnames;

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	struct vfsconf vfc;
#endif
	struct vfsops *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	/* Not generic: goes to file system. */
	if (name[0] != VFS_GENERIC) {
		if (name[0] >= nmountcompatnames || name[0] < 0 ||
		    mountcompatnames[name[0]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[0]]);
		if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	/* The rest are generic vfs sysctls. */
	switch (name[1]) {
	case VFS_USERMOUNT:
		return sysctl_int(oldp, oldlenp, newp, newlen,
		    &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
	case VFS_MAXTYPENUM:
		/*
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
	case VFS_CONF:
		/*
		 * Special: a node, next is a file system name.
		 * Provided for 4.4BSD-Lite2 compatibility.
		 */
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		if (name[2] >= nmountcompatnames || name[2] < 0 ||
		    mountcompatnames[name[2]] == NULL)
			return (EOPNOTSUPP);
		vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfc.vfc_vfsops = vfsp;
		strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
		vfc.vfc_typenum = name[2];
		vfc.vfc_refcount = vfsp->vfs_refcount;
		vfc.vfc_flags = 0;
		vfc.vfc_mountroot = vfsp->vfs_mountroot;
		vfc.vfc_next = NULL;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
		    sizeof(struct vfsconf)));
#endif
	default:
		break;
	}
	return (EOPNOTSUPP);
}
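
/*
 * The VFS_USERMOUNT case above is what a command along the lines of
 * "sysctl -w vfs.generic.usermount=1" reaches (assuming the usual
 * sysctl(8) node names), toggling dovfsusermount.
 */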

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				return (ENOMEM);
			}
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ,
			    VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	struct netcred *np, *enp;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	memset((caddr_t)np, 0, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
		if (rn == 0) {
			enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
			    smask, rnh);
			if (enp == 0) {
				error = EPERM;
				goto out;
			}
		} else
			enp = (struct netcred *)rn;

		if (enp->netc_exflags != argp->ex_flags ||
		    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
		    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
		    enp->netc_anon.cr_ngroups != argp->ex_anon.cr_ngroups ||
		    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
			enp->netc_anon.cr_ngroups))
			error = EPERM;
		else
			error = 0;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/*
 * Callback from rnh_walktree(): delete one net address entry from the
 * radix tree and free it.
 */
/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = NULL;
		}
}

/*
 * Update the export information for a mount point: delete existing
 * exports and/or establish new ones, as directed by argp->ex_flags.
 */
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
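
/*
 * Hedged usage sketch (not compiled): how a file system's mount routine
 * might drop all of its exports via vfs_export() above.  The helper name
 * is hypothetical; MNT_DELEXPORT and the structure types are real.
 */
#if 0
static int
example_unexport(mp, nep)
	struct mount *mp;
	struct netexport *nep;
{
	struct export_args ea;

	memset(&ea, 0, sizeof(ea));
	ea.ex_flags = MNT_DELEXPORT;	/* tear down the address lists */
	return (vfs_export(mp, nep, &ea));
}
#endif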

/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	/*
	 * We are done with the root vnode regardless of whether
	 * VFS_VPTOFH succeeds, so release it on both paths.
	 */
	error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid);
	vput(rvp);
	if (error)
		return (error);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

/*
 * Look up the export credentials (netcred) matching the address in nam,
 * falling back to the default export entry if one is defined.
 */
struct netcred *
vfs_export_lookup(mp, nep, nam)
	struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && (np->netc_rnodes->rn_flags &
				    RNF_ROOT) != 0)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid, and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
	enum vtype type;
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/*
	 * Super-user always gets read/write access, but execute access
	 * depends on at least one execute bit being set.
	 */
	if (cred->cr_uid == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
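
/*
 * Hedged sketch (not compiled): the typical shape of a VOP_ACCESS
 * implementation that defers to vaccess() above, modelled on what the
 * UFS code does.  "example_inode" and its fields are hypothetical
 * stand-ins for a particular file system's in-core inode; ALLPERMS is
 * the real <sys/stat.h> permission mask.
 */
#if 0
static int
example_access(vp, acc_mode, cred)
	struct vnode *vp;
	mode_t acc_mode;
	struct ucred *cred;
{
	struct example_inode *ip = vp->v_data;

	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
	    ip->i_uid, ip->i_gid, acc_mode, cred));
}
#endif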

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
	struct proc *p;
{
	struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	    mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		if (vfs_busy(mp, 0, 0))
			continue;
		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s;
	struct proc *p = curproc;

	/* XXX we're certainly not running in proc0's context! */
	if (p == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* remove user processes from the run queue */
	suspendsched();
	(void) spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20;) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}
	if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif

#else /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		return;
	} else
		printf("done\n");

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(p);
}
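
/*
 * Hedged sketch (not compiled): vfs_shutdown() is meant to be driven
 * from the machine-dependent reboot path, roughly as below.  RB_NOSYNC
 * is the real <sys/reboot.h> flag; "howto" and "syncdone" are
 * hypothetical stand-ins and the exact flow varies per port.
 */
#if 0
	if ((howto & RB_NOSYNC) == 0 && syncdone == 0) {
		syncdone = 1;
		vfs_shutdown();		/* sync disks, then unmount */
	}
#endif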

/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
	extern int (*mountroot) __P((void));
	struct vfsops *v;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET");
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If the user specified a file system, use it.
	 */
	if (mountroot != NULL)
		return ((*mountroot)());

	/*
	 * Try each file system currently configured into the kernel.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		printf("mountroot: trying %s...\n", v->vfs_name);
#endif
		if ((*v->vfs_mountroot)() == 0) {
			printf("root file system type: %s\n", v->vfs_name);
			break;
		}
	}

	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		return (EFTYPE);
	}
	return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
	const char *name;
{
	struct vfsops *v;

	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}

	return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

out:
	return (error);
}
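
/*
 * Hedged sketch (not compiled): a file system registers itself by
 * handing its vfsops to vfs_attach() above.  "examplefs_vfsops" and
 * "examplefs_init" are hypothetical stand-ins.
 */
#if 0
extern struct vfsops examplefs_vfsops;

void
examplefs_init()
{
	int error;

	if ((error = vfs_attach(&examplefs_vfsops)) != 0)
		printf("examplefs: vfs_attach failed (%d)\n", error);
}
#endif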

/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}

#ifdef DDB
const char buf_flagbits[] =
	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
	"\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
	struct buf *bp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[1024];

	(*pr)(" vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
	(*pr)(" error %d flags 0x%s\n", bp->b_error, buf);

	(*pr)(" bufsize 0x%x bcount 0x%x resid 0x%x\n",
	    bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)(" data %p saveaddr %p dep %p\n",
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)(" iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\11XLOCK\12XWANT\13BWAIT\14ALIASED"
	"\15DIROP\17DIRTY";

const char *vnode_types[] = {
	"VNON",
	"VREG",
	"VDIR",
	"VBLK",
	"VCHR",
	"VLNK",
	"VSOCK",
	"VFIFO",
	"VBAD",
};

const char *vnode_tags[] = {
	"VT_NON",
	"VT_UFS",
	"VT_NFS",
	"VT_MFS",
	"VT_MSDOSFS",
	"VT_LFS",
	"VT_LOFS",
	"VT_FDESC",
	"VT_PORTAL",
	"VT_NULL",
	"VT_UMAP",
	"VT_KERNFS",
	"VT_PROCFS",
	"VT_AFS",
	"VT_ISOFS",
	"VT_UNION",
	"VT_ADOSFS",
	"VT_EXT2FS",
	"VT_CODA",
	"VT_FILECORE",
	"VT_NTFS",
	"VT_VFS",
	"VT_OVERLAY",
};

void
vfs_vnode_print(vp, full, pr)
	struct vnode *vp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[256];
	const char *vtype, *vtag;

	uvm_object_printit(&vp->v_uvm.u_obj, full, pr);
	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
	(*pr)("\nVNODE flags %s\n", buf);
	(*pr)("mp %p nio %d size 0x%x rwlock 0x%x glock 0x%x\n",
	    vp->v_mount, vp->v_uvm.u_nio, (int)vp->v_uvm.u_size,
	    vp->v_vnlock ? lockstatus(vp->v_vnlock) : 0x999,
	    lockstatus(&vp->v_glock));

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	vtype = (vp->v_type >= 0 &&
	    vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
	    vnode_types[vp->v_type] : "UNKNOWN";
	vtag = (vp->v_tag >= 0 &&
	    vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
	    vnode_tags[vp->v_tag] : "UNKNOWN";

	(*pr)("type %s(%d) tag %s(%d) id 0x%x mount %p typedata %p\n",
	    vtype, vp->v_type, vtag, vp->v_tag,
	    vp->v_id, vp->v_mount, vp->v_mountedhere);
	(*pr)("lastr 0x%x lastw 0x%x lasta 0x%x\n",
	    vp->v_lastr, vp->v_lastw, vp->v_lasta);
	(*pr)("cstart 0x%x clen 0x%x ralen 0x%x maxra 0x%x\n",
	    vp->v_cstart, vp->v_clen, vp->v_ralen, vp->v_maxra);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
#endif /* DDB */
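
/*
 * Note (hedged, not compiled): the flag-bit strings above use the historic
 * kernel "%b" encoding understood by bitmask_snprintf(): the first octal
 * escape (\20 == 16) selects hexadecimal output, and each subsequent \N
 * names bit N, counted from 1.  Decoding B_BUSY|B_READ (0x10 | 0x100000)
 * with buf_flagbits should therefore yield something like
 * "100010<BUSY,READ>":
 */
#if 0
	char bits[64];

	bitmask_snprintf(B_BUSY | B_READ, buf_flagbits, bits, sizeof(bits));
	printf("flags 0x%s\n", bits);
#endif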