/*	$NetBSD: vfs_subr.c,v 1.144 2001/01/08 07:05:47 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
        S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) {						\
        LIST_REMOVE(bp, b_vnbufs);				\
        (bp)->b_vnbufs.le_next = NOLIST;			\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list = vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =			/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =			/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;			/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;		/* root device */

struct pool vnode_pool;			/* memory pool for vnodes */

/*
 * Local declarations.
 */
void	insmntque __P((struct vnode *, struct mount *));
int	getdevvp __P((dev_t, struct vnode **, enum vtype));
void	vgoneall __P((struct vnode *));

static	int vfs_hang_addrlist __P((struct mount *, struct netexport *,
            struct export_args *));
static	int vfs_free_netcred __P((struct radix_node *, void *));
static	void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void	printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

        pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
            0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);

        /*
         * Initialize the filesystem syncer.
         */
        vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
        struct mount *mp;
        int flags;
        struct simplelock *interlkp;
{
        int lkflags;

        while (mp->mnt_flag & MNT_UNMOUNT) {
                int gone;

                if (flags & LK_NOWAIT)
                        return (ENOENT);
                if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
                    && mp->mnt_unmounter == curproc)
                        return (EDEADLK);
                if (interlkp)
                        simple_unlock(interlkp);
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 *
                 * XXX MP: add spinlock protecting mnt_wcnt here once you
                 * can atomically unlock-and-sleep.
                 */
                mp->mnt_wcnt++;
                tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
                mp->mnt_wcnt--;
                gone = mp->mnt_flag & MNT_GONE;

                if (mp->mnt_wcnt == 0)
                        wakeup(&mp->mnt_wcnt);
                if (interlkp)
                        simple_lock(interlkp);
                if (gone)
                        return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (interlkp)
                lkflags |= LK_INTERLOCK;
        if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
        struct mount *mp;
{

        lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
        char *fstypename;
        char *devname;
        struct mount **mpp;
{
        struct vfsops *vfsp = NULL;
        struct mount *mp;

        for (vfsp = LIST_FIRST(&vfs_list); vfsp != NULL;
             vfsp = LIST_NEXT(vfsp, vfs_list))
                if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
                        break;

        if (vfsp == NULL)
                return (ENODEV);
        mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
        memset((char *)mp, 0, (u_long)sizeof(struct mount));
        lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
        (void)vfs_busy(mp, LK_NOWAIT, 0);
        LIST_INIT(&mp->mnt_vnodelist);
        mp->mnt_op = vfsp;
        mp->mnt_flag = MNT_RDONLY;
        mp->mnt_vnodecovered = NULLVP;
        vfsp->vfs_refcount++;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
        mp->mnt_stat.f_mntonname[0] = '/';
        (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}
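
/*
 * Illustrative sketch (not part of the build): the usual pattern for
 * walking the mount list with vfs_busy()/vfs_unbusy(), as used by
 * printlockedvnodes() and sysctl_vnode() below.  A busy mount cannot
 * begin unmounting under us; one that is already unmounting is skipped
 * because vfs_busy() with LK_NOWAIT fails with ENOENT.
 */
#if 0
        struct mount *mp, *nmp;

        simple_lock(&mountlist_slock);
        for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = mp->mnt_list.cqe_next;
                        continue;
                }
                /* ... operate on mp; its vnode list is now stable ... */
                simple_lock(&mountlist_slock);
                nmp = mp->mnt_list.cqe_next;
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);
#endif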

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
        fsid_t *fsid;
{
        struct mount *mp;

        simple_lock(&mountlist_slock);
        for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
             mp = mp->mnt_list.cqe_next) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        simple_unlock(&mountlist_slock);
                        return (mp);
                }
        }
        simple_unlock(&mountlist_slock);
        return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
        struct mount *mp;
{
        static u_short xxxfs_mntid;
        fsid_t tfsid;
        int mtype;

        simple_lock(&mntid_slock);
        mtype = makefstype(mp->mnt_op->vfs_name);
        mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
        mp->mnt_stat.f_fsid.val[1] = mtype;
        if (xxxfs_mntid == 0)
                ++xxxfs_mntid;
        tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
        tfsid.val[1] = mtype;
        if (mountlist.cqh_first != (void *)&mountlist) {
                while (vfs_getvfs(&tfsid)) {
                        tfsid.val[0]++;
                        xxxfs_mntid++;
                }
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
        const char *type;
{
        long rv;

        for (rv = 0; *type; type++) {
                rv <<= 2;
                rv ^= *type;
        }
        return rv;
}
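
/*
 * Worked example of the shift-and-xor hash above: makefstype("ffs")
 * folds in 'f' (0x66), 'f', 's' (0x73) as
 *
 *	rv = 0x66                          after the first 'f'
 *	rv = (0x66 << 2) ^ 0x66  = 0x1fe   after the second 'f'
 *	rv = (0x1fe << 2) ^ 0x73 = 0x78b   after 's'
 *
 * so "ffs" maps to 0x78b.  The number is 'unique' only in the sense of
 * being unlikely to collide for the short type names actually in use.
 */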

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
        struct vattr *vap;
{

        vap->va_type = VNON;

        /*
         * Assign individually so that it is safe even if size and
         * sign of each member are varied.
         */
        vap->va_mode = VNOVAL;
        vap->va_nlink = VNOVAL;
        vap->va_uid = VNOVAL;
        vap->va_gid = VNOVAL;
        vap->va_fsid = VNOVAL;
        vap->va_fileid = VNOVAL;
        vap->va_size = VNOVAL;
        vap->va_blocksize = VNOVAL;
        vap->va_atime.tv_sec =
            vap->va_mtime.tv_sec =
            vap->va_ctime.tv_sec = VNOVAL;
        vap->va_atime.tv_nsec =
            vap->va_mtime.tv_nsec =
            vap->va_ctime.tv_nsec = VNOVAL;
        vap->va_gen = VNOVAL;
        vap->va_flags = VNOVAL;
        vap->va_rdev = VNOVAL;
        vap->va_bytes = VNOVAL;
        vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
        enum vtagtype tag;
        struct mount *mp;
        int (**vops) __P((void *));
        struct vnode **vpp;
{
        extern struct uvm_pagerops uvm_vnodeops;
        struct uvm_object *uobj;
        struct proc *p = curproc;	/* XXX */
        struct freelst *listhd;
        static int toggle;
        struct vnode *vp;
        int error = 0;
#ifdef DIAGNOSTIC
        int s;
#endif
        if (mp) {
                /*
                 * Mark filesystem busy while we're creating a vnode.
                 * If unmount is in progress, this will wait; if the
                 * unmount succeeds (only if umount -f), this will
                 * return an error.  If the unmount fails, we'll keep
                 * going afterwards.
                 * (This puts the per-mount vnode list logically under
                 * the protection of the vfs_busy lock).
                 */
                error = vfs_busy(mp, LK_RECURSEFAIL, 0);
                if (error && error != EDEADLK)
                        return error;
        }

        /*
         * We must choose whether to allocate a new vnode or recycle an
         * existing one.  The criterion for allocating a new one is that
         * the total number of vnodes is less than the number desired or
         * there are no vnodes on either free list.  Generally we only
         * want to recycle vnodes that have no buffers associated with
         * them, so we look first on the vnode_free_list.  If it is empty,
         * we next consider vnodes with referencing buffers on the
         * vnode_hold_list.  The toggle ensures that half the time we
         * will recycle a vnode from the vnode_hold_list, and half the
         * time we will allocate a new one unless the list has grown to
         * twice the desired size.  We are reluctant to recycle vnodes
         * from the vnode_hold_list because we will lose the identity of
         * all their referencing buffers.
         */

        toggle ^= 1;
        if (numvnodes > 2 * desiredvnodes)
                toggle = 0;

        simple_lock(&vnode_free_list_slock);
        if (numvnodes < desiredvnodes ||
            (TAILQ_FIRST(listhd = &vnode_free_list) == NULL &&
             (TAILQ_FIRST(listhd = &vnode_hold_list) == NULL || toggle))) {
                simple_unlock(&vnode_free_list_slock);
                vp = pool_get(&vnode_pool, PR_WAITOK);
                memset(vp, 0, sizeof(*vp));
                simple_lock_init(&vp->v_interlock);
                numvnodes++;
        } else {
                for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
                     vp = TAILQ_NEXT(vp, v_freelist)) {
                        if (simple_lock_try(&vp->v_interlock)) {
                                if ((vp->v_flag & VLAYER) == 0) {
                                        break;
                                }
                                if (VOP_ISLOCKED(vp) == 0)
                                        break;
                                else
                                        simple_unlock(&vp->v_interlock);
                        }
                }
                /*
                 * Unless this is a bad time of the month, at most
                 * the first NCPUS items on the free list are
                 * locked, so this is close enough to being empty.
                 */
                if (vp == NULLVP) {
                        simple_unlock(&vnode_free_list_slock);
                        if (mp && error != EDEADLK)
                                vfs_unbusy(mp);
                        tablefull("vnode", "increase kern.maxvnodes or NVNODE");
                        *vpp = 0;
                        return (ENFILE);
                }
                if (vp->v_usecount)
                        panic("free vnode isn't, vp %p", vp);
                TAILQ_REMOVE(listhd, vp, v_freelist);
                /* see comment on why 0xdeadb is set at end of vgone (below) */
                vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
                simple_unlock(&vnode_free_list_slock);
                vp->v_lease = NULL;
                if (vp->v_type != VBAD)
                        vgonel(vp, p);
                else
                        simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
                if (vp->v_data)
                        panic("cleaned vnode isn't, vp %p", vp);
                s = splbio();
                if (vp->v_numoutput)
                        panic("clean vnode has pending I/O's, vp %p", vp);
                splx(s);
#endif
                vp->v_flag = 0;
                vp->v_lastr = 0;
                vp->v_ralen = 0;
                vp->v_maxra = 0;
                vp->v_lastw = 0;
                vp->v_lasta = 0;
                vp->v_cstart = 0;
                vp->v_clen = 0;
                vp->v_socket = 0;
        }
        vp->v_type = VNON;
        vp->v_vnlock = &vp->v_lock;
        lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
        lockinit(&vp->v_glock, PVFS, "glock", 0, 0);
        cache_purge(vp);
        vp->v_tag = tag;
        vp->v_op = vops;
        insmntque(vp, mp);
        *vpp = vp;
        vp->v_usecount = 1;
        vp->v_data = 0;
        simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);

        /*
         * initialize uvm_object within vnode.
         */

        uobj = &vp->v_uvm.u_obj;
        uobj->pgops = &uvm_vnodeops;
        TAILQ_INIT(&uobj->memq);
        vp->v_uvm.u_size = VSIZENOTSET;

        if (mp && error != EDEADLK)
                vfs_unbusy(mp);
        return (0);
}

/*
 * This is really just the reverse of getnewvnode().  Needed for
 * VFS_VGET functions which may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vp)
        struct vnode *vp;
{
#ifdef DIAGNOSTIC
        if (vp->v_usecount != 1)
                panic("ungetnewvnode: busy vnode");
#endif
        vp->v_usecount--;
        insmntque(vp, NULL);
        vp->v_type = VBAD;

        simple_lock(&vp->v_interlock);
        /*
         * Insert at head of LRU list
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        simple_unlock(&vp->v_interlock);
}
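
/*
 * Illustrative sketch (not part of the build): the getnewvnode()/
 * ungetnewvnode() pairing a VFS_VGET implementation might use when it
 * loses an inode-hash race; example_hash_lookup() is a hypothetical
 * placeholder for the filesystem's own hash lookup.
 */
#if 0
        if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0)
                return (error);
        if (example_hash_lookup(dev, ino) != NULL) {
                /* Another process set this inode up first; back out. */
                ungetnewvnode(vp);
                /* ... retry the hash lookup ... */
        }
        /* ... otherwise attach fs-specific data to vp->v_data ... */
#endif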

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
        struct vnode *vp;
        struct mount *mp;
{

#ifdef DIAGNOSTIC
        if ((mp != NULL) &&
            (mp->mnt_flag & MNT_UNMOUNT) &&
            !(mp->mnt_flag & MNT_SOFTDEP) &&
            vp->v_tag != VT_VFS) {
                panic("insmntque into dying filesystem");
        }
#endif

        simple_lock(&mntvnode_slock);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL)
                LIST_REMOVE(vp, v_mntvnodes);
        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) != NULL)
                LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
        simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
        struct buf *bp;
{
        struct vnode *vp;

        if ((vp = bp->b_vp) != NULL) {
                if (--vp->v_numoutput < 0)
                        panic("vwakeup: neg numoutput, vp %p", vp);
                if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
                        vp->v_flag &= ~VBWAIT;
                        wakeup((caddr_t)&vp->v_numoutput);
                }
        }
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
        struct vnode *vp;
        int flags;
        struct ucred *cred;
        struct proc *p;
        int slpflag, slptimeo;
{
        struct uvm_object *uobj = &vp->v_uvm.u_obj;
        struct buf *bp, *nbp;
        int s, error, rv;
        int flushflags = PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO|
            (flags & V_SAVE ? PGO_CLEANIT : 0);

        /* XXXUBC this doesn't look at flags or slp* */
        if (vp->v_type == VREG) {
                simple_lock(&uobj->vmobjlock);
                rv = (uobj->pgops->pgo_flush)(uobj, 0, 0, flushflags);
                simple_unlock(&uobj->vmobjlock);
                if (!rv) {
                        return EIO;
                }
        }
        if (flags & V_SAVE) {
                error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
                if (error)
                        return (error);
#ifdef DIAGNOSTIC
                s = splbio();
                if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
                        panic("vinvalbuf: dirty bufs, vp %p", vp);
                splx(s);
#endif
        }

        s = splbio();

restart:
        for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
                            "vinvalbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
                            "vinvalbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                /*
                 * XXX Since there are no node locks for NFS, I believe
                 * there is a slight chance that a delayed write will
                 * occur while sleeping just above, so check for it.
                 */
                if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
                        printf("buffer still DELWRI\n");
#endif
                        bp->b_flags |= B_BUSY | B_VFLUSH;
                        VOP_BWRITE(bp);
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

#ifdef DIAGNOSTIC
        if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
                panic("vinvalbuf: flush failed, vp %p", vp);
#endif

        splx(s);

        return (0);
}
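
/*
 * Illustrative sketch (not part of the build): a typical vinvalbuf()
 * call, matching the one vclean() makes below when purging a vnode.
 * V_SAVE writes dirty buffers to stable storage first; slpflag and
 * slptimeo of 0 request an uninterruptible sleep with no timeout.
 */
#if 0
        if ((error = vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0)) != 0)
                return (error);
#endif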

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
        struct vnode *vp;
        daddr_t lbn;
        int slpflag, slptimeo;
{
        struct uvm_object *uobj = &vp->v_uvm.u_obj;
        struct buf *bp, *nbp;
        int s, error, rv;

        s = splbio();
        if (vp->v_type == VREG) {
                simple_lock(&uobj->vmobjlock);
                rv = (uobj->pgops->pgo_flush)(uobj,
                    round_page(lbn << vp->v_mount->mnt_fs_bshift),
                    vp->v_uvm.u_size, PGO_FREE|PGO_SYNCIO);
                simple_unlock(&uobj->vmobjlock);
                if (!rv) {
                        splx(s);
                        return EIO;
                }
        }

restart:
        for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_lblkno < lbn)
                        continue;
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep(bp, slpflag | (PRIBIO + 1),
                            "vtruncbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_lblkno < lbn)
                        continue;
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep(bp, slpflag | (PRIBIO + 1),
                            "vtruncbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        splx(s);

        return (0);
}

void
vflushbuf(vp, sync)
        struct vnode *vp;
        int sync;
{
        struct uvm_object *uobj = &vp->v_uvm.u_obj;
        struct buf *bp, *nbp;
        int s;

        if (vp->v_type == VREG) {
                int flags = PGO_CLEANIT|PGO_ALLPAGES| (sync ? PGO_SYNCIO : 0);

                simple_lock(&uobj->vmobjlock);
                (uobj->pgops->pgo_flush)(uobj, 0, 0, flags);
                simple_unlock(&uobj->vmobjlock);
        }

loop:
        s = splbio();
        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if ((bp->b_flags & B_BUSY))
                        continue;
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("vflushbuf: not dirty, bp %p", bp);
                bp->b_flags |= B_BUSY | B_VFLUSH;
                splx(s);
                /*
                 * Wait for I/O associated with indirect blocks to complete,
                 * since there is no way to quickly wait for them below.
                 */
                if (bp->b_vp == vp || sync == 0)
                        (void) bawrite(bp);
                else
                        (void) bwrite(bp);
                goto loop;
        }
        if (sync == 0) {
                splx(s);
                return;
        }
        while (vp->v_numoutput) {
                vp->v_flag |= VBWAIT;
                tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
        }
        splx(s);
        if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
                vprint("vflushbuf: dirty", vp);
                goto loop;
        }
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
        struct vnode *vp;
        struct buf *bp;
{
        int s;

        if (bp->b_vp)
                panic("bgetvp: not free, bp %p", bp);
        VHOLD(vp);
        s = splbio();
        bp->b_vp = vp;
        if (vp->v_type == VBLK || vp->v_type == VCHR)
                bp->b_dev = vp->v_rdev;
        else
                bp->b_dev = NODEV;
        /*
         * Insert onto list for new vnode.
         */
        bufinsvn(bp, &vp->v_cleanblkhd);
        splx(s);
}
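
/*
 * Illustrative sketch (not part of the build): the buffer/vnode
 * association lifecycle.  Buffer-cache code attaches a buffer with
 * bgetvp(), which takes a hold on the vnode; brelvp() below drops that
 * hold when the buffer is torn down, and reassignbuf() (called at
 * splbio()) moves a buffer between the clean and dirty lists as its
 * B_DELWRI state changes.
 */
#if 0
        s = splbio();
        bgetvp(vp, bp);			/* bp goes on vp->v_cleanblkhd */
        bp->b_flags |= B_DELWRI;
        reassignbuf(bp, vp);		/* moved to vp->v_dirtyblkhd */
        /* ... later, when the buffer is recycled ... */
        brelvp(bp);			/* drops the implicit VHOLD */
        splx(s);
#endif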

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
        struct buf *bp;
{
        struct vnode *vp;
        int s;

        if (bp->b_vp == NULL)
                panic("brelvp: vp NULL, bp %p", bp);

        s = splbio();
        vp = bp->b_vp;
        /*
         * Delete from old vnode list, if on one.
         */
        if (bp->b_vnbufs.le_next != NOLIST)
                bufremvn(bp);

        if (vp->v_type != VREG && (vp->v_flag & VONWORKLST) &&
            LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
                vp->v_flag &= ~VONWORKLST;
                LIST_REMOVE(vp, v_synclist);
        }

        bp->b_vp = NULL;
        HOLDRELE(vp);
        splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
        struct buf *bp;
        struct vnode *newvp;
{
        struct buflists *listheadp;
        int delay;

        /*
         * Delete from old vnode list, if on one.
         */
        if (bp->b_vnbufs.le_next != NOLIST)
                bufremvn(bp);
        /*
         * If dirty, put on list of dirty buffers;
         * otherwise insert onto list of clean buffers.
         */
        if ((bp->b_flags & B_DELWRI) == 0) {
                listheadp = &newvp->v_cleanblkhd;
                if (newvp->v_type != VREG &&
                    (newvp->v_flag & VONWORKLST) &&
                    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
                        newvp->v_flag &= ~VONWORKLST;
                        LIST_REMOVE(newvp, v_synclist);
                }
        } else {
                listheadp = &newvp->v_dirtyblkhd;
                if ((newvp->v_flag & VONWORKLST) == 0) {
                        switch (newvp->v_type) {
                        case VDIR:
                                delay = dirdelay;
                                break;
                        case VBLK:
                                if (newvp->v_specmountpoint != NULL) {
                                        delay = metadelay;
                                        break;
                                }
                                /* fall through */
                        default:
                                delay = filedelay;
                                break;
                        }
                        if (!newvp->v_mount ||
                            (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
                                vn_syncer_add_to_worklist(newvp, delay);
                }
        }
        bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
        dev_t dev;
        struct vnode **vpp;
{

        return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
        dev_t dev;
        struct vnode **vpp;
{

        return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
        dev_t dev;
        struct vnode **vpp;
        enum vtype type;
{
        struct vnode *vp;
        struct vnode *nvp;
        int error;

        if (dev == NODEV) {
                *vpp = NULLVP;
                return (0);
        }
        error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
        if (error) {
                *vpp = NULLVP;
                return (error);
        }
        vp = nvp;
        vp->v_type = type;
        if ((nvp = checkalias(vp, dev, NULL)) != 0) {
                vput(vp);
                vp = nvp;
        }
        *vpp = vp;
        return (0);
}
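
/*
 * Illustrative sketch (not part of the build): mount-time code can use
 * bdevvp() to obtain the special vnode for the device being mounted;
 * the root-mount path does this with the global rootdev.
 */
#if 0
        struct vnode *devvp;

        if (bdevvp(rootdev, &devvp) != 0)
                panic("example: can't allocate root device vnode");
        /* ... hand devvp to the filesystem's mount routine ... */
#endif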

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device).  If such an alias exists, deallocate
 * the existing contents and return the aliased vnode.  The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
        struct vnode *nvp;
        dev_t nvp_rdev;
        struct mount *mp;
{
        struct proc *p = curproc;	/* XXX */
        struct vnode *vp;
        struct vnode **vpp;

        if (nvp->v_type != VBLK && nvp->v_type != VCHR)
                return (NULLVP);

        vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
        simple_lock(&spechash_slock);
        for (vp = *vpp; vp; vp = vp->v_specnext) {
                if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
                        continue;
                /*
                 * Alias, but not in use, so flush it out.
                 */
                simple_lock(&vp->v_interlock);
                if (vp->v_usecount == 0) {
                        simple_unlock(&spechash_slock);
                        vgonel(vp, p);
                        goto loop;
                }
                if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
                        simple_unlock(&spechash_slock);
                        goto loop;
                }
                break;
        }
        if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
                MALLOC(nvp->v_specinfo, struct specinfo *,
                    sizeof(struct specinfo), M_VNODE, M_WAITOK);
                nvp->v_rdev = nvp_rdev;
                nvp->v_hashchain = vpp;
                nvp->v_specnext = *vpp;
                nvp->v_specmountpoint = NULL;
                simple_unlock(&spechash_slock);
                nvp->v_speclockf = NULL;
                *vpp = nvp;
                if (vp != NULLVP) {
                        nvp->v_flag |= VALIASED;
                        vp->v_flag |= VALIASED;
                        vput(vp);
                }
                return (NULLVP);
        }
        simple_unlock(&spechash_slock);
        VOP_UNLOCK(vp, 0);
        simple_lock(&vp->v_interlock);
        vclean(vp, 0, p);
        vp->v_op = nvp->v_op;
        vp->v_tag = nvp->v_tag;
        vp->v_vnlock = &vp->v_lock;
        lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
        nvp->v_type = VNON;
        insmntque(vp, mp);
        return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  If the vnode lock bit is set the
 * vnode is being eliminated in vgone.  In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
        struct vnode *vp;
        int flags;
{
        int error;

        /*
         * If the vnode is in the process of being cleaned out for
         * another use, we wait for the cleaning to finish and then
         * return failure.  Cleaning is determined by checking that
         * the VXLOCK flag is set.
         */

        if ((flags & LK_INTERLOCK) == 0)
                simple_lock(&vp->v_interlock);
        if (vp->v_flag & VXLOCK) {
                if (flags & LK_NOWAIT) {
                        simple_unlock(&vp->v_interlock);
                        return EBUSY;
                }
                vp->v_flag |= VXWANT;
                ltsleep((caddr_t)vp, PINOD|PNORELOCK,
                    "vget", 0, &vp->v_interlock);
                return (ENOENT);
        }
        if (vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                if (vp->v_holdcnt > 0)
                        TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
                else
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_usecount++;
#ifdef DIAGNOSTIC
        if (vp->v_usecount == 0) {
                vprint("vget", vp);
                panic("vget: usecount overflow, vp %p", vp);
        }
#endif
        if (flags & LK_TYPE_MASK) {
                if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
                        /*
                         * must expand vrele here because we do not want
                         * to call VOP_INACTIVE if the reference count
                         * drops back to zero since it was never really
                         * active.  We must remove it from the free list
                         * before sleeping so that multiple processes do
                         * not try to recycle it.
                         */
                        simple_lock(&vp->v_interlock);
                        vp->v_usecount--;
                        if (vp->v_usecount > 0) {
                                simple_unlock(&vp->v_interlock);
                                return (error);
                        }
                        /*
                         * insert at tail of LRU list
                         */
                        simple_lock(&vnode_free_list_slock);
                        if (vp->v_holdcnt > 0)
                                TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
                                    v_freelist);
                        else
                                TAILQ_INSERT_TAIL(&vnode_free_list, vp,
                                    v_freelist);
                        simple_unlock(&vnode_free_list_slock);
                        simple_unlock(&vp->v_interlock);
                }
                return (error);
        }
        simple_unlock(&vp->v_interlock);
        return (0);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
        if (vp == NULL)
                panic("vput: null vp");
#endif
        simple_lock(&vp->v_interlock);
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                VOP_UNLOCK(vp, 0);
                return;
        }
#ifdef DIAGNOSTIC
        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                vprint("vput: bad ref count", vp);
                panic("vput: ref cnt");
        }
#endif
        /*
         * Insert at tail of LRU list.
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        vp->v_flag &= ~VTEXT;
        simple_unlock(&vp->v_interlock);
        VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
        if (vp == NULL)
                panic("vrele: null vp");
#endif
        simple_lock(&vp->v_interlock);
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                return;
        }
#ifdef DIAGNOSTIC
        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                vprint("vrele: bad ref count", vp);
                panic("vrele: ref cnt vp %p", vp);
        }
#endif
        /*
         * Insert at tail of LRU list.
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        vp->v_flag &= ~VTEXT;
        if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
                VOP_INACTIVE(vp, p);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
        struct vnode *vp;
{

        /*
         * If it is on the freelist and the hold count is currently
         * zero, move it to the hold list.  The test of the back
         * pointer and the use reference count of zero is because
         * it will be removed from a free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone.  If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from a freelist to ensure
         * that we do not try to move it here.
         */
        simple_lock(&vp->v_interlock);
        if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
            vp->v_holdcnt == 0 && vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_holdcnt++;
        simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
        struct vnode *vp;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_holdcnt <= 0)
                panic("holdrele: holdcnt vp %p", vp);
        vp->v_holdcnt--;

        /*
         * If it is on the holdlist and the hold count drops to
         * zero, move it to the free list.  The test of the back
         * pointer and the use reference count of zero is because
         * it will be removed from a free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone.  If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from a freelist to ensure
         * that we do not try to move it here.
         */

        if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
            vp->v_holdcnt == 0 && vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
        struct vnode *vp;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_usecount <= 0)
                panic("vref used where vget required, vp %p", vp);
        vp->v_usecount++;
#ifdef DIAGNOSTIC
        if (vp->v_usecount == 0) {
                vprint("vref", vp);
                panic("vref: usecount overflow, vp %p", vp);
        }
#endif
        simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
        struct mount *mp;
        struct vnode *skipvp;
        int flags;
{
        struct proc *p = curproc;	/* XXX */
        struct vnode *vp, *nvp;
        int busy = 0;

        simple_lock(&mntvnode_slock);
loop:
        for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
                if (vp->v_mount != mp)
                        goto loop;
                nvp = vp->v_mntvnodes.le_next;
                /*
                 * Skip over a selected vnode.
                 */
                if (vp == skipvp)
                        continue;
                simple_lock(&vp->v_interlock);
                /*
                 * Skip over vnodes marked VSYSTEM.
                 */
                if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                /*
                 * If WRITECLOSE is set, only flush out regular file
                 * vnodes open for writing.
                 */
                if ((flags & WRITECLOSE) &&
                    (vp->v_writecount == 0 || vp->v_type != VREG)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                /*
                 * With v_usecount == 0, all we need to do is clear
                 * out the vnode data structures and we are done.
                 */
                if (vp->v_usecount == 0) {
                        simple_unlock(&mntvnode_slock);
                        vgonel(vp, p);
                        simple_lock(&mntvnode_slock);
                        continue;
                }
                /*
                 * If FORCECLOSE is set, forcibly close the vnode.
                 * For block or character devices, revert to an
                 * anonymous device.  For all other files, just kill them.
                 */
                if (flags & FORCECLOSE) {
                        simple_unlock(&mntvnode_slock);
                        if (vp->v_type != VBLK && vp->v_type != VCHR) {
                                vgonel(vp, p);
                        } else {
                                vclean(vp, 0, p);
                                vp->v_op = spec_vnodeop_p;
                                insmntque(vp, (struct mount *)0);
                        }
                        simple_lock(&mntvnode_slock);
                        continue;
                }
#ifdef DEBUG
                if (busyprt)
                        vprint("vflush: busy vnode", vp);
#endif
                simple_unlock(&vp->v_interlock);
                busy++;
        }
        simple_unlock(&mntvnode_slock);
        if (busy)
                return (EBUSY);
        return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags, p)
        struct vnode *vp;
        int flags;
        struct proc *p;
{
        int active;

        /*
         * Check to see if the vnode is in use.
         * If so we have to reference it before we clean it out
         * so that its count cannot fall to zero and generate a
         * race against ourselves to recycle it.
         */
        if ((active = vp->v_usecount) != 0) {
                /* We have the vnode interlock. */
                vp->v_usecount++;
#ifdef DIAGNOSTIC
                if (vp->v_usecount == 0) {
                        vprint("vclean", vp);
                        panic("vclean: usecount overflow");
                }
#endif
        }

        /*
         * Prevent the vnode from being recycled or
         * brought into use while we clean it out.
         */
        if (vp->v_flag & VXLOCK)
                panic("vclean: deadlock, vp %p", vp);
        vp->v_flag |= VXLOCK;
        vp->v_flag &= ~VTEXT;

        /*
         * Even if the count is zero, the VOP_INACTIVE routine may still
         * have the object locked while it cleans it out.  The VOP_LOCK
         * ensures that the VOP_INACTIVE routine is done with its work.
         * For active vnodes, it ensures that no other activity can
         * occur while the underlying object is being cleaned out.
         */
        VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

        /*
         * Clean out any cached data associated with the vnode.
         */
        if (flags & DOCLOSE)
                vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

        /*
         * If purging an active vnode, it must be closed and
         * deactivated before being reclaimed.  Note that the
         * VOP_INACTIVE will unlock the vnode.
         */
        if (active) {
                if (flags & DOCLOSE)
                        VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
                VOP_INACTIVE(vp, p);
        } else {
                /*
                 * Any other processes trying to obtain this lock must first
                 * wait for VXLOCK to clear, then call the new lock operation.
                 */
                VOP_UNLOCK(vp, 0);
        }
        /*
         * Reclaim the vnode.
         */
        if (VOP_RECLAIM(vp, p))
                panic("vclean: cannot reclaim, vp %p", vp);
        if (active) {
                /*
                 * Inline copy of vrele() since VOP_INACTIVE
                 * has already been called.
                 */
                simple_lock(&vp->v_interlock);
                if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
                        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                                vprint("vclean: bad ref count", vp);
                                panic("vclean: ref cnt");
                        }
#endif
                        /*
                         * Insert at tail of LRU list.
                         */

                        simple_unlock(&vp->v_interlock);
                        simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
                        if (vp->v_vnlock) {
                                if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
                                        vprint("vclean: lock not drained", vp);
                        }
                        if (vp->v_holdcnt > 0)
                                panic("vclean: not clean, vp %p", vp);
#endif
                        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
                        simple_unlock(&vnode_free_list_slock);
                } else
                        simple_unlock(&vp->v_interlock);
        }

        cache_purge(vp);

        /*
         * Done with purge, notify sleepers of the grim news.
         */
        vp->v_op = dead_vnodeop_p;
        vp->v_tag = VT_NON;
        simple_lock(&vp->v_interlock);
        vp->v_flag &= ~VXLOCK;
        if (vp->v_flag & VXWANT) {
                vp->v_flag &= ~VXWANT;
                simple_unlock(&vp->v_interlock);
                wakeup((caddr_t)vp);
        } else
                simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
        struct vnode *vp;
        struct simplelock *inter_lkp;
        struct proc *p;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_usecount == 0) {
                if (inter_lkp)
                        simple_unlock(inter_lkp);
                vgonel(vp, p);
                return (1);
        }
        simple_unlock(&vp->v_interlock);
        return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

        simple_lock(&vp->v_interlock);
        vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
        struct vnode *vp;
        struct proc *p;
{
        struct vnode *vq;
        struct vnode *vx;

        /*
         * If a vgone (or vclean) is already in progress,
         * wait until it is done and return.
         */
        if (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                ltsleep((caddr_t)vp, PINOD | PNORELOCK,
                    "vgone", 0, &vp->v_interlock);
                return;
        }
        /*
         * Clean out the filesystem specific data.
         */
        vclean(vp, DOCLOSE, p);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL)
                insmntque(vp, (struct mount *)0);
        /*
         * If special device, remove it from the special device alias list,
         * if it is on one.
         */
        if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
                simple_lock(&spechash_slock);
                if (vp->v_hashchain != NULL) {
                        if (*vp->v_hashchain == vp) {
                                *vp->v_hashchain = vp->v_specnext;
                        } else {
                                for (vq = *vp->v_hashchain; vq;
                                     vq = vq->v_specnext) {
                                        if (vq->v_specnext != vp)
                                                continue;
                                        vq->v_specnext = vp->v_specnext;
                                        break;
                                }
                                if (vq == NULL)
                                        panic("missing bdev");
                        }
                        if (vp->v_flag & VALIASED) {
                                vx = NULL;
                                for (vq = *vp->v_hashchain; vq;
                                     vq = vq->v_specnext) {
                                        if (vq->v_rdev != vp->v_rdev ||
                                            vq->v_type != vp->v_type)
                                                continue;
                                        if (vx)
                                                break;
                                        vx = vq;
                                }
                                if (vx == NULL)
                                        panic("missing alias");
                                if (vq == NULL)
                                        vx->v_flag &= ~VALIASED;
                                vp->v_flag &= ~VALIASED;
                        }
                }
                simple_unlock(&spechash_slock);
                FREE(vp->v_specinfo, M_VNODE);
                vp->v_specinfo = NULL;
        }
        /*
         * If it is on the freelist and not already at the head,
         * move it to the head of the list.  The test of the back
         * pointer and the reference count of zero is because
         * it will be removed from the free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone.  If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from the freelist to ensure
         * that we do not try to move it here.
         */
        if (vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                if (vp->v_holdcnt > 0)
                        panic("vgonel: not clean, vp %p", vp);
                if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
                    TAILQ_FIRST(&vnode_free_list) != vp) {
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
                }
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
        dev_t dev;
        enum vtype type;
        struct vnode **vpp;
{
        struct vnode *vp;
        int rc = 0;

        simple_lock(&spechash_slock);
        for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
                if (dev != vp->v_rdev || type != vp->v_type)
                        continue;
                *vpp = vp;
                rc = 1;
                break;
        }
        simple_unlock(&spechash_slock);
        return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
        int maj, minl, minh;
        enum vtype type;
{
        struct vnode *vp;
        int mn;

        for (mn = minl; mn <= minh; mn++)
                if (vfinddev(makedev(maj, mn), type, &vp))
                        VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
        struct vnode *vp;
{
        struct vnode *vq, *vnext;
        int count;

loop:
        if ((vp->v_flag & VALIASED) == 0)
                return (vp->v_usecount);
        simple_lock(&spechash_slock);
        for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
                vnext = vq->v_specnext;
                if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
                        continue;
                /*
                 * Alias, but not in use, so flush it out.
                 */
                if (vq->v_usecount == 0 && vq != vp) {
                        simple_unlock(&spechash_slock);
                        vgone(vq);
                        goto loop;
                }
                count += vq->v_usecount;
        }
        simple_unlock(&spechash_slock);
        return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
        char *label;
        struct vnode *vp;
{
        char buf[64];

        if (label != NULL)
                printf("%s: ", label);
        printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
            vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
            vp->v_holdcnt);
        buf[0] = '\0';
        if (vp->v_flag & VROOT)
                strcat(buf, "|VROOT");
        if (vp->v_flag & VTEXT)
                strcat(buf, "|VTEXT");
        if (vp->v_flag & VSYSTEM)
                strcat(buf, "|VSYSTEM");
        if (vp->v_flag & VXLOCK)
                strcat(buf, "|VXLOCK");
        if (vp->v_flag & VXWANT)
                strcat(buf, "|VXWANT");
        if (vp->v_flag & VBWAIT)
                strcat(buf, "|VBWAIT");
        if (vp->v_flag & VALIASED)
                strcat(buf, "|VALIASED");
        if (buf[0] != '\0')
                printf(" flags (%s)", &buf[1]);
        if (vp->v_data == NULL) {
                printf("\n");
        } else {
                printf("\n\t");
                VOP_PRINT(vp);
        }
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
        struct mount *mp, *nmp;
        struct vnode *vp;

        printf("Locked vnodes\n");
        simple_lock(&mountlist_slock);
        for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = mp->mnt_list.cqe_next;
                        continue;
                }
                for (vp = mp->mnt_vnodelist.lh_first;
                     vp != NULL;
                     vp = vp->v_mntvnodes.le_next) {
                        if (VOP_ISLOCKED(vp))
                                vprint((char *)0, vp);
                }
                simple_lock(&mountlist_slock);
                nmp = mp->mnt_list.cqe_next;
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);
}
#endif

extern const char *mountcompatnames[];
extern const int nmountcompatnames;

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
        int *name;
        u_int namelen;
        void *oldp;
        size_t *oldlenp;
        void *newp;
        size_t newlen;
        struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
        struct vfsconf vfc;
#endif
        struct vfsops *vfsp;

        /* all sysctl names at this level are at least name and field */
        if (namelen < 2)
                return (ENOTDIR);		/* overloaded */

        /* Not generic: goes to file system. */
        if (name[0] != VFS_GENERIC) {
                if (name[0] >= nmountcompatnames || name[0] < 0 ||
                    mountcompatnames[name[0]] == NULL)
                        return (EOPNOTSUPP);
                vfsp = vfs_getopsbyname(mountcompatnames[name[0]]);
                if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
                        return (EOPNOTSUPP);
                return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
                    oldp, oldlenp, newp, newlen, p));
        }

        /* The rest are generic vfs sysctls. */
        switch (name[1]) {
        case VFS_USERMOUNT:
                return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
        case VFS_MAXTYPENUM:
                /*
                 * Provided for 4.4BSD-Lite2 compatibility.
                 */
                return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
        case VFS_CONF:
                /*
                 * Special: a node, next is a file system name.
                 * Provided for 4.4BSD-Lite2 compatibility.
                 */
                if (namelen < 3)
                        return (ENOTDIR);	/* overloaded */
                if (name[2] >= nmountcompatnames || name[2] < 0 ||
                    mountcompatnames[name[2]] == NULL)
                        return (EOPNOTSUPP);
                vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
                if (vfsp == NULL)
                        return (EOPNOTSUPP);
                vfc.vfc_vfsops = vfsp;
                strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
                vfc.vfc_typenum = name[2];
                vfc.vfc_refcount = vfsp->vfs_refcount;
                vfc.vfc_flags = 0;
                vfc.vfc_mountroot = vfsp->vfs_mountroot;
                vfc.vfc_next = NULL;
                return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
                    sizeof(struct vfsconf)));
#endif
        default:
                break;
        }
        return (EOPNOTSUPP);
}
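
/*
 * Illustrative sketch (not part of the kernel build): reading the
 * vfs.generic.usermount knob handled above from userland via sysctl(3),
 * using the CTL_VFS / VFS_GENERIC / VFS_USERMOUNT MIB path.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
        int name[3];
        int usermount;
        size_t len = sizeof(usermount);

        name[0] = CTL_VFS;
        name[1] = VFS_GENERIC;
        name[2] = VFS_USERMOUNT;
        if (sysctl(name, 3, &usermount, &len, NULL, 0) == -1) {
                perror("sysctl");
                return 1;
        }
        printf("vfs.generic.usermount = %d\n", usermount);
        return 0;
}
#endif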

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
        char *where;
        size_t *sizep;
        struct proc *p;
{
        struct mount *mp, *nmp;
        struct vnode *nvp, *vp;
        char *bp = where, *savebp;
        char *ewhere;
        int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
        if (where == NULL) {
                *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
                return (0);
        }
        ewhere = where + *sizep;

        simple_lock(&mountlist_slock);
        for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = mp->mnt_list.cqe_next;
                        continue;
                }
                savebp = bp;
again:
                simple_lock(&mntvnode_slock);
                for (vp = mp->mnt_vnodelist.lh_first;
                     vp != NULL;
                     vp = nvp) {
                        /*
                         * Check that the vp is still associated with
                         * this filesystem.  RACE: could have been
                         * recycled onto the same filesystem.
                         */
                        if (vp->v_mount != mp) {
                                simple_unlock(&mntvnode_slock);
                                if (kinfo_vdebug)
                                        printf("kinfo: vp changed\n");
                                bp = savebp;
                                goto again;
                        }
                        nvp = vp->v_mntvnodes.le_next;
                        if (bp + VPTRSZ + VNODESZ > ewhere) {
                                simple_unlock(&mntvnode_slock);
                                *sizep = bp - where;
                                return (ENOMEM);
                        }
                        simple_unlock(&mntvnode_slock);
                        if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
                            (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
                                return (error);
                        bp += VPTRSZ + VNODESZ;
                        simple_lock(&mntvnode_slock);
                }
                simple_unlock(&mntvnode_slock);
                simple_lock(&mountlist_slock);
                nmp = mp->mnt_list.cqe_next;
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);

        *sizep = bp - where;
        return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
        struct vnode *vp;
{
        struct vnode *vq;
        int error = 0;

        if (vp->v_specmountpoint != NULL)
                return (EBUSY);
        if (vp->v_flag & VALIASED) {
                simple_lock(&spechash_slock);
                for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                        if (vq->v_rdev != vp->v_rdev ||
                            vq->v_type != vp->v_type)
                                continue;
                        if (vq->v_specmountpoint != NULL) {
                                error = EBUSY;
                                break;
                        }
                }
                simple_unlock(&spechash_slock);
        }
        return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
        struct mount *mp;
        struct netexport *nep;
        struct export_args *argp;
{
        struct netcred *np, *enp;
        struct radix_node_head *rnh;
        int i;
        struct radix_node *rn;
        struct sockaddr *saddr, *smask = 0;
        struct domain *dom;
        int error;

        if (argp->ex_addrlen == 0) {
                if (mp->mnt_flag & MNT_DEFEXPORTED)
                        return (EPERM);
                np = &nep->ne_defexported;
                np->netc_exflags = argp->ex_flags;
                np->netc_anon = argp->ex_anon;
                np->netc_anon.cr_ref = 1;
                mp->mnt_flag |= MNT_DEFEXPORTED;
                return (0);
        }
        i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
        np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
        memset((caddr_t)np, 0, i);
        saddr = (struct sockaddr *)(np + 1);
        error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
        if (error)
                goto out;
        if (saddr->sa_len > argp->ex_addrlen)
                saddr->sa_len = argp->ex_addrlen;
        if (argp->ex_masklen) {
                smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
                error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
                if (error)
                        goto out;
                if (smask->sa_len > argp->ex_masklen)
                        smask->sa_len = argp->ex_masklen;
        }
        i = saddr->sa_family;
        if ((rnh = nep->ne_rtable[i]) == 0) {
                /*
                 * Seems silly to initialize every AF when most are not
                 * used, do so on demand here
                 */
                for (dom = domains; dom; dom = dom->dom_next)
                        if (dom->dom_family == i && dom->dom_rtattach) {
                                dom->dom_rtattach((void **)&nep->ne_rtable[i],
                                    dom->dom_rtoffset);
                                break;
                        }
                if ((rnh = nep->ne_rtable[i]) == 0) {
                        error = ENOBUFS;
                        goto out;
                }
        }
        rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
            np->netc_rnodes);
        if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
                if (rn == 0) {
                        enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
                            smask, rnh);
                        if (enp == 0) {
                                error = EPERM;
                                goto out;
                        }
                } else
                        enp = (struct netcred *)rn;

                if (enp->netc_exflags != argp->ex_flags ||
                    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
                    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
                    enp->netc_anon.cr_ngroups != argp->ex_anon.cr_ngroups ||
                    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
                        enp->netc_anon.cr_ngroups))
                        error = EPERM;
                else
                        error = 0;
                goto out;
        }
        np->netc_exflags = argp->ex_flags;
        np->netc_anon = argp->ex_anon;
        np->netc_anon.cr_ref = 1;
        return (0);
out:
        free(np, M_NETADDR);
        return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
        struct radix_node *rn;
        void *w;
/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
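/*
 * Illustrative sketch only: a file system's mount entry point hands
 * export requests to vfs_export() when no special device is named.
 * The "example_args" and "example_mount" structures below are
 * hypothetical stand-ins for a file system's own mount arguments
 * and per-mount data.
 */
#if 0
struct example_args {			/* hypothetical mount arguments */
	char *fspec;			/* block special device, or NULL */
	struct export_args export;	/* network export information */
};
struct example_mount {			/* hypothetical per-mount data */
	struct netexport um_export;	/* export address lists */
};

static int
example_mount_update(mp, ump, argsp)
	struct mount *mp;
	struct example_mount *ump;
	struct example_args *argsp;
{
	if (argsp->fspec == NULL) {
		/* No device given: treat this as an export request. */
		return (vfs_export(mp, &ump->um_export, &argsp->export));
	}
	return (EINVAL);
}
#endif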
/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and RFC 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one public filesystem is allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get the real filehandle for the root of the exported FS.
	 */
	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an index file was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Look in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
					rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
	enum vtype type;
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/*
	 * The super-user always gets read/write access; execute access
	 * additionally requires at least one execute bit to be set,
	 * except on directories.
	 */
	if (cred->cr_uid == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
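/*
 * Illustrative sketch only: a typical VOP_ACCESS implementation ends
 * by delegating the mode-bit check to vaccess(), roughly as below.
 * The "example_inode" structure and function are hypothetical
 * stand-ins for a file system's private per-vnode data.
 */
#if 0
struct example_inode {		/* hypothetical private inode data */
	mode_t	i_mode;
	uid_t	i_uid;
	gid_t	i_gid;
};

static int
example_access(vp, ip, mode, cred)
	struct vnode *vp;
	struct example_inode *ip;
	mode_t mode;
	struct ucred *cred;
{
	/* File-system-specific checks (quotas, etc.) would go here. */

	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
	    ip->i_uid, ip->i_gid, mode, cred));
}
#endif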
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
	struct proc *p;
{
	struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		if (vfs_busy(mp, 0, 0))
			continue;
		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s;
	struct proc *p = curproc;

	/* XXX we're certainly not running in proc0's context! */
	if (p == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* remove user processes from the run queue */
	suspendsched();
	(void) spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20;) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev)	/* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}
	if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif

#else /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		return;
	} else
		printf("done\n");

	/*
	 * If we've panicked, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(p);
}
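/*
 * Illustrative sketch only: machine-dependent reboot code calls
 * vfs_shutdown() to sync and unmount before the machine halts.
 * The fragment below is a simplified, hypothetical rendition of the
 * relevant step of a port's cpu_reboot(); RB_NOSYNC and resettodr()
 * come from the machine-dependent shutdown path, not this file.
 */
#if 0
static void
example_reboot_sync(howto)
	int howto;
{
	static int syncdone;

	if ((howto & RB_NOSYNC) == 0 && syncdone == 0) {
		syncdone = 1;
		vfs_shutdown();		/* flush dirty buffers, then unmount */
		resettodr();		/* write software clock back to RTC */
	}
}
#endif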
/*
 * Mount the root file system. If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
	extern int (*mountroot) __P((void));
	struct vfsops *v;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET");
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If the user specified a file system, use it.
	 */
	if (mountroot != NULL)
		return ((*mountroot)());

	/*
	 * Try each file system currently configured into the kernel.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		printf("mountroot: trying %s...\n", v->vfs_name);
#endif
		if ((*v->vfs_mountroot)() == 0) {
			printf("root file system type: %s\n", v->vfs_name);
			break;
		}
	}

	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		return (EFTYPE);
	}
	return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
	const char *name;
{
	struct vfsops *v;

	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}

	return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

out:
	return (error);
}
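/*
 * Illustrative sketch only: a loadable file system pairs vfs_attach()
 * at load time with vfs_detach() at unload time. The
 * "examplefs_vfsops" vector and the load/unload wrappers below are
 * hypothetical.
 */
#if 0
extern struct vfsops examplefs_vfsops;	/* hypothetical */

static int
examplefs_load()
{
	/* Fails with EEXIST if a file system of this name exists. */
	return (vfs_attach(&examplefs_vfsops));
}

static int
examplefs_unload()
{
	/* Fails with EBUSY while any instance is still mounted. */
	return (vfs_detach(&examplefs_vfsops));
}
#endif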
/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}

#ifdef DDB
const char buf_flagbits[] =
	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
	"\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
	struct buf *bp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[1024];

	(*pr)(" vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
	(*pr)(" error %d flags 0x%s\n", bp->b_error, buf);

	(*pr)(" bufsize 0x%x bcount 0x%x resid 0x%x\n",
	    bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)(" data %p saveaddr %p dep %p\n",
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)(" iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\11XLOCK\12XWANT\13BWAIT\14ALIASED"
	"\15DIROP\17DIRTY";

const char *vnode_types[] = {
	"VNON",
	"VREG",
	"VDIR",
	"VBLK",
	"VCHR",
	"VLNK",
	"VSOCK",
	"VFIFO",
	"VBAD",
};

const char *vnode_tags[] = {
	"VT_NON",
	"VT_UFS",
	"VT_NFS",
	"VT_MFS",
	"VT_MSDOSFS",
	"VT_LFS",
	"VT_LOFS",
	"VT_FDESC",
	"VT_PORTAL",
	"VT_NULL",
	"VT_UMAP",
	"VT_KERNFS",
	"VT_PROCFS",
	"VT_AFS",
	"VT_ISOFS",
	"VT_UNION",
	"VT_ADOSFS",
	"VT_EXT2FS",
	"VT_CODA",
	"VT_FILECORE",
	"VT_NTFS",
	"VT_VFS",
	"VT_OVERLAY"
};

void
vfs_vnode_print(vp, full, pr)
	struct vnode *vp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[256];
	const char *vtype, *vtag;

	uvm_object_printit(&vp->v_uvm.u_obj, full, pr);
	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
	(*pr)("\nVNODE flags %s\n", buf);
	(*pr)("mp %p nio %d size 0x%x rwlock 0x%x glock 0x%x\n",
	    vp->v_mount, vp->v_uvm.u_nio, (int)vp->v_uvm.u_size,
	    vp->v_vnlock ? lockstatus(vp->v_vnlock) : 0x999,
	    lockstatus(&vp->v_glock));

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	vtype = (vp->v_type >= 0 &&
	    vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
	    vnode_types[vp->v_type] : "UNKNOWN";
	vtag = (vp->v_tag >= 0 &&
	    vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
	    vnode_tags[vp->v_tag] : "UNKNOWN";

	(*pr)("type %s(%d) tag %s(%d) id 0x%x mount %p typedata %p\n",
	    vtype, vp->v_type, vtag, vp->v_tag,
	    vp->v_id, vp->v_mount, vp->v_mountedhere);
	(*pr)("lastr 0x%x lastw 0x%x lasta 0x%x\n",
	    vp->v_lastr, vp->v_lastw, vp->v_lasta);
	(*pr)("cstart 0x%x clen 0x%x ralen 0x%x maxra 0x%x\n",
	    vp->v_cstart, vp->v_clen, vp->v_ralen, vp->v_maxra);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
#endif
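
/*
 * Illustrative sketch only: the DDB print routines above are reached
 * from in-kernel debugger commands such as "show vnode". A piece of
 * ddb command glue would look roughly like the hypothetical wrapper
 * below; the command name and 'f' (full) modifier are assumptions.
 */
#if 0
void
db_example_vnode_print(addr, have_addr, count, modif)
	db_expr_t addr;
	int have_addr;
	db_expr_t count;
	char *modif;
{
	int full = (modif != NULL && modif[0] == 'f');

	/* db_printf matches the (*pr)(const char *, ...) argument. */
	vfs_vnode_print((struct vnode *)addr, full, db_printf);
}
#endif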