/*	$OpenBSD: vfs_subr.c,v 1.166 2008/05/07 14:08:37 thib Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
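 *
 * bufinsvn() links a buffer at the head of a vnode buffer list (clean or
 * dirty); bufremvn() unlinks it and tags the buffer's list pointer with
 * NOLIST so that list membership can be tested cheaply.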
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
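 * Returns the matching mount point, or NULL if no mounted filesystem
 * has the given fsid.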
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
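	 * Whichever list the vnode comes from, it is vgone'd below
	 * (unless already VBAD) so that it sheds its old identity
	 * before reuse.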
	 */
	toggle ^= 1;
	if (numvnodes > 2 * maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
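 * If no reusable alias exists (the common case), the new vnode is
 * entered on the device hash chain and NULLVP is returned; the caller
 * then keeps using nvp itself.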
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}

/*
 * Vnode reference.
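 * vref() may only be used to take an additional reference on a vnode
 * that is already referenced; use vget() to obtain the first reference.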
 */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return;
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

void vhold(struct vnode *vp);

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
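 * The vnode named by skipvp, if any, is exempt from the flush.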
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
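	 * A VBAD vnode has no cache identity left worth preserving,
	 * so it is made the first candidate for reuse.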
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
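 * Mount points that are busy (e.g. mid-unmount) are skipped rather
 * than waited for.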
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	}

	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
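			 * If that happened, back up to the position
			 * saved in savebp and rescan this mount.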
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			    (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
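	/* The radix node is in place; record the export credentials. */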
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(mode_t file_mode, uid_t uid, gid_t gid, mode_t acc_mode,
    struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

/*
 * Unmount all file systems.
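 * Called by vfs_shutdown() when the system goes down.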
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
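 * Dispatches fs.* requests to the per-subtree handlers; currently only
 * the fs.posix.* subtree is implemented.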
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}

/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
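			 * Any such delayed write is flushed synchronously
			 * and the scan restarts from the top.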
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;

#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("brelvp: holdcnt");
#endif
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
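 * On return the buffer is attached to newvp but sits on neither of its
 * clean/dirty lists; the caller is expected to requeue it (see
 * reassignbuf()).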
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/*
	 * Paranoia?
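	 * A freshly registered filesystem is not expected to be
	 * referenced by any mount yet.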
	 */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(struct buf *bp, int full, int (*pr)(const char *, ...))
{

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
{

#define	NENTS(n)	(sizeof n / sizeof(n[0]))
	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > NENTS(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > NENTS(vtypes)? "<unk>":vtypes[vp->v_type],
"<unk>":vtypes[vp->v_type], 2166 vp->v_type, vp->v_mount, vp->v_mountedhere); 2167 2168 (*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n", 2169 vp->v_data, vp->v_usecount, vp->v_writecount, 2170 vp->v_holdcnt, vp->v_numoutput); 2171 2172 /* uvm_object_printit(&vp->v_uobj, full, pr); */ 2173 2174 if (full) { 2175 struct buf *bp; 2176 2177 (*pr)("clean bufs:\n"); 2178 LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) { 2179 (*pr)(" bp %p\n", bp); 2180 vfs_buf_print(bp, full, pr); 2181 } 2182 2183 (*pr)("dirty bufs:\n"); 2184 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 2185 (*pr)(" bp %p\n", bp); 2186 vfs_buf_print(bp, full, pr); 2187 } 2188 } 2189 } 2190 2191 void 2192 vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...)) 2193 { 2194 struct vfsconf *vfc = mp->mnt_vfc; 2195 struct vnode *vp; 2196 int cnt = 0; 2197 2198 (*pr)("flags %b\nvnodecovered %p syncer %p data %p\n", 2199 mp->mnt_flag, MNT_BITS, 2200 mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data); 2201 2202 (*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n", 2203 vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum, 2204 vfc->vfc_refcount, vfc->vfc_flags); 2205 2206 (*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n", 2207 mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks, 2208 mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail); 2209 2210 (*pr)(" files %llu ffiles %llu favail $lld\n", mp->mnt_stat.f_files, 2211 mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail); 2212 2213 (*pr)(" f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n", 2214 mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1], 2215 mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime); 2216 2217 (*pr)(" syncwrites %llu asyncwrites = %llu\n", 2218 mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites); 2219 2220 (*pr)(" syncreads %llu asyncreads = %llu\n", 2221 mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads); 2222 2223 (*pr)(" fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n", 2224 mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname, 2225 mp->mnt_stat.f_mntfromname); 2226 2227 (*pr)("locked vnodes:"); 2228 /* XXX would take mountlist lock, except ddb has no context */ 2229 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) 2230 if (VOP_ISLOCKED(vp)) { 2231 if (!LIST_NEXT(vp, v_mntvnodes)) 2232 (*pr)(" %p", vp); 2233 else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4)))) 2234 (*pr)("\n\t%p", vp); 2235 else 2236 (*pr)(", %p", vp); 2237 } 2238 (*pr)("\n"); 2239 2240 if (full) { 2241 (*pr)("all vnodes:\n\t"); 2242 /* XXX would take mountlist lock, except ddb has no context */ 2243 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) 2244 if (!LIST_NEXT(vp, v_mntvnodes)) 2245 (*pr)(" %p", vp); 2246 else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4)))) 2247 (*pr)(" %p,\n\t", vp); 2248 else 2249 (*pr)(" %p,", vp); 2250 (*pr)("\n"); 2251 } 2252 } 2253 #endif /* DDB */ 2254 2255 void 2256 copy_statfs_info(struct statfs *sbp, const struct mount *mp) 2257 { 2258 const struct statfs *mbp; 2259 2260 strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN); 2261 2262 if (sbp == (mbp = &mp->mnt_stat)) 2263 return; 2264 2265 sbp->f_fsid = mbp->f_fsid; 2266 sbp->f_owner = mbp->f_owner; 2267 sbp->f_flags = mbp->f_flags; 2268 sbp->f_syncwrites = mbp->f_syncwrites; 2269 sbp->f_asyncwrites = mbp->f_asyncwrites; 2270 sbp->f_syncreads = mbp->f_syncreads; 2271 sbp->f_asyncreads = mbp->f_asyncreads; 2272 sbp->f_namemax = mbp->f_namemax; 2273 bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); 2274 
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}