/*	$OpenBSD: vfs_subr.c,v 1.173 2008/07/05 12:48:03 thib Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
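/*
 * These tables are normally consumed via the IFTOVT()/VTTOIF() macros
 * from <sys/vnode.h>, which index by the file-type bits of a mode.
 * A worked example (sketch): S_IFREG is 0100000, so
 *
 *	IFTOVT(S_IFREG) == iftovt_tab[(0100000 & S_IFMT) >> 12]
 *	                == iftovt_tab[8] == VREG
 *
 * and VTTOIF(VREG) == vttoif_tab[VREG] == S_IFREG maps it back.
 */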
/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);
void	vhold(struct vnode *);
void	vdrop(struct vnode *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}
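/*
 * Typical use (sketch): callers that only inspect a mount point take a
 * read-mode busy reference and drop it when done, e.g.
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *		continue;		(being unmounted, skip it)
 *	...examine mp...
 *	vfs_unbusy(mp);
 *
 * An unmount takes VB_WRITE|VB_WAIT to exclude all other users.
 */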
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
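/*
 * Usage sketch: callers preparing a VOP_SETATTR() start from an
 * all-VNOVAL vattr so the filesystem only acts on fields that were
 * explicitly set, e.g. to truncate a file:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */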
/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}
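/*
 * Usage sketch: mounting the root filesystem typically obtains its
 * device vnode this way, e.g.
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't setup bdevvp's");
 *
 * (as done in the ffs mountroot path).
 */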
/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return;
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}
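/*
 * Reference sketch: a caller that already holds a referenced vnode and
 * wants to keep it across a blocking operation bumps the count first:
 *
 *	vref(vp);		(vp->v_usecount must already be > 0)
 *	...block...
 *	vrele(vp);		(drops the reference, may inactivate)
 *
 * vput() is the same drop, but for a vnode the caller also has locked.
 */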
/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
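/*
 * Usage sketch: a filesystem's unmount path typically flushes its
 * vnode list with something like
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, NULLVP, flags);
 *
 * skipping nothing, and failing with EBUSY unless forced.
 */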
/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}
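/*
 * Usage sketch: filesystem code that discovers an object is gone can
 * recycle its vnode right away, e.g. from an inactive routine:
 *
 *	if (ip->i_mode == 0)
 *		vrecycle(vp, p);	(only acts if v_usecount == 0)
 */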
/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list.
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] = {
	"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"
};

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
}
#endif
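/*
 * MIB sketch for vfs_sysctl() below: the generic branch is reached with
 * names such as { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM }, while
 * filesystem-specific requests route through { CTL_VFS, <typenum>, ... }
 * to that filesystem's own vfs_sysctl handler.
 */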
/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			    (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
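/*
 * Worked example: file_mode 0640, owner uid 100, group gid 10.
 * A request by uid 100 for VREAD|VWRITE builds mask S_IRUSR|S_IWUSR
 * (0600); 0640 & 0600 == 0600, so access is granted.  A request by
 * another member of gid 10 for VWRITE builds mask S_IWGRP (0020);
 * 0640 & 0020 == 0, so it fails with EACCES.
 */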
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
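/*
 * MIB sketch: from userland this knob is reachable as
 * { CTL_FS, FS_POSIX, FS_POSIX_SETUID }, i.e. sysctl(8)'s
 * fs.posix.setuid; writes are refused once securelevel > 0.
 */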
/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}
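/*
 * Protocol sketch: a writer accounts for each output I/O at splbio()
 * before starting it, and I/O completion balances the count:
 *
 *	s = splbio();
 *	vp->v_numoutput++;
 *	splx(s);
 *	...start the write...
 *	(biodone() ends up calling vwakeup(vp) when the write finishes)
 */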
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			buf_acquire(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}
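/*
 * Pairing sketch: buffer/vnode association is always adjusted under
 * splbio(), e.g.
 *
 *	s = splbio();
 *	bgetvp(vp, bp);		(buffer joins vp's clean list, vp held)
 *	...
 *	brelvp(bp);		(buffer leaves vp, hold count drops)
 *	splx(s);
 */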
/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}
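/*
 * Registration sketch (hypothetical "myfs"): a new filesystem fills in
 * a struct vfsconf (at least vfc_vfsops, vfc_name and a unique
 * vfc_typenum) and then calls
 *
 *	error = vfs_register(&myfs_vfsconf);
 *
 * vfs_unregister(&myfs_vfsconf) only succeeds once vfc_refcount has
 * dropped back to zero.
 */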
#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(struct buf *bp, int full, int (*pr)(const char *, ...))
{

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	    "  proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	    "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
{

#define	NENTS(n)	(sizeof n / sizeof(n[0]))
	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > NENTS(vtags) ? "<unk>" : vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > NENTS(vtypes) ? "<unk>" : vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
void
vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}
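/*
 * Usage sketch: a filesystem's statfs routine fills in the fs-specific
 * fields of *sbp and then finishes with
 *
 *	copy_statfs_info(sbp, mp);
 *
 * so the generic fields (fsid, owner, mount names, I/O counters) stay
 * consistent with the cached mnt_stat.
 */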