1 /* $OpenBSD: vfs_subr.c,v 1.267 2018/03/07 18:30:23 bluhm Exp $ */ 2 /* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94 38 */ 39 40 /* 41 * External virtual filesystem routines 42 */ 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/proc.h> 47 #include <sys/sysctl.h> 48 #include <sys/mount.h> 49 #include <sys/time.h> 50 #include <sys/fcntl.h> 51 #include <sys/kernel.h> 52 #include <sys/conf.h> 53 #include <sys/vnode.h> 54 #include <sys/lock.h> 55 #include <sys/stat.h> 56 #include <sys/acct.h> 57 #include <sys/namei.h> 58 #include <sys/ucred.h> 59 #include <sys/buf.h> 60 #include <sys/errno.h> 61 #include <sys/malloc.h> 62 #include <sys/mbuf.h> 63 #include <sys/syscallargs.h> 64 #include <sys/pool.h> 65 #include <sys/tree.h> 66 #include <sys/specdev.h> 67 68 #include <netinet/in.h> 69 70 #include <uvm/uvm_extern.h> 71 #include <uvm/uvm_vnode.h> 72 73 #include "softraid.h" 74 75 void sr_quiesce(void); 76 77 enum vtype iftovt_tab[16] = { 78 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 79 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 80 }; 81 82 int vttoif_tab[9] = { 83 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 84 S_IFSOCK, S_IFIFO, S_IFMT, 85 }; 86 87 int prtactive = 0; /* 1 => print out reclaim of active vnodes */ 88 int suid_clear = 1; /* 1 => clear SUID / SGID on owner change */ 89 90 /* 91 * Insq/Remq for the vnode usage lists. 
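 * bufinsvn() links a buffer onto one of a vnode's buffer lists
 * (v_cleanblkhd or v_dirtyblkhd); bufremvn() unlinks it and marks the
 * b_vnbufs link with NOLIST, which is how brelvp() and reassignbuf()
 * later tell that the buffer is not on any list.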
92 */ 93 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) 94 #define bufremvn(bp) { \ 95 LIST_REMOVE(bp, b_vnbufs); \ 96 LIST_NEXT(bp, b_vnbufs) = NOLIST; \ 97 } 98 99 struct freelst vnode_hold_list; /* list of vnodes referencing buffers */ 100 struct freelst vnode_free_list; /* vnode free list */ 101 102 struct mntlist mountlist; /* mounted filesystem list */ 103 104 void vclean(struct vnode *, int, struct proc *); 105 106 void insmntque(struct vnode *, struct mount *); 107 int getdevvp(dev_t, struct vnode **, enum vtype); 108 109 int vfs_hang_addrlist(struct mount *, struct netexport *, 110 struct export_args *); 111 int vfs_free_netcred(struct radix_node *, void *, u_int); 112 void vfs_free_addrlist(struct netexport *); 113 void vputonfreelist(struct vnode *); 114 115 int vflush_vnode(struct vnode *, void *); 116 int maxvnodes; 117 118 void vfs_unmountall(void); 119 120 #ifdef DEBUG 121 void printlockedvnodes(void); 122 #endif 123 124 struct pool vnode_pool; 125 struct pool uvm_vnode_pool; 126 127 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2); 128 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare); 129 130 static inline int 131 rb_buf_compare(const struct buf *b1, const struct buf *b2) 132 { 133 if (b1->b_lblkno < b2->b_lblkno) 134 return(-1); 135 if (b1->b_lblkno > b2->b_lblkno) 136 return(1); 137 return(0); 138 } 139 140 /* 141 * Initialize the vnode management data structures. 142 */ 143 void 144 vntblinit(void) 145 { 146 /* buffer cache may need a vnode for each buffer */ 147 maxvnodes = 2 * initialvnodes; 148 pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE, 149 PR_WAITOK, "vnodes", NULL); 150 pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE, 151 PR_WAITOK, "uvmvnodes", NULL); 152 TAILQ_INIT(&vnode_hold_list); 153 TAILQ_INIT(&vnode_free_list); 154 TAILQ_INIT(&mountlist); 155 /* 156 * Initialize the filesystem syncer. 157 */ 158 vn_initialize_syncerd(); 159 160 #ifdef NFSSERVER 161 rn_init(sizeof(struct sockaddr_in)); 162 #endif /* NFSSERVER */ 163 } 164 165 /* 166 * Mark a mount point as busy. Used to synchronize access and to delay 167 * unmounting. 168 * 169 * Default behaviour is to attempt getting a READ lock and in case of an 170 * ongoing unmount, to wait for it to finish and then return failure. 171 */ 172 int 173 vfs_busy(struct mount *mp, int flags) 174 { 175 int rwflags = 0; 176 177 /* new mountpoints need their lock initialised */ 178 if (mp->mnt_lock.rwl_name == NULL) 179 rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE); 180 181 if (flags & VB_WRITE) 182 rwflags |= RW_WRITE; 183 else 184 rwflags |= RW_READ; 185 186 if (flags & VB_WAIT) 187 rwflags |= RW_SLEEPFAIL; 188 else 189 rwflags |= RW_NOSLEEP; 190 191 if (rw_enter(&mp->mnt_lock, rwflags)) 192 return (EBUSY); 193 194 return (0); 195 } 196 197 /* 198 * Free a busy file system 199 */ 200 void 201 vfs_unbusy(struct mount *mp) 202 { 203 rw_exit(&mp->mnt_lock); 204 } 205 206 int 207 vfs_isbusy(struct mount *mp) 208 { 209 if (RWLOCK_OWNER(&mp->mnt_lock) > 0) 210 return (1); 211 else 212 return (0); 213 } 214 215 /* 216 * Lookup a filesystem type, and if found allocate and initialize 217 * a mount structure for it. 218 * 219 * Devname is usually updated by mount(8) after booting. 
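 *
 * A minimal, hypothetical usage sketch (modeled on a typical
 * XXX_mountroot() routine; the "ffs" and "rd0a" arguments are purely
 * illustrative):
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ffs", "rd0a", &mp))
 *		return (ENODEV);
 *
 * and, should the actual mount then fail, the allocation is undone
 * with:
 *
 *	mp->mnt_vfc->vfc_refcount--;
 *	vfs_unbusy(mp);
 *	free(mp, M_MOUNT, sizeof(*mp));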
220 */ 221 int 222 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp) 223 { 224 struct vfsconf *vfsp; 225 struct mount *mp; 226 227 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 228 if (!strcmp(vfsp->vfc_name, fstypename)) 229 break; 230 if (vfsp == NULL) 231 return (ENODEV); 232 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO); 233 (void)vfs_busy(mp, VB_READ|VB_NOWAIT); 234 LIST_INIT(&mp->mnt_vnodelist); 235 mp->mnt_vfc = vfsp; 236 mp->mnt_op = vfsp->vfc_vfsops; 237 mp->mnt_flag = MNT_RDONLY; 238 mp->mnt_vnodecovered = NULLVP; 239 vfsp->vfc_refcount++; 240 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 241 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 242 mp->mnt_stat.f_mntonname[0] = '/'; 243 copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0); 244 copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0); 245 *mpp = mp; 246 return (0); 247 } 248 249 /* 250 * Lookup a mount point by filesystem identifier. 251 */ 252 struct mount * 253 vfs_getvfs(fsid_t *fsid) 254 { 255 struct mount *mp; 256 257 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 258 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 259 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 260 return (mp); 261 } 262 } 263 264 return (NULL); 265 } 266 267 268 /* 269 * Get a new unique fsid 270 */ 271 void 272 vfs_getnewfsid(struct mount *mp) 273 { 274 static u_short xxxfs_mntid; 275 276 fsid_t tfsid; 277 int mtype; 278 279 mtype = mp->mnt_vfc->vfc_typenum; 280 mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0); 281 mp->mnt_stat.f_fsid.val[1] = mtype; 282 if (xxxfs_mntid == 0) 283 ++xxxfs_mntid; 284 tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid); 285 tfsid.val[1] = mtype; 286 if (!TAILQ_EMPTY(&mountlist)) { 287 while (vfs_getvfs(&tfsid)) { 288 tfsid.val[0]++; 289 xxxfs_mntid++; 290 } 291 } 292 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 293 } 294 295 /* 296 * Set vnode attributes to VNOVAL 297 */ 298 void 299 vattr_null(struct vattr *vap) 300 { 301 302 vap->va_type = VNON; 303 /* 304 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t 305 * with 2^31-1 instead of 2^64-1. Just write'm out and let 306 * the compiler do its job. 307 */ 308 vap->va_mode = VNOVAL; 309 vap->va_nlink = VNOVAL; 310 vap->va_uid = VNOVAL; 311 vap->va_gid = VNOVAL; 312 vap->va_fsid = VNOVAL; 313 vap->va_fileid = VNOVAL; 314 vap->va_size = VNOVAL; 315 vap->va_blocksize = VNOVAL; 316 vap->va_atime.tv_sec = VNOVAL; 317 vap->va_atime.tv_nsec = VNOVAL; 318 vap->va_mtime.tv_sec = VNOVAL; 319 vap->va_mtime.tv_nsec = VNOVAL; 320 vap->va_ctime.tv_sec = VNOVAL; 321 vap->va_ctime.tv_nsec = VNOVAL; 322 vap->va_gen = VNOVAL; 323 vap->va_flags = VNOVAL; 324 vap->va_rdev = VNOVAL; 325 vap->va_bytes = VNOVAL; 326 vap->va_filerev = VNOVAL; 327 vap->va_vaflags = 0; 328 } 329 330 /* 331 * Routines having to do with the management of the vnode table. 332 */ 333 long numvnodes; 334 335 /* 336 * Return the next vnode from the free list. 337 */ 338 int 339 getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops, 340 struct vnode **vpp) 341 { 342 struct proc *p = curproc; 343 struct freelst *listhd; 344 static int toggle; 345 struct vnode *vp; 346 int s; 347 348 /* 349 * allow maxvnodes to increase if the buffer cache itself 350 * is big enough to justify it. (we don't shrink it ever) 351 */ 352 maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs 353 : maxvnodes; 354 355 /* 356 * We must choose whether to allocate a new vnode or recycle an 357 * existing one. 
The criterion for allocating a new one is that 358 * the total number of vnodes is less than the number desired or 359 * there are no vnodes on either free list. Generally we only 360 * want to recycle vnodes that have no buffers associated with 361 * them, so we look first on the vnode_free_list. If it is empty, 362 * we next consider vnodes with referencing buffers on the 363 * vnode_hold_list. The toggle ensures that half the time we 364 * will use a buffer from the vnode_hold_list, and half the time 365 * we will allocate a new one unless the list has grown to twice 366 * the desired size. We are reticent to recycle vnodes from the 367 * vnode_hold_list because we will lose the identity of all its 368 * referencing buffers. 369 */ 370 toggle ^= 1; 371 if (numvnodes / 2 > maxvnodes) 372 toggle = 0; 373 374 s = splbio(); 375 if ((numvnodes < maxvnodes) || 376 ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) && 377 ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) { 378 splx(s); 379 vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO); 380 vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO); 381 vp->v_uvm->u_vnode = vp; 382 RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree); 383 cache_tree_init(&vp->v_nc_tree); 384 TAILQ_INIT(&vp->v_cache_dst); 385 numvnodes++; 386 } else { 387 TAILQ_FOREACH(vp, listhd, v_freelist) { 388 if (VOP_ISLOCKED(vp) == 0) 389 break; 390 } 391 /* 392 * Unless this is a bad time of the month, at most 393 * the first NCPUS items on the free list are 394 * locked, so this is close enough to being empty. 395 */ 396 if (vp == NULL) { 397 splx(s); 398 tablefull("vnode"); 399 *vpp = 0; 400 return (ENFILE); 401 } 402 403 #ifdef DIAGNOSTIC 404 if (vp->v_usecount) { 405 vprint("free vnode", vp); 406 panic("free vnode isn't"); 407 } 408 #endif 409 410 TAILQ_REMOVE(listhd, vp, v_freelist); 411 vp->v_bioflag &= ~VBIOONFREELIST; 412 splx(s); 413 414 if (vp->v_type != VBAD) 415 vgonel(vp, p); 416 #ifdef DIAGNOSTIC 417 if (vp->v_data) { 418 vprint("cleaned vnode", vp); 419 panic("cleaned vnode isn't"); 420 } 421 s = splbio(); 422 if (vp->v_numoutput) 423 panic("Clean vnode has pending I/O's"); 424 splx(s); 425 #endif 426 vp->v_flag = 0; 427 vp->v_socket = 0; 428 } 429 cache_purge(vp); 430 vp->v_type = VNON; 431 vp->v_tag = tag; 432 vp->v_op = vops; 433 insmntque(vp, mp); 434 *vpp = vp; 435 vp->v_usecount = 1; 436 vp->v_data = 0; 437 return (0); 438 } 439 440 /* 441 * Move a vnode from one mount queue to another. 442 */ 443 void 444 insmntque(struct vnode *vp, struct mount *mp) 445 { 446 /* 447 * Delete from old mount point vnode list, if on one. 448 */ 449 if (vp->v_mount != NULL) 450 LIST_REMOVE(vp, v_mntvnodes); 451 /* 452 * Insert into list of vnodes for the new mount point, if available. 453 */ 454 if ((vp->v_mount = mp) != NULL) 455 LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes); 456 } 457 458 /* 459 * Create a vnode for a block device. 460 * Used for root filesystem, argdev, and swap areas. 461 * Also used for memory file system special devices. 462 */ 463 int 464 bdevvp(dev_t dev, struct vnode **vpp) 465 { 466 return (getdevvp(dev, vpp, VBLK)); 467 } 468 469 /* 470 * Create a vnode for a character device. 471 * Used for console handling. 472 */ 473 int 474 cdevvp(dev_t dev, struct vnode **vpp) 475 { 476 return (getdevvp(dev, vpp, VCHR)); 477 } 478 479 /* 480 * Create a vnode for a device. 481 * Used by bdevvp (block device) for root file system etc., 482 * and by cdevvp (character device) for console. 
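 *
 * Hedged sketch of a typical caller setting up a device vnode for
 * swap ("swap_dev" is an illustrative dev_t resolved by the caller):
 *
 *	struct vnode *swapvp;
 *
 *	if (bdevvp(swap_dev, &swapvp))
 *		panic("cannot obtain swap device vnode");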
483 */ 484 int 485 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type) 486 { 487 struct vnode *vp; 488 struct vnode *nvp; 489 int error; 490 491 if (dev == NODEV) { 492 *vpp = NULLVP; 493 return (0); 494 } 495 error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp); 496 if (error) { 497 *vpp = NULLVP; 498 return (error); 499 } 500 vp = nvp; 501 vp->v_type = type; 502 if ((nvp = checkalias(vp, dev, NULL)) != 0) { 503 vput(vp); 504 vp = nvp; 505 } 506 if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY) 507 vp->v_flag |= VISTTY; 508 *vpp = vp; 509 return (0); 510 } 511 512 /* 513 * Check to see if the new vnode represents a special device 514 * for which we already have a vnode (either because of 515 * bdevvp() or because of a different vnode representing 516 * the same block device). If such an alias exists, deallocate 517 * the existing contents and return the aliased vnode. The 518 * caller is responsible for filling it with its new contents. 519 */ 520 struct vnode * 521 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp) 522 { 523 struct proc *p = curproc; 524 struct vnode *vp; 525 struct vnode **vpp; 526 527 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 528 return (NULLVP); 529 530 vpp = &speclisth[SPECHASH(nvp_rdev)]; 531 loop: 532 for (vp = *vpp; vp; vp = vp->v_specnext) { 533 if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) { 534 continue; 535 } 536 /* 537 * Alias, but not in use, so flush it out. 538 */ 539 if (vp->v_usecount == 0) { 540 vgonel(vp, p); 541 goto loop; 542 } 543 if (vget(vp, LK_EXCLUSIVE, p)) { 544 goto loop; 545 } 546 break; 547 } 548 549 /* 550 * Common case is actually in the if statement 551 */ 552 if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) { 553 nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE, 554 M_WAITOK); 555 nvp->v_rdev = nvp_rdev; 556 nvp->v_hashchain = vpp; 557 nvp->v_specnext = *vpp; 558 nvp->v_specmountpoint = NULL; 559 nvp->v_speclockf = NULL; 560 nvp->v_specbitmap = NULL; 561 if (nvp->v_type == VCHR && 562 (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) && 563 (minor(nvp_rdev) >> CLONE_SHIFT == 0)) { 564 if (vp != NULLVP) 565 nvp->v_specbitmap = vp->v_specbitmap; 566 else 567 nvp->v_specbitmap = malloc(CLONE_MAPSZ, 568 M_VNODE, M_WAITOK | M_ZERO); 569 } 570 *vpp = nvp; 571 if (vp != NULLVP) { 572 nvp->v_flag |= VALIASED; 573 vp->v_flag |= VALIASED; 574 vput(vp); 575 } 576 return (NULLVP); 577 } 578 579 /* 580 * This code is the uncommon case. It is called in case 581 * we found an alias that was VT_NON && vtype of VBLK 582 * This means we found a block device that was created 583 * using bdevvp. 584 * An example of such a vnode is the root partition device vnode 585 * created in ffs_mountroot. 586 * 587 * The vnodes created by bdevvp should not be aliased (why?). 588 */ 589 590 VOP_UNLOCK(vp, p); 591 vclean(vp, 0, p); 592 vp->v_op = nvp->v_op; 593 vp->v_tag = nvp->v_tag; 594 nvp->v_type = VNON; 595 insmntque(vp, mp); 596 return (vp); 597 } 598 599 /* 600 * Grab a particular vnode from the free list, increment its 601 * reference count and lock it. If the vnode lock bit is set, 602 * the vnode is being eliminated in vgone. In that case, we 603 * cannot grab it, so the process is awakened when the 604 * transition is completed, and an error code is returned to 605 * indicate that the vnode is no longer usable, possibly 606 * having been changed to a new file system type. 
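 *
 * Illustrative sketch of the usual caller pattern (the "retry" label
 * is hypothetical): a vnode found in some cache is revalidated with
 * vget() and later released again with vput():
 *
 *	if (vget(vp, LK_EXCLUSIVE, curproc) != 0)
 *		goto retry;
 *	... use the now locked and referenced vnode ...
 *	vput(vp);
 *
 * A failing vget() means the vnode was (or is being) reclaimed, and
 * the lookup that produced it should be repeated.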
607 */ 608 int 609 vget(struct vnode *vp, int flags, struct proc *p) 610 { 611 int error, s, onfreelist; 612 613 /* 614 * If the vnode is in the process of being cleaned out for 615 * another use, we wait for the cleaning to finish and then 616 * return failure. Cleaning is determined by checking that 617 * the VXLOCK flag is set. 618 */ 619 620 if (vp->v_flag & VXLOCK) { 621 if (flags & LK_NOWAIT) { 622 return (EBUSY); 623 } 624 625 vp->v_flag |= VXWANT; 626 tsleep(vp, PINOD, "vget", 0); 627 return (ENOENT); 628 } 629 630 onfreelist = vp->v_bioflag & VBIOONFREELIST; 631 if (vp->v_usecount == 0 && onfreelist) { 632 s = splbio(); 633 if (vp->v_holdcnt > 0) 634 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist); 635 else 636 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 637 vp->v_bioflag &= ~VBIOONFREELIST; 638 splx(s); 639 } 640 641 vp->v_usecount++; 642 if (flags & LK_TYPE_MASK) { 643 if ((error = vn_lock(vp, flags, p)) != 0) { 644 vp->v_usecount--; 645 if (vp->v_usecount == 0 && onfreelist) 646 vputonfreelist(vp); 647 } 648 return (error); 649 } 650 651 return (0); 652 } 653 654 655 /* Vnode reference. */ 656 void 657 vref(struct vnode *vp) 658 { 659 #ifdef DIAGNOSTIC 660 if (vp->v_usecount == 0) 661 panic("vref used where vget required"); 662 if (vp->v_type == VNON) 663 panic("vref on a VNON vnode"); 664 #endif 665 vp->v_usecount++; 666 } 667 668 void 669 vputonfreelist(struct vnode *vp) 670 { 671 int s; 672 struct freelst *lst; 673 674 s = splbio(); 675 #ifdef DIAGNOSTIC 676 if (vp->v_usecount != 0) 677 panic("Use count is not zero!"); 678 679 if (vp->v_bioflag & VBIOONFREELIST) { 680 vprint("vnode already on free list: ", vp); 681 panic("vnode already on free list"); 682 } 683 #endif 684 685 vp->v_bioflag |= VBIOONFREELIST; 686 687 if (vp->v_holdcnt > 0) 688 lst = &vnode_hold_list; 689 else 690 lst = &vnode_free_list; 691 692 if (vp->v_type == VBAD) 693 TAILQ_INSERT_HEAD(lst, vp, v_freelist); 694 else 695 TAILQ_INSERT_TAIL(lst, vp, v_freelist); 696 697 splx(s); 698 } 699 700 /* 701 * vput(), just unlock and vrele() 702 */ 703 void 704 vput(struct vnode *vp) 705 { 706 struct proc *p = curproc; 707 708 #ifdef DIAGNOSTIC 709 if (vp == NULL) 710 panic("vput: null vp"); 711 #endif 712 713 #ifdef DIAGNOSTIC 714 if (vp->v_usecount == 0) { 715 vprint("vput: bad ref count", vp); 716 panic("vput: ref cnt"); 717 } 718 #endif 719 vp->v_usecount--; 720 if (vp->v_usecount > 0) { 721 VOP_UNLOCK(vp, p); 722 return; 723 } 724 725 #ifdef DIAGNOSTIC 726 if (vp->v_writecount != 0) { 727 vprint("vput: bad writecount", vp); 728 panic("vput: v_writecount != 0"); 729 } 730 #endif 731 732 VOP_INACTIVE(vp, p); 733 734 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST)) 735 vputonfreelist(vp); 736 } 737 738 /* 739 * Vnode release - use for active VNODES. 740 * If count drops to zero, call inactive routine and return to freelist. 741 * Returns 0 if it did not sleep. 
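 * The difference from vput() is that vput() expects the vnode to be
 * locked on entry and releases that lock, whereas vrele() is used on
 * an unlocked vnode and only takes the lock itself (and thus may
 * sleep) when the use count drops to zero and the vnode must be
 * inactivated.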
742 */ 743 int 744 vrele(struct vnode *vp) 745 { 746 struct proc *p = curproc; 747 748 #ifdef DIAGNOSTIC 749 if (vp == NULL) 750 panic("vrele: null vp"); 751 #endif 752 #ifdef DIAGNOSTIC 753 if (vp->v_usecount == 0) { 754 vprint("vrele: bad ref count", vp); 755 panic("vrele: ref cnt"); 756 } 757 #endif 758 vp->v_usecount--; 759 if (vp->v_usecount > 0) { 760 return (0); 761 } 762 763 #ifdef DIAGNOSTIC 764 if (vp->v_writecount != 0) { 765 vprint("vrele: bad writecount", vp); 766 panic("vrele: v_writecount != 0"); 767 } 768 #endif 769 770 if (vn_lock(vp, LK_EXCLUSIVE, p)) { 771 #ifdef DIAGNOSTIC 772 vprint("vrele: cannot lock", vp); 773 #endif 774 return (1); 775 } 776 777 VOP_INACTIVE(vp, p); 778 779 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST)) 780 vputonfreelist(vp); 781 return (1); 782 } 783 784 /* Page or buffer structure gets a reference. */ 785 void 786 vhold(struct vnode *vp) 787 { 788 /* 789 * If it is on the freelist and the hold count is currently 790 * zero, move it to the hold list. 791 */ 792 if ((vp->v_bioflag & VBIOONFREELIST) && 793 vp->v_holdcnt == 0 && vp->v_usecount == 0) { 794 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 795 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist); 796 } 797 vp->v_holdcnt++; 798 } 799 800 /* Lose interest in a vnode. */ 801 void 802 vdrop(struct vnode *vp) 803 { 804 #ifdef DIAGNOSTIC 805 if (vp->v_holdcnt == 0) 806 panic("vdrop: zero holdcnt"); 807 #endif 808 809 vp->v_holdcnt--; 810 811 /* 812 * If it is on the holdlist and the hold count drops to 813 * zero, move it to the free list. 814 */ 815 if ((vp->v_bioflag & VBIOONFREELIST) && 816 vp->v_holdcnt == 0 && vp->v_usecount == 0) { 817 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist); 818 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 819 } 820 } 821 822 /* 823 * Remove any vnodes in the vnode table belonging to mount point mp. 824 * 825 * If MNT_NOFORCE is specified, there should not be any active ones, 826 * return error if any are found (nb: this is a user error, not a 827 * system error). If MNT_FORCE is specified, detach any active vnodes 828 * that are found. 829 */ 830 #ifdef DEBUG 831 int busyprt = 0; /* print out busy vnodes */ 832 struct ctldebug debug1 = { "busyprt", &busyprt }; 833 #endif 834 835 int 836 vfs_mount_foreach_vnode(struct mount *mp, 837 int (*func)(struct vnode *, void *), void *arg) { 838 struct vnode *vp, *nvp; 839 int error = 0; 840 841 loop: 842 LIST_FOREACH_SAFE(vp , &mp->mnt_vnodelist, v_mntvnodes, nvp) { 843 if (vp->v_mount != mp) 844 goto loop; 845 846 error = func(vp, arg); 847 848 if (error != 0) 849 break; 850 } 851 852 return (error); 853 } 854 855 struct vflush_args { 856 struct vnode *skipvp; 857 int busy; 858 int flags; 859 }; 860 861 int 862 vflush_vnode(struct vnode *vp, void *arg) 863 { 864 struct vflush_args *va = arg; 865 struct proc *p = curproc; 866 867 if (vp == va->skipvp) { 868 return (0); 869 } 870 871 if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 872 return (0); 873 } 874 875 /* 876 * If WRITECLOSE is set, only flush out regular file 877 * vnodes open for writing. 878 */ 879 if ((va->flags & WRITECLOSE) && 880 (vp->v_writecount == 0 || vp->v_type != VREG)) { 881 return (0); 882 } 883 884 /* 885 * With v_usecount == 0, all we need to do is clear 886 * out the vnode data structures and we are done. 887 */ 888 if (vp->v_usecount == 0) { 889 vgonel(vp, p); 890 return (0); 891 } 892 893 /* 894 * If FORCECLOSE is set, forcibly close the vnode. 
895 * For block or character devices, revert to an 896 * anonymous device. For all other files, just kill them. 897 */ 898 if (va->flags & FORCECLOSE) { 899 if (vp->v_type != VBLK && vp->v_type != VCHR) { 900 vgonel(vp, p); 901 } else { 902 vclean(vp, 0, p); 903 vp->v_op = &spec_vops; 904 insmntque(vp, NULL); 905 } 906 return (0); 907 } 908 909 /* 910 * If set, this is allowed to ignore vnodes which don't 911 * have changes pending to disk. 912 * XXX Might be nice to check per-fs "inode" flags, but 913 * generally the filesystem is sync'd already, right? 914 */ 915 if ((va->flags & IGNORECLEAN) && 916 LIST_EMPTY(&vp->v_dirtyblkhd)) 917 return (0); 918 919 #ifdef DEBUG 920 if (busyprt) 921 vprint("vflush: busy vnode", vp); 922 #endif 923 va->busy++; 924 return (0); 925 } 926 927 int 928 vflush(struct mount *mp, struct vnode *skipvp, int flags) 929 { 930 struct vflush_args va; 931 va.skipvp = skipvp; 932 va.busy = 0; 933 va.flags = flags; 934 935 vfs_mount_foreach_vnode(mp, vflush_vnode, &va); 936 937 if (va.busy) 938 return (EBUSY); 939 return (0); 940 } 941 942 /* 943 * Disassociate the underlying file system from a vnode. 944 */ 945 void 946 vclean(struct vnode *vp, int flags, struct proc *p) 947 { 948 int active; 949 950 /* 951 * Check to see if the vnode is in use. 952 * If so we have to reference it before we clean it out 953 * so that its count cannot fall to zero and generate a 954 * race against ourselves to recycle it. 955 */ 956 if ((active = vp->v_usecount) != 0) 957 vp->v_usecount++; 958 959 /* 960 * Prevent the vnode from being recycled or 961 * brought into use while we clean it out. 962 */ 963 if (vp->v_flag & VXLOCK) 964 panic("vclean: deadlock"); 965 vp->v_flag |= VXLOCK; 966 /* 967 * Even if the count is zero, the VOP_INACTIVE routine may still 968 * have the object locked while it cleans it out. The VOP_LOCK 969 * ensures that the VOP_INACTIVE routine is done with its work. 970 * For active vnodes, it ensures that no other activity can 971 * occur while the underlying object is being cleaned out. 972 */ 973 VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p); 974 975 /* 976 * Clean out any VM data associated with the vnode. 977 */ 978 uvm_vnp_terminate(vp); 979 /* 980 * Clean out any buffers associated with the vnode. 981 */ 982 if (flags & DOCLOSE) 983 vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); 984 /* 985 * If purging an active vnode, it must be closed and 986 * deactivated before being reclaimed. Note that the 987 * VOP_INACTIVE will unlock the vnode 988 */ 989 if (active) { 990 if (flags & DOCLOSE) 991 VOP_CLOSE(vp, FNONBLOCK, NOCRED, p); 992 VOP_INACTIVE(vp, p); 993 } else { 994 /* 995 * Any other processes trying to obtain this lock must first 996 * wait for VXLOCK to clear, then call the new lock operation. 997 */ 998 VOP_UNLOCK(vp, p); 999 } 1000 1001 /* 1002 * Reclaim the vnode. 1003 */ 1004 if (VOP_RECLAIM(vp, p)) 1005 panic("vclean: cannot reclaim"); 1006 if (active) { 1007 vp->v_usecount--; 1008 if (vp->v_usecount == 0) { 1009 if (vp->v_holdcnt > 0) 1010 panic("vclean: not clean"); 1011 vputonfreelist(vp); 1012 } 1013 } 1014 cache_purge(vp); 1015 1016 /* 1017 * Done with purge, notify sleepers of the grim news. 1018 */ 1019 vp->v_op = &dead_vops; 1020 VN_KNOTE(vp, NOTE_REVOKE); 1021 vp->v_tag = VT_NON; 1022 vp->v_flag &= ~VXLOCK; 1023 #ifdef VFSLCKDEBUG 1024 vp->v_flag &= ~VLOCKSWORK; 1025 #endif 1026 if (vp->v_flag & VXWANT) { 1027 vp->v_flag &= ~VXWANT; 1028 wakeup(vp); 1029 } 1030 } 1031 1032 /* 1033 * Recycle an unused vnode to the front of the free list. 
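 *
 * Hedged example of the usual caller, a filesystem's inactive routine
 * noticing that the underlying object is gone ("ip->i_mode" is
 * illustrative of an FFS-style inode):
 *
 *	if (ip->i_mode == 0)
 *		vrecycle(vp, curproc);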
1034 */ 1035 int 1036 vrecycle(struct vnode *vp, struct proc *p) 1037 { 1038 if (vp->v_usecount == 0) { 1039 vgonel(vp, p); 1040 return (1); 1041 } 1042 return (0); 1043 } 1044 1045 /* 1046 * Eliminate all activity associated with a vnode 1047 * in preparation for reuse. 1048 */ 1049 void 1050 vgone(struct vnode *vp) 1051 { 1052 struct proc *p = curproc; 1053 vgonel(vp, p); 1054 } 1055 1056 /* 1057 * vgone, with struct proc. 1058 */ 1059 void 1060 vgonel(struct vnode *vp, struct proc *p) 1061 { 1062 struct vnode *vq; 1063 struct vnode *vx; 1064 1065 /* 1066 * If a vgone (or vclean) is already in progress, 1067 * wait until it is done and return. 1068 */ 1069 if (vp->v_flag & VXLOCK) { 1070 vp->v_flag |= VXWANT; 1071 tsleep(vp, PINOD, "vgone", 0); 1072 return; 1073 } 1074 1075 /* 1076 * Clean out the filesystem specific data. 1077 */ 1078 vclean(vp, DOCLOSE, p); 1079 /* 1080 * Delete from old mount point vnode list, if on one. 1081 */ 1082 if (vp->v_mount != NULL) 1083 insmntque(vp, NULL); 1084 /* 1085 * If special device, remove it from special device alias list 1086 * if it is on one. 1087 */ 1088 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) { 1089 if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR && 1090 (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) && 1091 (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) { 1092 free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ); 1093 } 1094 if (*vp->v_hashchain == vp) { 1095 *vp->v_hashchain = vp->v_specnext; 1096 } else { 1097 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1098 if (vq->v_specnext != vp) 1099 continue; 1100 vq->v_specnext = vp->v_specnext; 1101 break; 1102 } 1103 if (vq == NULL) 1104 panic("missing bdev"); 1105 } 1106 if (vp->v_flag & VALIASED) { 1107 vx = NULL; 1108 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1109 if (vq->v_rdev != vp->v_rdev || 1110 vq->v_type != vp->v_type) 1111 continue; 1112 if (vx) 1113 break; 1114 vx = vq; 1115 } 1116 if (vx == NULL) 1117 panic("missing alias"); 1118 if (vq == NULL) 1119 vx->v_flag &= ~VALIASED; 1120 vp->v_flag &= ~VALIASED; 1121 } 1122 free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo)); 1123 vp->v_specinfo = NULL; 1124 } 1125 /* 1126 * If it is on the freelist and not already at the head, 1127 * move it to the head of the list. 1128 */ 1129 vp->v_type = VBAD; 1130 1131 /* 1132 * Move onto the free list, unless we were called from 1133 * getnewvnode and we're not on any free list 1134 */ 1135 if (vp->v_usecount == 0 && 1136 (vp->v_bioflag & VBIOONFREELIST)) { 1137 int s; 1138 1139 s = splbio(); 1140 1141 if (vp->v_holdcnt > 0) 1142 panic("vgonel: not clean"); 1143 1144 if (TAILQ_FIRST(&vnode_free_list) != vp) { 1145 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 1146 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 1147 } 1148 splx(s); 1149 } 1150 } 1151 1152 /* 1153 * Lookup a vnode by device number. 1154 */ 1155 int 1156 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp) 1157 { 1158 struct vnode *vp; 1159 int rc =0; 1160 1161 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) { 1162 if (dev != vp->v_rdev || type != vp->v_type) 1163 continue; 1164 *vpp = vp; 1165 rc = 1; 1166 break; 1167 } 1168 return (rc); 1169 } 1170 1171 /* 1172 * Revoke all the vnodes corresponding to the specified minor number 1173 * range (endpoints inclusive) of the specified major. 
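 *
 * Hedged sketch of the usual caller, a driver detach routine revoking
 * every vnode for its device nodes ("mydevopen" and "mydev_maxminor"
 * are illustrative):
 *
 *	int maj;
 *
 *	for (maj = 0; maj < nblkdev; maj++)
 *		if (bdevsw[maj].d_open == mydevopen)
 *			vdevgone(maj, 0, mydev_maxminor, VBLK);
 *	for (maj = 0; maj < nchrdev; maj++)
 *		if (cdevsw[maj].d_open == mydevopen)
 *			vdevgone(maj, 0, mydev_maxminor, VCHR);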
1174 */ 1175 void 1176 vdevgone(int maj, int minl, int minh, enum vtype type) 1177 { 1178 struct vnode *vp; 1179 int mn; 1180 1181 for (mn = minl; mn <= minh; mn++) 1182 if (vfinddev(makedev(maj, mn), type, &vp)) 1183 VOP_REVOKE(vp, REVOKEALL); 1184 } 1185 1186 /* 1187 * Calculate the total number of references to a special device. 1188 */ 1189 int 1190 vcount(struct vnode *vp) 1191 { 1192 struct vnode *vq, *vnext; 1193 int count; 1194 1195 loop: 1196 if ((vp->v_flag & VALIASED) == 0) 1197 return (vp->v_usecount); 1198 for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) { 1199 vnext = vq->v_specnext; 1200 if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type) 1201 continue; 1202 /* 1203 * Alias, but not in use, so flush it out. 1204 */ 1205 if (vq->v_usecount == 0 && vq != vp) { 1206 vgone(vq); 1207 goto loop; 1208 } 1209 count += vq->v_usecount; 1210 } 1211 return (count); 1212 } 1213 1214 #if defined(DEBUG) || defined(DIAGNOSTIC) 1215 /* 1216 * Print out a description of a vnode. 1217 */ 1218 static char *typename[] = 1219 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" }; 1220 1221 void 1222 vprint(char *label, struct vnode *vp) 1223 { 1224 char buf[64]; 1225 1226 if (label != NULL) 1227 printf("%s: ", label); 1228 printf("%p, type %s, use %u, write %u, hold %u,", 1229 vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount, 1230 vp->v_holdcnt); 1231 buf[0] = '\0'; 1232 if (vp->v_flag & VROOT) 1233 strlcat(buf, "|VROOT", sizeof buf); 1234 if (vp->v_flag & VTEXT) 1235 strlcat(buf, "|VTEXT", sizeof buf); 1236 if (vp->v_flag & VSYSTEM) 1237 strlcat(buf, "|VSYSTEM", sizeof buf); 1238 if (vp->v_flag & VXLOCK) 1239 strlcat(buf, "|VXLOCK", sizeof buf); 1240 if (vp->v_flag & VXWANT) 1241 strlcat(buf, "|VXWANT", sizeof buf); 1242 if (vp->v_bioflag & VBIOWAIT) 1243 strlcat(buf, "|VBIOWAIT", sizeof buf); 1244 if (vp->v_bioflag & VBIOONFREELIST) 1245 strlcat(buf, "|VBIOONFREELIST", sizeof buf); 1246 if (vp->v_bioflag & VBIOONSYNCLIST) 1247 strlcat(buf, "|VBIOONSYNCLIST", sizeof buf); 1248 if (vp->v_flag & VALIASED) 1249 strlcat(buf, "|VALIASED", sizeof buf); 1250 if (buf[0] != '\0') 1251 printf(" flags (%s)", &buf[1]); 1252 if (vp->v_data == NULL) { 1253 printf("\n"); 1254 } else { 1255 printf("\n\t"); 1256 VOP_PRINT(vp); 1257 } 1258 } 1259 #endif /* DEBUG || DIAGNOSTIC */ 1260 1261 #ifdef DEBUG 1262 /* 1263 * List all of the locked vnodes in the system. 1264 * Called when debugging the kernel. 1265 */ 1266 void 1267 printlockedvnodes(void) 1268 { 1269 struct mount *mp; 1270 struct vnode *vp; 1271 1272 printf("Locked vnodes\n"); 1273 1274 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 1275 if (vfs_busy(mp, VB_READ|VB_NOWAIT)) 1276 continue; 1277 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { 1278 if (VOP_ISLOCKED(vp)) 1279 vprint(NULL, vp); 1280 } 1281 vfs_unbusy(mp); 1282 } 1283 1284 } 1285 #endif 1286 1287 /* 1288 * Top level filesystem related information gathering. 
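 * Within this function, name[0] is either a filesystem type number,
 * in which case the remaining names are passed through to that
 * filesystem's own vfs_sysctl handler, or VFS_GENERIC, in which case
 * name[1] selects VFS_MAXTYPENUM, VFS_CONF (with name[2] holding the
 * type number of interest) or VFS_BCACHESTAT, all handled below.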
1289 */ 1290 int 1291 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 1292 size_t newlen, struct proc *p) 1293 { 1294 struct vfsconf *vfsp, *tmpvfsp; 1295 int ret; 1296 1297 /* all sysctl names at this level are at least name and field */ 1298 if (namelen < 2) 1299 return (ENOTDIR); /* overloaded */ 1300 1301 if (name[0] != VFS_GENERIC) { 1302 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 1303 if (vfsp->vfc_typenum == name[0]) 1304 break; 1305 1306 if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL) 1307 return (EOPNOTSUPP); 1308 1309 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 1310 oldp, oldlenp, newp, newlen, p)); 1311 } 1312 1313 switch (name[1]) { 1314 case VFS_MAXTYPENUM: 1315 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf)); 1316 1317 case VFS_CONF: 1318 if (namelen < 3) 1319 return (ENOTDIR); /* overloaded */ 1320 1321 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 1322 if (vfsp->vfc_typenum == name[2]) 1323 break; 1324 1325 if (vfsp == NULL) 1326 return (EOPNOTSUPP); 1327 1328 /* Make a copy, clear out kernel pointers */ 1329 tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO); 1330 memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp)); 1331 tmpvfsp->vfc_vfsops = NULL; 1332 tmpvfsp->vfc_next = NULL; 1333 1334 ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp, 1335 sizeof(struct vfsconf)); 1336 1337 free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp)); 1338 return (ret); 1339 case VFS_BCACHESTAT: /* buffer cache statistics */ 1340 ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats, 1341 sizeof(struct bcachestats)); 1342 return(ret); 1343 } 1344 return (EOPNOTSUPP); 1345 } 1346 1347 /* 1348 * Check to see if a filesystem is mounted on a block device. 1349 */ 1350 int 1351 vfs_mountedon(struct vnode *vp) 1352 { 1353 struct vnode *vq; 1354 int error = 0; 1355 1356 if (vp->v_specmountpoint != NULL) 1357 return (EBUSY); 1358 if (vp->v_flag & VALIASED) { 1359 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1360 if (vq->v_rdev != vp->v_rdev || 1361 vq->v_type != vp->v_type) 1362 continue; 1363 if (vq->v_specmountpoint != NULL) { 1364 error = EBUSY; 1365 break; 1366 } 1367 } 1368 } 1369 return (error); 1370 } 1371 1372 #ifdef NFSSERVER 1373 /* 1374 * Build hash lists of net addresses and hang them off the mount point. 1375 * Called by vfs_export() to set up the lists of export addresses. 
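 * Each export entry is a struct netcred with the network address (and
 * optional mask) copied in right behind it; the entry is keyed into a
 * per-mount radix tree by that address.  Only AF_INET addresses are
 * accepted here.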
1376 */ 1377 int 1378 vfs_hang_addrlist(struct mount *mp, struct netexport *nep, 1379 struct export_args *argp) 1380 { 1381 struct netcred *np; 1382 struct radix_node_head *rnh; 1383 int nplen, i; 1384 struct radix_node *rn; 1385 struct sockaddr *saddr, *smask = 0; 1386 int error; 1387 1388 if (argp->ex_addrlen == 0) { 1389 if (mp->mnt_flag & MNT_DEFEXPORTED) 1390 return (EPERM); 1391 np = &nep->ne_defexported; 1392 /* fill in the kernel's ucred from userspace's xucred */ 1393 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon))) 1394 return (error); 1395 mp->mnt_flag |= MNT_DEFEXPORTED; 1396 goto finish; 1397 } 1398 if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN || 1399 argp->ex_addrlen < 0 || argp->ex_masklen < 0) 1400 return (EINVAL); 1401 nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 1402 np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO); 1403 saddr = (struct sockaddr *)(np + 1); 1404 error = copyin(argp->ex_addr, saddr, argp->ex_addrlen); 1405 if (error) 1406 goto out; 1407 if (saddr->sa_len > argp->ex_addrlen) 1408 saddr->sa_len = argp->ex_addrlen; 1409 if (argp->ex_masklen) { 1410 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen); 1411 error = copyin(argp->ex_mask, smask, argp->ex_masklen); 1412 if (error) 1413 goto out; 1414 if (smask->sa_len > argp->ex_masklen) 1415 smask->sa_len = argp->ex_masklen; 1416 } 1417 /* fill in the kernel's ucred from userspace's xucred */ 1418 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon))) 1419 goto out; 1420 i = saddr->sa_family; 1421 switch (i) { 1422 case AF_INET: 1423 if ((rnh = nep->ne_rtable_inet) == NULL) { 1424 if (!rn_inithead((void **)&nep->ne_rtable_inet, 1425 offsetof(struct sockaddr_in, sin_addr))) { 1426 error = ENOBUFS; 1427 goto out; 1428 } 1429 rnh = nep->ne_rtable_inet; 1430 } 1431 break; 1432 default: 1433 error = EINVAL; 1434 goto out; 1435 } 1436 rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0); 1437 if (rn == 0 || np != (struct netcred *)rn) { /* already exists */ 1438 error = EPERM; 1439 goto out; 1440 } 1441 finish: 1442 np->netc_exflags = argp->ex_flags; 1443 return (0); 1444 out: 1445 free(np, M_NETADDR, nplen); 1446 return (error); 1447 } 1448 1449 int 1450 vfs_free_netcred(struct radix_node *rn, void *w, u_int id) 1451 { 1452 struct radix_node_head *rnh = (struct radix_node_head *)w; 1453 1454 rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL); 1455 free(rn, M_NETADDR, 0); 1456 return (0); 1457 } 1458 1459 /* 1460 * Free the net address hash lists that are hanging off the mount points. 
1461 */ 1462 void 1463 vfs_free_addrlist(struct netexport *nep) 1464 { 1465 struct radix_node_head *rnh; 1466 1467 if ((rnh = nep->ne_rtable_inet) != NULL) { 1468 rn_walktree(rnh, vfs_free_netcred, rnh); 1469 free(rnh, M_RTABLE, 0); 1470 nep->ne_rtable_inet = NULL; 1471 } 1472 } 1473 #endif /* NFSSERVER */ 1474 1475 int 1476 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp) 1477 { 1478 #ifdef NFSSERVER 1479 int error; 1480 1481 if (argp->ex_flags & MNT_DELEXPORT) { 1482 vfs_free_addrlist(nep); 1483 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 1484 } 1485 if (argp->ex_flags & MNT_EXPORTED) { 1486 if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0) 1487 return (error); 1488 mp->mnt_flag |= MNT_EXPORTED; 1489 } 1490 return (0); 1491 #else 1492 return (ENOTSUP); 1493 #endif /* NFSSERVER */ 1494 } 1495 1496 struct netcred * 1497 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam) 1498 { 1499 #ifdef NFSSERVER 1500 struct netcred *np; 1501 struct radix_node_head *rnh; 1502 struct sockaddr *saddr; 1503 1504 np = NULL; 1505 if (mp->mnt_flag & MNT_EXPORTED) { 1506 /* 1507 * Lookup in the export list first. 1508 */ 1509 if (nam != NULL) { 1510 saddr = mtod(nam, struct sockaddr *); 1511 switch(saddr->sa_family) { 1512 case AF_INET: 1513 rnh = nep->ne_rtable_inet; 1514 break; 1515 default: 1516 rnh = NULL; 1517 break; 1518 } 1519 if (rnh != NULL) 1520 np = (struct netcred *)rn_match(saddr, rnh); 1521 } 1522 /* 1523 * If no address match, use the default if it exists. 1524 */ 1525 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 1526 np = &nep->ne_defexported; 1527 } 1528 return (np); 1529 #else 1530 return (NULL); 1531 #endif /* NFSSERVER */ 1532 } 1533 1534 /* 1535 * Do the usual access checking. 1536 * file_mode, uid and gid are from the vnode in question, 1537 * while acc_mode and cred are from the VOP_ACCESS parameter list 1538 */ 1539 int 1540 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid, 1541 mode_t acc_mode, struct ucred *cred) 1542 { 1543 mode_t mask; 1544 1545 /* User id 0 always gets read/write access. */ 1546 if (cred->cr_uid == 0) { 1547 /* For VEXEC, at least one of the execute bits must be set. */ 1548 if ((acc_mode & VEXEC) && type != VDIR && 1549 (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0) 1550 return EACCES; 1551 return 0; 1552 } 1553 1554 mask = 0; 1555 1556 /* Otherwise, check the owner. */ 1557 if (cred->cr_uid == uid) { 1558 if (acc_mode & VEXEC) 1559 mask |= S_IXUSR; 1560 if (acc_mode & VREAD) 1561 mask |= S_IRUSR; 1562 if (acc_mode & VWRITE) 1563 mask |= S_IWUSR; 1564 return (file_mode & mask) == mask ? 0 : EACCES; 1565 } 1566 1567 /* Otherwise, check the groups. */ 1568 if (groupmember(gid, cred)) { 1569 if (acc_mode & VEXEC) 1570 mask |= S_IXGRP; 1571 if (acc_mode & VREAD) 1572 mask |= S_IRGRP; 1573 if (acc_mode & VWRITE) 1574 mask |= S_IWGRP; 1575 return (file_mode & mask) == mask ? 0 : EACCES; 1576 } 1577 1578 /* Otherwise, check everyone else. */ 1579 if (acc_mode & VEXEC) 1580 mask |= S_IXOTH; 1581 if (acc_mode & VREAD) 1582 mask |= S_IROTH; 1583 if (acc_mode & VWRITE) 1584 mask |= S_IWOTH; 1585 return (file_mode & mask) == mask ? 
0 : EACCES; 1586 } 1587 1588 struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall"); 1589 1590 int 1591 vfs_stall(struct proc *p, int stall) 1592 { 1593 struct mount *mp, *nmp; 1594 int allerror = 0, error; 1595 1596 if (stall) 1597 rw_enter_write(&vfs_stall_lock); 1598 1599 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) { 1600 if (stall) { 1601 error = vfs_busy(mp, VB_WRITE|VB_WAIT); 1602 if (error) { 1603 printf("%s: busy\n", mp->mnt_stat.f_mntonname); 1604 allerror = error; 1605 continue; 1606 } 1607 uvm_vnp_sync(mp); 1608 error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p); 1609 if (error) { 1610 printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname); 1611 vfs_unbusy(mp); 1612 allerror = error; 1613 continue; 1614 } 1615 mp->mnt_flag |= MNT_STALLED; 1616 } else { 1617 if (mp->mnt_flag & MNT_STALLED) { 1618 vfs_unbusy(mp); 1619 mp->mnt_flag &= ~MNT_STALLED; 1620 } 1621 } 1622 } 1623 1624 if (!stall) 1625 rw_exit_write(&vfs_stall_lock); 1626 1627 return (allerror); 1628 } 1629 1630 /* 1631 * Unmount all file systems. 1632 * We traverse the list in reverse order under the assumption that doing so 1633 * will avoid needing to worry about dependencies. 1634 */ 1635 void 1636 vfs_unmountall(void) 1637 { 1638 struct mount *mp, *nmp; 1639 int allerror, error, again = 1; 1640 1641 retry: 1642 allerror = 0; 1643 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) { 1644 if (vfs_busy(mp, VB_WRITE|VB_NOWAIT)) 1645 continue; 1646 /* XXX Here is a race, the next pointer is not locked. */ 1647 if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) { 1648 printf("unmount of %s failed with error %d\n", 1649 mp->mnt_stat.f_mntonname, error); 1650 allerror = 1; 1651 } 1652 } 1653 1654 if (allerror) { 1655 printf("WARNING: some file systems would not unmount\n"); 1656 if (again) { 1657 printf("retrying\n"); 1658 again = 0; 1659 goto retry; 1660 } 1661 } 1662 } 1663 1664 /* 1665 * Sync and unmount file systems before shutting down. 1666 */ 1667 void 1668 vfs_shutdown(struct proc *p) 1669 { 1670 #ifdef ACCOUNTING 1671 acct_shutdown(); 1672 #endif 1673 1674 printf("syncing disks... "); 1675 1676 if (panicstr == 0) { 1677 /* Sync before unmount, in case we hang on something. */ 1678 sys_sync(p, NULL, NULL); 1679 vfs_unmountall(); 1680 } 1681 1682 #if NSOFTRAID > 0 1683 sr_quiesce(); 1684 #endif 1685 1686 if (vfs_syncwait(p, 1)) 1687 printf("giving up\n"); 1688 else 1689 printf("done\n"); 1690 } 1691 1692 /* 1693 * perform sync() operation and wait for buffers to flush. 1694 */ 1695 int 1696 vfs_syncwait(struct proc *p, int verbose) 1697 { 1698 struct buf *bp; 1699 int iter, nbusy, dcount, s; 1700 #ifdef MULTIPROCESSOR 1701 int hold_count; 1702 #endif 1703 1704 sys_sync(p, NULL, NULL); 1705 1706 /* Wait for sync to finish. */ 1707 dcount = 10000; 1708 for (iter = 0; iter < 20; iter++) { 1709 nbusy = 0; 1710 LIST_FOREACH(bp, &bufhead, b_list) { 1711 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY) 1712 nbusy++; 1713 /* 1714 * With soft updates, some buffers that are 1715 * written will be remarked as dirty until other 1716 * buffers are written. 
1717 */ 1718 if (bp->b_flags & B_DELWRI) { 1719 s = splbio(); 1720 bremfree(bp); 1721 buf_acquire(bp); 1722 splx(s); 1723 nbusy++; 1724 bawrite(bp); 1725 if (dcount-- <= 0) { 1726 if (verbose) 1727 printf("softdep "); 1728 return 1; 1729 } 1730 } 1731 } 1732 if (nbusy == 0) 1733 break; 1734 if (verbose) 1735 printf("%d ", nbusy); 1736 #ifdef MULTIPROCESSOR 1737 if (_kernel_lock_held()) 1738 hold_count = __mp_release_all(&kernel_lock); 1739 else 1740 hold_count = 0; 1741 #endif 1742 DELAY(40000 * iter); 1743 #ifdef MULTIPROCESSOR 1744 if (hold_count) 1745 __mp_acquire_count(&kernel_lock, hold_count); 1746 #endif 1747 } 1748 1749 return nbusy; 1750 } 1751 1752 /* 1753 * posix file system related system variables. 1754 */ 1755 int 1756 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, 1757 void *newp, size_t newlen, struct proc *p) 1758 { 1759 /* all sysctl names at this level are terminal */ 1760 if (namelen != 1) 1761 return (ENOTDIR); 1762 1763 switch (name[0]) { 1764 case FS_POSIX_SETUID: 1765 if (newp && securelevel > 0) 1766 return (EPERM); 1767 return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear)); 1768 default: 1769 return (EOPNOTSUPP); 1770 } 1771 /* NOTREACHED */ 1772 } 1773 1774 /* 1775 * file system related system variables. 1776 */ 1777 int 1778 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 1779 size_t newlen, struct proc *p) 1780 { 1781 sysctlfn *fn; 1782 1783 switch (name[0]) { 1784 case FS_POSIX: 1785 fn = fs_posix_sysctl; 1786 break; 1787 default: 1788 return (EOPNOTSUPP); 1789 } 1790 return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p); 1791 } 1792 1793 1794 /* 1795 * Routines dealing with vnodes and buffers 1796 */ 1797 1798 /* 1799 * Wait for all outstanding I/Os to complete 1800 * 1801 * Manipulates v_numoutput. Must be called at splbio() 1802 */ 1803 int 1804 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo) 1805 { 1806 int error = 0; 1807 1808 splassert(IPL_BIO); 1809 1810 while (vp->v_numoutput) { 1811 vp->v_bioflag |= VBIOWAIT; 1812 error = tsleep(&vp->v_numoutput, 1813 slpflag | (PRIBIO + 1), wmesg, timeo); 1814 if (error) 1815 break; 1816 } 1817 1818 return (error); 1819 } 1820 1821 /* 1822 * Update outstanding I/O count and do wakeup if requested. 1823 * 1824 * Manipulates v_numoutput. Must be called at splbio() 1825 */ 1826 void 1827 vwakeup(struct vnode *vp) 1828 { 1829 splassert(IPL_BIO); 1830 1831 if (vp != NULL) { 1832 if (vp->v_numoutput-- == 0) 1833 panic("vwakeup: neg numoutput"); 1834 if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) { 1835 vp->v_bioflag &= ~VBIOWAIT; 1836 wakeup(&vp->v_numoutput); 1837 } 1838 } 1839 } 1840 1841 /* 1842 * Flush out and invalidate all buffers associated with a vnode. 1843 * Called with the underlying object locked. 
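 * If V_SAVE is given, dirty buffers are first written out via
 * VOP_FSYNC() so no data is lost; if V_SAVEMETA is given, buffers
 * with negative logical block numbers (indirect block metadata) are
 * left attached to the vnode instead of being invalidated.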
1844 */ 1845 int 1846 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p, 1847 int slpflag, int slptimeo) 1848 { 1849 struct buf *bp; 1850 struct buf *nbp, *blist; 1851 int s, error; 1852 1853 #ifdef VFSLCKDEBUG 1854 if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp)) 1855 panic("vinvalbuf(): vp isn't locked"); 1856 #endif 1857 1858 if (flags & V_SAVE) { 1859 s = splbio(); 1860 vwaitforio(vp, 0, "vinvalbuf", 0); 1861 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) { 1862 splx(s); 1863 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0) 1864 return (error); 1865 s = splbio(); 1866 if (vp->v_numoutput > 0 || 1867 !LIST_EMPTY(&vp->v_dirtyblkhd)) 1868 panic("vinvalbuf: dirty bufs"); 1869 } 1870 splx(s); 1871 } 1872 loop: 1873 s = splbio(); 1874 for (;;) { 1875 if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) && 1876 (flags & V_SAVEMETA)) 1877 while (blist && blist->b_lblkno < 0) 1878 blist = LIST_NEXT(blist, b_vnbufs); 1879 if (blist == NULL && 1880 (blist = LIST_FIRST(&vp->v_dirtyblkhd)) && 1881 (flags & V_SAVEMETA)) 1882 while (blist && blist->b_lblkno < 0) 1883 blist = LIST_NEXT(blist, b_vnbufs); 1884 if (!blist) 1885 break; 1886 1887 for (bp = blist; bp; bp = nbp) { 1888 nbp = LIST_NEXT(bp, b_vnbufs); 1889 if (flags & V_SAVEMETA && bp->b_lblkno < 0) 1890 continue; 1891 if (bp->b_flags & B_BUSY) { 1892 bp->b_flags |= B_WANTED; 1893 error = tsleep(bp, slpflag | (PRIBIO + 1), 1894 "vinvalbuf", slptimeo); 1895 if (error) { 1896 splx(s); 1897 return (error); 1898 } 1899 break; 1900 } 1901 bremfree(bp); 1902 /* 1903 * XXX Since there are no node locks for NFS, I believe 1904 * there is a slight chance that a delayed write will 1905 * occur while sleeping just above, so check for it. 1906 */ 1907 if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) { 1908 buf_acquire(bp); 1909 splx(s); 1910 (void) VOP_BWRITE(bp); 1911 goto loop; 1912 } 1913 buf_acquire_nomap(bp); 1914 bp->b_flags |= B_INVAL; 1915 brelse(bp); 1916 } 1917 } 1918 if (!(flags & V_SAVEMETA) && 1919 (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd))) 1920 panic("vinvalbuf: flush failed"); 1921 splx(s); 1922 return (0); 1923 } 1924 1925 void 1926 vflushbuf(struct vnode *vp, int sync) 1927 { 1928 struct buf *bp, *nbp; 1929 int s; 1930 1931 loop: 1932 s = splbio(); 1933 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) { 1934 if ((bp->b_flags & B_BUSY)) 1935 continue; 1936 if ((bp->b_flags & B_DELWRI) == 0) 1937 panic("vflushbuf: not dirty"); 1938 bremfree(bp); 1939 buf_acquire(bp); 1940 splx(s); 1941 /* 1942 * Wait for I/O associated with indirect blocks to complete, 1943 * since there is no way to quickly wait for them below. 1944 */ 1945 if (bp->b_vp == vp || sync == 0) 1946 (void) bawrite(bp); 1947 else 1948 (void) bwrite(bp); 1949 goto loop; 1950 } 1951 if (sync == 0) { 1952 splx(s); 1953 return; 1954 } 1955 vwaitforio(vp, 0, "vflushbuf", 0); 1956 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) { 1957 splx(s); 1958 #ifdef DIAGNOSTIC 1959 vprint("vflushbuf: dirty", vp); 1960 #endif 1961 goto loop; 1962 } 1963 splx(s); 1964 } 1965 1966 /* 1967 * Associate a buffer with a vnode. 1968 * 1969 * Manipulates buffer vnode queues. Must be called at splbio(). 1970 */ 1971 void 1972 bgetvp(struct vnode *vp, struct buf *bp) 1973 { 1974 splassert(IPL_BIO); 1975 1976 1977 if (bp->b_vp) 1978 panic("bgetvp: not free"); 1979 vhold(vp); 1980 bp->b_vp = vp; 1981 if (vp->v_type == VBLK || vp->v_type == VCHR) 1982 bp->b_dev = vp->v_rdev; 1983 else 1984 bp->b_dev = NODEV; 1985 /* 1986 * Insert onto list for new vnode. 
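 * A freshly associated buffer always starts out on v_cleanblkhd;
 * reassignbuf() moves it to v_dirtyblkhd (and puts the vnode on the
 * syncer worklist) once the buffer is marked B_DELWRI.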
1987 */ 1988 bufinsvn(bp, &vp->v_cleanblkhd); 1989 } 1990 1991 /* 1992 * Disassociate a buffer from a vnode. 1993 * 1994 * Manipulates vnode buffer queues. Must be called at splbio(). 1995 */ 1996 void 1997 brelvp(struct buf *bp) 1998 { 1999 struct vnode *vp; 2000 2001 splassert(IPL_BIO); 2002 2003 if ((vp = bp->b_vp) == (struct vnode *) 0) 2004 panic("brelvp: NULL"); 2005 /* 2006 * Delete from old vnode list, if on one. 2007 */ 2008 if (LIST_NEXT(bp, b_vnbufs) != NOLIST) 2009 bufremvn(bp); 2010 if ((vp->v_bioflag & VBIOONSYNCLIST) && 2011 LIST_FIRST(&vp->v_dirtyblkhd) == NULL) { 2012 vp->v_bioflag &= ~VBIOONSYNCLIST; 2013 LIST_REMOVE(vp, v_synclist); 2014 } 2015 bp->b_vp = NULL; 2016 2017 vdrop(vp); 2018 } 2019 2020 /* 2021 * Replaces the current vnode associated with the buffer, if any, 2022 * with a new vnode. 2023 * 2024 * If an output I/O is pending on the buffer, the old vnode 2025 * I/O count is adjusted. 2026 * 2027 * Ignores vnode buffer queues. Must be called at splbio(). 2028 */ 2029 void 2030 buf_replacevnode(struct buf *bp, struct vnode *newvp) 2031 { 2032 struct vnode *oldvp = bp->b_vp; 2033 2034 splassert(IPL_BIO); 2035 2036 if (oldvp) 2037 brelvp(bp); 2038 2039 if ((bp->b_flags & (B_READ | B_DONE)) == 0) { 2040 newvp->v_numoutput++; /* put it on swapdev */ 2041 vwakeup(oldvp); 2042 } 2043 2044 bgetvp(newvp, bp); 2045 bufremvn(bp); 2046 } 2047 2048 /* 2049 * Used to assign buffers to the appropriate clean or dirty list on 2050 * the vnode and to add newly dirty vnodes to the appropriate 2051 * filesystem syncer list. 2052 * 2053 * Manipulates vnode buffer queues. Must be called at splbio(). 2054 */ 2055 void 2056 reassignbuf(struct buf *bp) 2057 { 2058 struct buflists *listheadp; 2059 int delay; 2060 struct vnode *vp = bp->b_vp; 2061 2062 splassert(IPL_BIO); 2063 2064 /* 2065 * Delete from old vnode list, if on one. 2066 */ 2067 if (LIST_NEXT(bp, b_vnbufs) != NOLIST) 2068 bufremvn(bp); 2069 2070 /* 2071 * If dirty, put on list of dirty buffers; 2072 * otherwise insert onto list of clean buffers. 2073 */ 2074 if ((bp->b_flags & B_DELWRI) == 0) { 2075 listheadp = &vp->v_cleanblkhd; 2076 if ((vp->v_bioflag & VBIOONSYNCLIST) && 2077 LIST_FIRST(&vp->v_dirtyblkhd) == NULL) { 2078 vp->v_bioflag &= ~VBIOONSYNCLIST; 2079 LIST_REMOVE(vp, v_synclist); 2080 } 2081 } else { 2082 listheadp = &vp->v_dirtyblkhd; 2083 if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) { 2084 switch (vp->v_type) { 2085 case VDIR: 2086 delay = syncdelay / 2; 2087 break; 2088 case VBLK: 2089 if (vp->v_specmountpoint != NULL) { 2090 delay = syncdelay / 3; 2091 break; 2092 } 2093 /* FALLTHROUGH */ 2094 default: 2095 delay = syncdelay; 2096 } 2097 vn_syncer_add_to_worklist(vp, delay); 2098 } 2099 } 2100 bufinsvn(bp, listheadp); 2101 } 2102 2103 int 2104 vfs_register(struct vfsconf *vfs) 2105 { 2106 struct vfsconf *vfsp; 2107 struct vfsconf **vfspp; 2108 2109 #ifdef DIAGNOSTIC 2110 /* Paranoia? 
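   A vfsconf that is only now being registered should not be in use,
   so a non-zero reference count indicates a caller bug.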
*/ 2111 if (vfs->vfc_refcount != 0) 2112 printf("vfs_register called with vfc_refcount > 0\n"); 2113 #endif 2114 2115 /* Check if filesystem already known */ 2116 for (vfspp = &vfsconf, vfsp = vfsconf; vfsp; 2117 vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) 2118 if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0) 2119 return (EEXIST); 2120 2121 if (vfs->vfc_typenum > maxvfsconf) 2122 maxvfsconf = vfs->vfc_typenum; 2123 2124 vfs->vfc_next = NULL; 2125 2126 /* Add to the end of the list */ 2127 *vfspp = vfs; 2128 2129 /* Call vfs_init() */ 2130 if (vfs->vfc_vfsops->vfs_init) 2131 (*(vfs->vfc_vfsops->vfs_init))(vfs); 2132 2133 return 0; 2134 } 2135 2136 int 2137 vfs_unregister(struct vfsconf *vfs) 2138 { 2139 struct vfsconf *vfsp; 2140 struct vfsconf **vfspp; 2141 int maxtypenum; 2142 2143 /* Find our vfsconf struct */ 2144 for (vfspp = &vfsconf, vfsp = vfsconf; vfsp; 2145 vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) { 2146 if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0) 2147 break; 2148 } 2149 2150 if (!vfsp) /* Not found */ 2151 return (ENOENT); 2152 2153 if (vfsp->vfc_refcount) /* In use */ 2154 return (EBUSY); 2155 2156 /* Remove from list and free */ 2157 *vfspp = vfsp->vfc_next; 2158 2159 maxtypenum = 0; 2160 2161 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2162 if (vfsp->vfc_typenum > maxtypenum) 2163 maxtypenum = vfsp->vfc_typenum; 2164 2165 maxvfsconf = maxtypenum; 2166 return 0; 2167 } 2168 2169 /* 2170 * Check if vnode represents a disk device 2171 */ 2172 int 2173 vn_isdisk(struct vnode *vp, int *errp) 2174 { 2175 if (vp->v_type != VBLK && vp->v_type != VCHR) 2176 return (0); 2177 2178 return (1); 2179 } 2180 2181 #ifdef DDB 2182 #include <machine/db_machdep.h> 2183 #include <ddb/db_interface.h> 2184 2185 void 2186 vfs_buf_print(void *b, int full, 2187 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2188 { 2189 struct buf *bp = b; 2190 2191 (*pr)(" vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n" 2192 " proc %p error %d flags %lb\n", 2193 bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev, 2194 bp->b_proc, bp->b_error, bp->b_flags, B_BITS); 2195 2196 (*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n" 2197 " data %p saveaddr %p dep %p iodone %p\n", 2198 bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, 2199 bp->b_data, bp->b_saveaddr, 2200 LIST_FIRST(&bp->b_dep), bp->b_iodone); 2201 2202 (*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n", 2203 bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend); 2204 2205 #ifdef FFS_SOFTUPDATES 2206 if (full) 2207 softdep_print(bp, full, pr); 2208 #endif 2209 } 2210 2211 const char *vtypes[] = { VTYPE_NAMES }; 2212 const char *vtags[] = { VTAG_NAMES }; 2213 2214 void 2215 vfs_vnode_print(void *v, int full, 2216 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2217 { 2218 struct vnode *vp = v; 2219 2220 (*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n", 2221 (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag], 2222 vp->v_tag, 2223 (u_int)vp->v_type >= nitems(vtypes)? 
"<unk>":vtypes[vp->v_type], 2224 vp->v_type, vp->v_mount, vp->v_mountedhere); 2225 2226 (*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n", 2227 vp->v_data, vp->v_usecount, vp->v_writecount, 2228 vp->v_holdcnt, vp->v_numoutput); 2229 2230 /* uvm_object_printit(&vp->v_uobj, full, pr); */ 2231 2232 if (full) { 2233 struct buf *bp; 2234 2235 (*pr)("clean bufs:\n"); 2236 LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) { 2237 (*pr)(" bp %p\n", bp); 2238 vfs_buf_print(bp, full, pr); 2239 } 2240 2241 (*pr)("dirty bufs:\n"); 2242 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 2243 (*pr)(" bp %p\n", bp); 2244 vfs_buf_print(bp, full, pr); 2245 } 2246 } 2247 } 2248 2249 void 2250 vfs_mount_print(struct mount *mp, int full, 2251 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2252 { 2253 struct vfsconf *vfc = mp->mnt_vfc; 2254 struct vnode *vp; 2255 int cnt; 2256 2257 (*pr)("flags %b\nvnodecovered %p syncer %p data %p\n", 2258 mp->mnt_flag, MNT_BITS, 2259 mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data); 2260 2261 (*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n", 2262 vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum, 2263 vfc->vfc_refcount, vfc->vfc_flags); 2264 2265 (*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n", 2266 mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks, 2267 mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail); 2268 2269 (*pr)(" files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files, 2270 mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail); 2271 2272 (*pr)(" f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n", 2273 mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1], 2274 mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime); 2275 2276 (*pr)(" syncwrites %llu asyncwrites = %llu\n", 2277 mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites); 2278 2279 (*pr)(" syncreads %llu asyncreads = %llu\n", 2280 mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads); 2281 2282 (*pr)(" fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n", 2283 mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname, 2284 mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec); 2285 2286 (*pr)("locked vnodes:"); 2287 /* XXX would take mountlist lock, except ddb has no context */ 2288 cnt = 0; 2289 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { 2290 if (VOP_ISLOCKED(vp)) { 2291 if (cnt == 0) 2292 (*pr)("\n %p", vp); 2293 else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0) 2294 (*pr)(",\n %p", vp); 2295 else 2296 (*pr)(", %p", vp); 2297 cnt++; 2298 } 2299 } 2300 (*pr)("\n"); 2301 2302 if (full) { 2303 (*pr)("all vnodes:"); 2304 /* XXX would take mountlist lock, except ddb has no context */ 2305 cnt = 0; 2306 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { 2307 if (cnt == 0) 2308 (*pr)("\n %p", vp); 2309 else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0) 2310 (*pr)(",\n %p", vp); 2311 else 2312 (*pr)(", %p", vp); 2313 cnt++; 2314 } 2315 (*pr)("\n"); 2316 } 2317 } 2318 #endif /* DDB */ 2319 2320 void 2321 copy_statfs_info(struct statfs *sbp, const struct mount *mp) 2322 { 2323 const struct statfs *mbp; 2324 2325 strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN); 2326 2327 if (sbp == (mbp = &mp->mnt_stat)) 2328 return; 2329 2330 sbp->f_fsid = mbp->f_fsid; 2331 sbp->f_owner = mbp->f_owner; 2332 sbp->f_flags = mbp->f_flags; 2333 sbp->f_syncwrites = mbp->f_syncwrites; 2334 sbp->f_asyncwrites = mbp->f_asyncwrites; 2335 sbp->f_syncreads = mbp->f_syncreads; 2336 sbp->f_asyncreads = mbp->f_asyncreads; 2337 
sbp->f_namemax = mbp->f_namemax; 2338 memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN); 2339 memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN); 2340 memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN); 2341 memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info, 2342 sizeof(union mount_info)); 2343 } 2344