/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
37 * 38 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 39 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $ 40 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.35 2004/07/10 16:29:45 dillon Exp $ 41 */ 42 43 /* 44 * External virtual filesystem routines 45 */ 46 #include "opt_ddb.h" 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/buf.h> 51 #include <sys/conf.h> 52 #include <sys/dirent.h> 53 #include <sys/domain.h> 54 #include <sys/eventhandler.h> 55 #include <sys/fcntl.h> 56 #include <sys/kernel.h> 57 #include <sys/kthread.h> 58 #include <sys/malloc.h> 59 #include <sys/mbuf.h> 60 #include <sys/mount.h> 61 #include <sys/proc.h> 62 #include <sys/namei.h> 63 #include <sys/reboot.h> 64 #include <sys/socket.h> 65 #include <sys/stat.h> 66 #include <sys/sysctl.h> 67 #include <sys/syslog.h> 68 #include <sys/vmmeter.h> 69 #include <sys/vnode.h> 70 71 #include <machine/limits.h> 72 73 #include <vm/vm.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_extern.h> 76 #include <vm/vm_kern.h> 77 #include <vm/pmap.h> 78 #include <vm/vm_map.h> 79 #include <vm/vm_page.h> 80 #include <vm/vm_pager.h> 81 #include <vm/vnode_pager.h> 82 #include <vm/vm_zone.h> 83 84 #include <sys/buf2.h> 85 #include <sys/thread2.h> 86 87 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); 88 89 static void insmntque (struct vnode *vp, struct mount *mp); 90 static void vclean (struct vnode *vp, lwkt_tokref_t vlock, 91 int flags, struct thread *td); 92 93 static unsigned long numvnodes; 94 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, ""); 95 96 enum vtype iftovt_tab[16] = { 97 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 98 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 99 }; 100 int vttoif_tab[9] = { 101 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 102 S_IFSOCK, S_IFIFO, S_IFMT, 103 }; 104 105 static TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ 106 107 static u_long wantfreevnodes = 25; 108 SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, 109 &wantfreevnodes, 0, ""); 110 static u_long freevnodes = 0; 111 SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, 112 &freevnodes, 0, ""); 113 114 static int reassignbufcalls; 115 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, 116 &reassignbufcalls, 0, ""); 117 static int reassignbufloops; 118 SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, 119 &reassignbufloops, 0, ""); 120 static int reassignbufsortgood; 121 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, 122 &reassignbufsortgood, 0, ""); 123 static int reassignbufsortbad; 124 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, 125 &reassignbufsortbad, 0, ""); 126 static int reassignbufmethod = 1; 127 SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, 128 &reassignbufmethod, 0, ""); 129 130 #ifdef ENABLE_VFS_IOOPT 131 int vfs_ioopt = 0; 132 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, ""); 133 #endif 134 135 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */ 136 struct lwkt_token mountlist_token; 137 struct lwkt_token mntvnode_token; 138 int nfs_mount_type = -1; 139 static struct lwkt_token mntid_token; 140 static struct lwkt_token vnode_free_list_token; 141 static struct lwkt_token spechash_token; 142 struct nfs_public nfs_pub; /* publicly exported FS */ 143 static vm_zone_t vnode_zone; 144 145 /* 146 * The workitem queue. 
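 *
 * The knobs defined below feed the syncer's hash wheel: a vnode with
 * dirty buffers is parked "delay" seconds into the future by the
 * worklist code further down in this file, roughly
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
 *
 * (a sketch only; see vn_syncer_add_to_worklist() for the real code,
 * including the clamp of delay to syncer_maxdelay - 2).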
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_INT(_kern, OID_AUTO, syncdelay, CTLFLAG_RW,
		&syncdelay, 0, "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
		&filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
		&dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
		&metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
		&stat_rush_requests, 0, "");

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
		&desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
		&minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
		&vnlru_nowhere, 0,
		"Number of times the vnlru process ran without success");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				    struct export_args *argp);

#define VSHOULDFREE(vp) \
	(!((vp)->v_flag & (VFREE|VDOOMED)) && \
	 !(vp)->v_holdcnt && !(vp)->v_usecount && \
	 (!(vp)->v_object || \
	  !((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))

#define VMIGHTFREE(vp) \
	(((vp)->v_flag & (VFREE|VDOOMED|VXLOCK)) == 0 && \
	 cache_leaf_test(vp) == 0 && (vp)->v_usecount == 0)

#define VSHOULDBUSY(vp) \
	(((vp)->v_flag & VFREE) && \
	 ((vp)->v_holdcnt || (vp)->v_usecount))

static void	vbusy(struct vnode *vp);
static void	vfree(struct vnode *vp);
static void	vmaybefree(struct vnode *vp);

extern int dev_ref_debug;

/*
 * NOTE: the vnode interlock must be held on call.
 */
static __inline void
vmaybefree(struct vnode *vp)
{
	if (VSHOULDFREE(vp))
		vfree(vp);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	/*
	 * Desired vnodes is a result of the physical page count
	 * and the size of the kernel's heap.  It scales in proportion
	 * to the amount of available physical memory.  This can
	 * cause trouble on 64-bit and large memory platforms.
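	 *
	 * As a rough, purely hypothetical example of the min() below:
	 * with 1GB of RAM (about 262144 4K pages) and maxproc around
	 * 2000, the first term works out to roughly 2000 + 65536 =
	 * 67536 vnodes, while the kernel-address-space term is
	 * normally much larger, so the page count usually governs.
	 * minvnodes is then a quarter of that.  (Illustrative numbers
	 * only, not measured on any particular machine.)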
230 */ 231 /* desiredvnodes = maxproc + vmstats.v_page_count / 4; */ 232 desiredvnodes = 233 min(maxproc + vmstats.v_page_count /4, 234 2 * (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 235 (5 * (sizeof(struct vm_object) + sizeof(struct vnode)))); 236 237 minvnodes = desiredvnodes / 4; 238 lwkt_token_init(&mountlist_token); 239 lwkt_token_init(&mntvnode_token); 240 lwkt_token_init(&mntid_token); 241 lwkt_token_init(&spechash_token); 242 TAILQ_INIT(&vnode_free_list); 243 lwkt_token_init(&vnode_free_list_token); 244 vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5); 245 /* 246 * Initialize the filesystem syncer. 247 */ 248 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 249 &syncer_mask); 250 syncer_maxdelay = syncer_mask + 1; 251 } 252 253 /* 254 * Mark a mount point as busy. Used to synchronize access and to delay 255 * unmounting. Interlock is not released on failure. 256 */ 257 int 258 vfs_busy(struct mount *mp, int flags, 259 lwkt_tokref_t interlkp, struct thread *td) 260 { 261 int lkflags; 262 263 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 264 if (flags & LK_NOWAIT) 265 return (ENOENT); 266 mp->mnt_kern_flag |= MNTK_MWAIT; 267 /* 268 * Since all busy locks are shared except the exclusive 269 * lock granted when unmounting, the only place that a 270 * wakeup needs to be done is at the release of the 271 * exclusive lock at the end of dounmount. 272 * 273 * note: interlkp is a serializer and thus can be safely 274 * held through any sleep 275 */ 276 tsleep((caddr_t)mp, 0, "vfs_busy", 0); 277 return (ENOENT); 278 } 279 lkflags = LK_SHARED | LK_NOPAUSE; 280 if (interlkp) 281 lkflags |= LK_INTERLOCK; 282 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td)) 283 panic("vfs_busy: unexpected lock failure"); 284 return (0); 285 } 286 287 /* 288 * Free a busy filesystem. 289 */ 290 void 291 vfs_unbusy(struct mount *mp, struct thread *td) 292 { 293 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td); 294 } 295 296 /* 297 * Lookup a filesystem type, and if found allocate and initialize 298 * a mount structure for it. 299 * 300 * Devname is usually updated by mount(8) after booting. 301 */ 302 int 303 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp) 304 { 305 struct thread *td = curthread; /* XXX */ 306 struct vfsconf *vfsp; 307 struct mount *mp; 308 309 if (fstypename == NULL) 310 return (ENODEV); 311 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 312 if (!strcmp(vfsp->vfc_name, fstypename)) 313 break; 314 } 315 if (vfsp == NULL) 316 return (ENODEV); 317 mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); 318 bzero((char *)mp, (u_long)sizeof(struct mount)); 319 lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE); 320 vfs_busy(mp, LK_NOWAIT, NULL, td); 321 TAILQ_INIT(&mp->mnt_nvnodelist); 322 TAILQ_INIT(&mp->mnt_reservedvnlist); 323 mp->mnt_nvnodelistsize = 0; 324 mp->mnt_vfc = vfsp; 325 mp->mnt_op = vfsp->vfc_vfsops; 326 mp->mnt_flag = MNT_RDONLY; 327 mp->mnt_vnodecovered = NULLVP; 328 vfsp->vfc_refcount++; 329 mp->mnt_iosize_max = DFLTPHYS; 330 mp->mnt_stat.f_type = vfsp->vfc_typenum; 331 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 332 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 333 mp->mnt_stat.f_mntonname[0] = '/'; 334 mp->mnt_stat.f_mntonname[1] = 0; 335 (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); 336 *mpp = mp; 337 return (0); 338 } 339 340 /* 341 * Lookup a mount point by filesystem identifier. 
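 *
 * The lookup is by exact fsid match and the returned mount point is
 * not busied; a caller that must hold it across a blocking operation
 * is expected to vfs_busy() it itself.  A NULL return means no mounted
 * filesystem carries that fsid.  A minimal sketch (the fsid source and
 * the error code are assumptions, not taken from this file):
 *
 *	struct mount *mp;
 *
 *	if ((mp = vfs_getvfs(&fsid)) == NULL)
 *		return (ESTALE);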
342 */ 343 struct mount * 344 vfs_getvfs(fsid_t *fsid) 345 { 346 struct mount *mp; 347 lwkt_tokref ilock; 348 349 lwkt_gettoken(&ilock, &mountlist_token); 350 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 351 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 352 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 353 break; 354 } 355 } 356 lwkt_reltoken(&ilock); 357 return (mp); 358 } 359 360 /* 361 * Get a new unique fsid. Try to make its val[0] unique, since this value 362 * will be used to create fake device numbers for stat(). Also try (but 363 * not so hard) make its val[0] unique mod 2^16, since some emulators only 364 * support 16-bit device numbers. We end up with unique val[0]'s for the 365 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 366 * 367 * Keep in mind that several mounts may be running in parallel. Starting 368 * the search one past where the previous search terminated is both a 369 * micro-optimization and a defense against returning the same fsid to 370 * different mounts. 371 */ 372 void 373 vfs_getnewfsid(struct mount *mp) 374 { 375 static u_int16_t mntid_base; 376 lwkt_tokref ilock; 377 fsid_t tfsid; 378 int mtype; 379 380 lwkt_gettoken(&ilock, &mntid_token); 381 mtype = mp->mnt_vfc->vfc_typenum; 382 tfsid.val[1] = mtype; 383 mtype = (mtype & 0xFF) << 24; 384 for (;;) { 385 tfsid.val[0] = makeudev(255, 386 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 387 mntid_base++; 388 if (vfs_getvfs(&tfsid) == NULL) 389 break; 390 } 391 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 392 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 393 lwkt_reltoken(&ilock); 394 } 395 396 /* 397 * Knob to control the precision of file timestamps: 398 * 399 * 0 = seconds only; nanoseconds zeroed. 400 * 1 = seconds and nanoseconds, accurate within 1/HZ. 401 * 2 = seconds and nanoseconds, truncated to microseconds. 402 * >=3 = seconds and nanoseconds, maximum precision. 403 */ 404 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 405 406 static int timestamp_precision = TSP_SEC; 407 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 408 ×tamp_precision, 0, ""); 409 410 /* 411 * Get a current timestamp. 412 */ 413 void 414 vfs_timestamp(struct timespec *tsp) 415 { 416 struct timeval tv; 417 418 switch (timestamp_precision) { 419 case TSP_SEC: 420 tsp->tv_sec = time_second; 421 tsp->tv_nsec = 0; 422 break; 423 case TSP_HZ: 424 getnanotime(tsp); 425 break; 426 case TSP_USEC: 427 microtime(&tv); 428 TIMEVAL_TO_TIMESPEC(&tv, tsp); 429 break; 430 case TSP_NSEC: 431 default: 432 nanotime(tsp); 433 break; 434 } 435 } 436 437 /* 438 * Set vnode attributes to VNOVAL 439 */ 440 void 441 vattr_null(struct vattr *vap) 442 { 443 vap->va_type = VNON; 444 vap->va_size = VNOVAL; 445 vap->va_bytes = VNOVAL; 446 vap->va_mode = VNOVAL; 447 vap->va_nlink = VNOVAL; 448 vap->va_uid = VNOVAL; 449 vap->va_gid = VNOVAL; 450 vap->va_fsid = VNOVAL; 451 vap->va_fileid = VNOVAL; 452 vap->va_blocksize = VNOVAL; 453 vap->va_rdev = VNOVAL; 454 vap->va_atime.tv_sec = VNOVAL; 455 vap->va_atime.tv_nsec = VNOVAL; 456 vap->va_mtime.tv_sec = VNOVAL; 457 vap->va_mtime.tv_nsec = VNOVAL; 458 vap->va_ctime.tv_sec = VNOVAL; 459 vap->va_ctime.tv_nsec = VNOVAL; 460 vap->va_flags = VNOVAL; 461 vap->va_gen = VNOVAL; 462 vap->va_vaflags = 0; 463 } 464 465 /* 466 * This routine is called when we have too many vnodes. It attempts 467 * to free <count> vnodes and will potentially free vnodes that still 468 * have VM backing store (VM backing store is typically the cause 469 * of a vnode blowout so we want to do this). 
Therefore, this operation is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	lwkt_tokref vlock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check.  But we have to check again after obtaining
		 * the vnode interlock.  vp->v_interlock points to stable
		 * storage so it's ok if the vp gets ripped out from
		 * under us while we are blocked.
		 */
		if (vp->v_type == VNON ||
		    vp->v_type == VBAD ||
		    !VMIGHTFREE(vp) ||		/* critical path opt */
		    (vp->v_object &&
		     vp->v_object->resident_page_count >= trigger)
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * Get the interlock, delay moving the node to the tail so
		 * we don't race against new additions to the mountlist.
		 */
		lwkt_gettoken(&vlock, vp->v_interlock);
		if (TAILQ_FIRST(&mp->mnt_nvnodelist) != vp) {
			lwkt_reltoken(&vlock);
			continue;
		}
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		/*
		 * Must check again
		 */
		if (vp->v_type == VNON ||
		    vp->v_type == VBAD ||
		    !VMIGHTFREE(vp) ||		/* critical path opt */
		    (vp->v_object &&
		     vp->v_object->resident_page_count >= trigger)
		) {
			lwkt_reltoken(&vlock);
			--count;
			continue;
		}
		vgonel(vp, &vlock, curthread);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
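 *
 * The vnlru kernel thread below therefore does the work on behalf of
 * everyone else: it sleeps while
 *
 *	numvnodes - freevnodes <= desiredvnodes * 9 / 10
 *
 * and otherwise walks the mount list calling vlrureclaim() on each
 * filesystem, which in turn skips any vnode whose VM object still has
 * at least (v_page_count * 2 / desiredvnodes) resident pages.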
566 */ 567 static struct thread *vnlruthread; 568 static int vnlruproc_sig; 569 570 static void 571 vnlru_proc(void) 572 { 573 struct mount *mp, *nmp; 574 lwkt_tokref ilock; 575 int s; 576 int done; 577 struct thread *td = curthread; 578 579 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td, 580 SHUTDOWN_PRI_FIRST); 581 582 s = splbio(); 583 for (;;) { 584 kproc_suspend_loop(); 585 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) { 586 vnlruproc_sig = 0; 587 wakeup(&vnlruproc_sig); 588 tsleep(td, 0, "vlruwt", hz); 589 continue; 590 } 591 done = 0; 592 lwkt_gettoken(&ilock, &mountlist_token); 593 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 594 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) { 595 nmp = TAILQ_NEXT(mp, mnt_list); 596 continue; 597 } 598 done += vlrureclaim(mp); 599 lwkt_gettokref(&ilock); 600 nmp = TAILQ_NEXT(mp, mnt_list); 601 vfs_unbusy(mp, td); 602 } 603 lwkt_reltoken(&ilock); 604 if (done == 0) { 605 vnlru_nowhere++; 606 tsleep(td, 0, "vlrup", hz * 3); 607 } 608 } 609 splx(s); 610 } 611 612 static struct kproc_desc vnlru_kp = { 613 "vnlru", 614 vnlru_proc, 615 &vnlruthread 616 }; 617 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp) 618 619 /* 620 * Routines having to do with the management of the vnode table. 621 */ 622 extern vop_t **dead_vnodeop_p; 623 624 /* 625 * Return the next vnode from the free list. 626 */ 627 int 628 getnewvnode(enum vtagtype tag, struct mount *mp, 629 vop_t **vops, struct vnode **vpp) 630 { 631 int s; 632 struct thread *td = curthread; /* XXX */ 633 struct vnode *vp = NULL; 634 struct vnode *xvp; 635 vm_object_t object; 636 lwkt_tokref ilock; 637 lwkt_tokref vlock; 638 639 s = splbio(); /* YYY remove me */ 640 641 /* 642 * Try to reuse vnodes if we hit the max. This situation only 643 * occurs in certain large-memory (2G+) situations. We cannot 644 * attempt to directly reclaim vnodes due to nasty recursion 645 * problems. 646 */ 647 while (numvnodes - freevnodes > desiredvnodes) { 648 if (vnlruproc_sig == 0) { 649 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 650 wakeup(vnlruthread); 651 } 652 tsleep(&vnlruproc_sig, 0, "vlruwk", hz); 653 } 654 655 656 /* 657 * Attempt to reuse a vnode already on the free list, allocating 658 * a new vnode if we can't find one or if we have not reached a 659 * good minimum for good LRU performance. 660 */ 661 lwkt_gettoken(&ilock, &vnode_free_list_token); 662 if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) { 663 int count; 664 665 for (count = 0; count < freevnodes; count++) { 666 /* 667 * __VNODESCAN__ 668 * 669 * Pull the next vnode off the free list and do some 670 * sanity checks. Note that regardless of how we 671 * block, if freevnodes is non-zero there had better 672 * be something on the list. 673 */ 674 vp = TAILQ_FIRST(&vnode_free_list); 675 if (vp == NULL) 676 panic("getnewvnode: free vnode isn't"); 677 678 /* 679 * Move the vnode to the end of the list so other 680 * processes do not double-block trying to recycle 681 * the same vnode (as an optimization), then get 682 * the interlock. 683 */ 684 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 685 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 686 687 /* 688 * Skip vnodes that are in the process of being 689 * held or referenced. Since the act of adding or 690 * removing a vnode on the freelist requires a token 691 * and may block, the ref count may be adjusted 692 * prior to its addition or removal. 
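 *
 * (VSHOULDBUSY(vp) is the test used for this: VFREE is still set on
 * the vnode but v_holdcnt or v_usecount has already become non-zero;
 * see the macro definitions near the top of this file.)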
693 */ 694 if (VSHOULDBUSY(vp)) { 695 vp = NULL; 696 continue; 697 } 698 699 700 /* 701 * Obtain the vnode interlock and check that the 702 * vnode is still on the free list. 703 * 704 * This normally devolves into a degenerate case so 705 * it is optimal. Loop up if it isn't. Note that 706 * the vnode could be in the middle of being moved 707 * off the free list (the VSHOULDBUSY() check) and 708 * must be skipped if so. 709 */ 710 lwkt_gettoken(&vlock, vp->v_interlock); 711 TAILQ_FOREACH_REVERSE(xvp, &vnode_free_list, 712 freelst, v_freelist) { 713 if (vp == xvp) 714 break; 715 } 716 if (vp != xvp || VSHOULDBUSY(vp)) { 717 vp = NULL; 718 continue; 719 } 720 721 /* 722 * We now safely own the vnode. If the vnode has 723 * an object do not recycle it if its VM object 724 * has resident pages or references. 725 */ 726 if ((VOP_GETVOBJECT(vp, &object) == 0 && 727 (object->resident_page_count || object->ref_count)) 728 ) { 729 lwkt_reltoken(&vlock); 730 vp = NULL; 731 continue; 732 } 733 734 /* 735 * We can almost reuse this vnode. But we don't want 736 * to recycle it if the vnode has children in the 737 * namecache because that breaks the namecache's 738 * path element chain. (YYY use nc_refs for the 739 * check?) 740 */ 741 KKASSERT(vp->v_flag & VFREE); 742 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 743 744 if (TAILQ_FIRST(&vp->v_namecache) == NULL || 745 cache_leaf_test(vp) >= 0) { 746 /* ok, we can reuse this vnode */ 747 break; 748 } 749 lwkt_reltoken(&vlock); 750 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 751 vp = NULL; 752 } 753 } 754 755 /* 756 * If vp is non-NULL we hold it's interlock. 757 */ 758 if (vp) { 759 vp->v_flag |= VDOOMED; 760 vp->v_flag &= ~VFREE; 761 freevnodes--; 762 lwkt_reltoken(&ilock); 763 cache_purge(vp); /* YYY may block */ 764 vp->v_lease = NULL; 765 if (vp->v_type != VBAD) { 766 vgonel(vp, &vlock, td); 767 } else { 768 lwkt_reltoken(&vlock); 769 } 770 771 #ifdef INVARIANTS 772 { 773 int s; 774 775 if (vp->v_data) 776 panic("cleaned vnode isn't"); 777 s = splbio(); 778 if (vp->v_numoutput) 779 panic("Clean vnode has pending I/O's"); 780 splx(s); 781 } 782 #endif 783 vp->v_flag = 0; 784 vp->v_lastw = 0; 785 vp->v_lasta = 0; 786 vp->v_cstart = 0; 787 vp->v_clen = 0; 788 vp->v_socket = 0; 789 vp->v_writecount = 0; /* XXX */ 790 } else { 791 lwkt_reltoken(&ilock); 792 vp = zalloc(vnode_zone); 793 bzero(vp, sizeof(*vp)); 794 vp->v_interlock = lwkt_token_pool_get(vp); 795 lwkt_token_init(&vp->v_pollinfo.vpi_token); 796 cache_purge(vp); 797 TAILQ_INIT(&vp->v_namecache); 798 numvnodes++; 799 } 800 801 TAILQ_INIT(&vp->v_cleanblkhd); 802 TAILQ_INIT(&vp->v_dirtyblkhd); 803 vp->v_type = VNON; 804 vp->v_tag = tag; 805 vp->v_op = vops; 806 *vpp = vp; 807 vp->v_usecount = 1; 808 vp->v_data = NULL; 809 splx(s); 810 811 /* 812 * Placing the vnode on the mount point's queue makes it visible. 813 * We had better already have a ref on it. 814 */ 815 insmntque(vp, mp); 816 817 vfs_object_create(vp, td); 818 return (0); 819 } 820 821 /* 822 * Move a vnode from one mount queue to another. 823 */ 824 static void 825 insmntque(struct vnode *vp, struct mount *mp) 826 { 827 lwkt_tokref ilock; 828 829 lwkt_gettoken(&ilock, &mntvnode_token); 830 /* 831 * Delete from old mount point vnode list, if on one. 
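 *
 * Note that passing a NULL mp stops here: insmntque(vp, NULL) is how
 * callers such as vgonel() detach a vnode from its mount without
 * putting it on another mount's list.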
832 */ 833 if (vp->v_mount != NULL) { 834 KASSERT(vp->v_mount->mnt_nvnodelistsize > 0, 835 ("bad mount point vnode list size")); 836 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes); 837 vp->v_mount->mnt_nvnodelistsize--; 838 } 839 /* 840 * Insert into list of vnodes for the new mount point, if available. 841 */ 842 if ((vp->v_mount = mp) == NULL) { 843 lwkt_reltoken(&ilock); 844 return; 845 } 846 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 847 mp->mnt_nvnodelistsize++; 848 lwkt_reltoken(&ilock); 849 } 850 851 /* 852 * Update outstanding I/O count and do wakeup if requested. 853 */ 854 void 855 vwakeup(struct buf *bp) 856 { 857 struct vnode *vp; 858 859 bp->b_flags &= ~B_WRITEINPROG; 860 if ((vp = bp->b_vp)) { 861 vp->v_numoutput--; 862 if (vp->v_numoutput < 0) 863 panic("vwakeup: neg numoutput"); 864 if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) { 865 vp->v_flag &= ~VBWAIT; 866 wakeup((caddr_t) &vp->v_numoutput); 867 } 868 } 869 } 870 871 /* 872 * Flush out and invalidate all buffers associated with a vnode. 873 * Called with the underlying object locked. 874 */ 875 int 876 vinvalbuf(struct vnode *vp, int flags, struct thread *td, 877 int slpflag, int slptimeo) 878 { 879 struct buf *bp; 880 struct buf *nbp, *blist; 881 int s, error; 882 vm_object_t object; 883 lwkt_tokref vlock; 884 885 if (flags & V_SAVE) { 886 s = splbio(); 887 while (vp->v_numoutput) { 888 vp->v_flag |= VBWAIT; 889 error = tsleep((caddr_t)&vp->v_numoutput, 890 slpflag, "vinvlbuf", slptimeo); 891 if (error) { 892 splx(s); 893 return (error); 894 } 895 } 896 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 897 splx(s); 898 if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) != 0) 899 return (error); 900 s = splbio(); 901 if (vp->v_numoutput > 0 || 902 !TAILQ_EMPTY(&vp->v_dirtyblkhd)) 903 panic("vinvalbuf: dirty bufs"); 904 } 905 splx(s); 906 } 907 s = splbio(); 908 for (;;) { 909 blist = TAILQ_FIRST(&vp->v_cleanblkhd); 910 if (!blist) 911 blist = TAILQ_FIRST(&vp->v_dirtyblkhd); 912 if (!blist) 913 break; 914 915 for (bp = blist; bp; bp = nbp) { 916 nbp = TAILQ_NEXT(bp, b_vnbufs); 917 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 918 error = BUF_TIMELOCK(bp, 919 LK_EXCLUSIVE | LK_SLEEPFAIL, 920 "vinvalbuf", slpflag, slptimeo); 921 if (error == ENOLCK) 922 break; 923 splx(s); 924 return (error); 925 } 926 /* 927 * XXX Since there are no node locks for NFS, I 928 * believe there is a slight chance that a delayed 929 * write will occur while sleeping just above, so 930 * check for it. Note that vfs_bio_awrite expects 931 * buffers to reside on a queue, while VOP_BWRITE and 932 * brelse do not. 933 */ 934 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 935 (flags & V_SAVE)) { 936 937 if (bp->b_vp == vp) { 938 if (bp->b_flags & B_CLUSTEROK) { 939 BUF_UNLOCK(bp); 940 vfs_bio_awrite(bp); 941 } else { 942 bremfree(bp); 943 bp->b_flags |= B_ASYNC; 944 VOP_BWRITE(bp->b_vp, bp); 945 } 946 } else { 947 bremfree(bp); 948 (void) VOP_BWRITE(bp->b_vp, bp); 949 } 950 break; 951 } 952 bremfree(bp); 953 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 954 bp->b_flags &= ~B_ASYNC; 955 brelse(bp); 956 } 957 } 958 959 /* 960 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 961 * have write I/O in-progress but if there is a VM object then the 962 * VM object can also have read-I/O in-progress. 
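 *
 * The loop below therefore waits on v_numoutput (kicked by vwakeup()
 * as writes finish) and, if the vnode has a VM object, also waits for
 * the object's paging_in_progress count to drain, re-checking
 * v_numoutput afterwards because more writes may have been started in
 * the meantime.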
963 */ 964 do { 965 while (vp->v_numoutput > 0) { 966 vp->v_flag |= VBWAIT; 967 tsleep(&vp->v_numoutput, 0, "vnvlbv", 0); 968 } 969 if (VOP_GETVOBJECT(vp, &object) == 0) { 970 while (object->paging_in_progress) 971 vm_object_pip_sleep(object, "vnvlbx"); 972 } 973 } while (vp->v_numoutput > 0); 974 975 splx(s); 976 977 /* 978 * Destroy the copy in the VM cache, too. 979 */ 980 lwkt_gettoken(&vlock, vp->v_interlock); 981 if (VOP_GETVOBJECT(vp, &object) == 0) { 982 vm_object_page_remove(object, 0, 0, 983 (flags & V_SAVE) ? TRUE : FALSE); 984 } 985 lwkt_reltoken(&vlock); 986 987 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) 988 panic("vinvalbuf: flush failed"); 989 return (0); 990 } 991 992 /* 993 * Truncate a file's buffer and pages to a specified length. This 994 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 995 * sync activity. 996 */ 997 int 998 vtruncbuf(struct vnode *vp, struct thread *td, off_t length, int blksize) 999 { 1000 struct buf *bp; 1001 struct buf *nbp; 1002 int s, anyfreed; 1003 int trunclbn; 1004 1005 /* 1006 * Round up to the *next* lbn. 1007 */ 1008 trunclbn = (length + blksize - 1) / blksize; 1009 1010 s = splbio(); 1011 restart: 1012 anyfreed = 1; 1013 for (;anyfreed;) { 1014 anyfreed = 0; 1015 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) { 1016 nbp = TAILQ_NEXT(bp, b_vnbufs); 1017 if (bp->b_lblkno >= trunclbn) { 1018 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1019 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1020 goto restart; 1021 } else { 1022 bremfree(bp); 1023 bp->b_flags |= (B_INVAL | B_RELBUF); 1024 bp->b_flags &= ~B_ASYNC; 1025 brelse(bp); 1026 anyfreed = 1; 1027 } 1028 if (nbp && 1029 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1030 (nbp->b_vp != vp) || 1031 (nbp->b_flags & B_DELWRI))) { 1032 goto restart; 1033 } 1034 } 1035 } 1036 1037 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1038 nbp = TAILQ_NEXT(bp, b_vnbufs); 1039 if (bp->b_lblkno >= trunclbn) { 1040 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1041 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1042 goto restart; 1043 } else { 1044 bremfree(bp); 1045 bp->b_flags |= (B_INVAL | B_RELBUF); 1046 bp->b_flags &= ~B_ASYNC; 1047 brelse(bp); 1048 anyfreed = 1; 1049 } 1050 if (nbp && 1051 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1052 (nbp->b_vp != vp) || 1053 (nbp->b_flags & B_DELWRI) == 0)) { 1054 goto restart; 1055 } 1056 } 1057 } 1058 } 1059 1060 if (length > 0) { 1061 restartsync: 1062 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1063 nbp = TAILQ_NEXT(bp, b_vnbufs); 1064 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) { 1065 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1066 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1067 goto restart; 1068 } else { 1069 bremfree(bp); 1070 if (bp->b_vp == vp) { 1071 bp->b_flags |= B_ASYNC; 1072 } else { 1073 bp->b_flags &= ~B_ASYNC; 1074 } 1075 VOP_BWRITE(bp->b_vp, bp); 1076 } 1077 goto restartsync; 1078 } 1079 1080 } 1081 } 1082 1083 while (vp->v_numoutput > 0) { 1084 vp->v_flag |= VBWAIT; 1085 tsleep(&vp->v_numoutput, 0, "vbtrunc", 0); 1086 } 1087 1088 splx(s); 1089 1090 vnode_pager_setsize(vp, length); 1091 1092 return (0); 1093 } 1094 1095 /* 1096 * Associate a buffer with a vnode. 1097 */ 1098 void 1099 bgetvp(struct vnode *vp, struct buf *bp) 1100 { 1101 int s; 1102 1103 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 1104 1105 vhold(vp); 1106 bp->b_vp = vp; 1107 bp->b_dev = vn_todev(vp); 1108 /* 1109 * Insert onto list for new vnode. 
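 *
 * A freshly associated buffer always starts out on the vnode's clean
 * list; reassignbuf() further down moves it to the dirty list once
 * B_DELWRI is set on it.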
1110 */ 1111 s = splbio(); 1112 bp->b_xflags |= BX_VNCLEAN; 1113 bp->b_xflags &= ~BX_VNDIRTY; 1114 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs); 1115 splx(s); 1116 } 1117 1118 /* 1119 * Disassociate a buffer from a vnode. 1120 */ 1121 void 1122 brelvp(struct buf *bp) 1123 { 1124 struct vnode *vp; 1125 struct buflists *listheadp; 1126 int s; 1127 1128 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1129 1130 /* 1131 * Delete from old vnode list, if on one. 1132 */ 1133 vp = bp->b_vp; 1134 s = splbio(); 1135 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1136 if (bp->b_xflags & BX_VNDIRTY) 1137 listheadp = &vp->v_dirtyblkhd; 1138 else 1139 listheadp = &vp->v_cleanblkhd; 1140 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1141 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1142 } 1143 if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 1144 vp->v_flag &= ~VONWORKLST; 1145 LIST_REMOVE(vp, v_synclist); 1146 } 1147 splx(s); 1148 bp->b_vp = (struct vnode *) 0; 1149 vdrop(vp); 1150 } 1151 1152 /* 1153 * The workitem queue. 1154 * 1155 * It is useful to delay writes of file data and filesystem metadata 1156 * for tens of seconds so that quickly created and deleted files need 1157 * not waste disk bandwidth being created and removed. To realize this, 1158 * we append vnodes to a "workitem" queue. When running with a soft 1159 * updates implementation, most pending metadata dependencies should 1160 * not wait for more than a few seconds. Thus, mounted on block devices 1161 * are delayed only about a half the time that file data is delayed. 1162 * Similarly, directory updates are more critical, so are only delayed 1163 * about a third the time that file data is delayed. Thus, there are 1164 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 1165 * one each second (driven off the filesystem syncer process). The 1166 * syncer_delayno variable indicates the next queue that is to be processed. 1167 * Items that need to be processed soon are placed in this queue: 1168 * 1169 * syncer_workitem_pending[syncer_delayno] 1170 * 1171 * A delay of fifteen seconds is done by placing the request fifteen 1172 * entries later in the queue: 1173 * 1174 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 1175 * 1176 */ 1177 1178 /* 1179 * Add an item to the syncer work queue. 1180 */ 1181 static void 1182 vn_syncer_add_to_worklist(struct vnode *vp, int delay) 1183 { 1184 int s, slot; 1185 1186 s = splbio(); 1187 1188 if (vp->v_flag & VONWORKLST) { 1189 LIST_REMOVE(vp, v_synclist); 1190 } 1191 1192 if (delay > syncer_maxdelay - 2) 1193 delay = syncer_maxdelay - 2; 1194 slot = (syncer_delayno + delay) & syncer_mask; 1195 1196 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); 1197 vp->v_flag |= VONWORKLST; 1198 splx(s); 1199 } 1200 1201 struct thread *updatethread; 1202 static void sched_sync (void); 1203 static struct kproc_desc up_kp = { 1204 "syncer", 1205 sched_sync, 1206 &updatethread 1207 }; 1208 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 1209 1210 /* 1211 * System filesystem synchronizer daemon. 1212 */ 1213 void 1214 sched_sync(void) 1215 { 1216 struct synclist *slp; 1217 struct vnode *vp; 1218 long starttime; 1219 int s; 1220 struct thread *td = curthread; 1221 1222 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td, 1223 SHUTDOWN_PRI_LAST); 1224 1225 for (;;) { 1226 kproc_suspend_loop(); 1227 1228 starttime = time_second; 1229 1230 /* 1231 * Push files whose dirty time has expired. 
Be careful 1232 * of interrupt race on slp queue. 1233 */ 1234 s = splbio(); 1235 slp = &syncer_workitem_pending[syncer_delayno]; 1236 syncer_delayno += 1; 1237 if (syncer_delayno == syncer_maxdelay) 1238 syncer_delayno = 0; 1239 splx(s); 1240 1241 while ((vp = LIST_FIRST(slp)) != NULL) { 1242 if (VOP_ISLOCKED(vp, NULL) == 0) { 1243 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td); 1244 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1245 VOP_UNLOCK(vp, NULL, 0, td); 1246 } 1247 s = splbio(); 1248 if (LIST_FIRST(slp) == vp) { 1249 /* 1250 * Note: v_tag VT_VFS vps can remain on the 1251 * worklist too with no dirty blocks, but 1252 * since sync_fsync() moves it to a different 1253 * slot we are safe. 1254 */ 1255 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) && 1256 !vn_isdisk(vp, NULL)) 1257 panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag); 1258 /* 1259 * Put us back on the worklist. The worklist 1260 * routine will remove us from our current 1261 * position and then add us back in at a later 1262 * position. 1263 */ 1264 vn_syncer_add_to_worklist(vp, syncdelay); 1265 } 1266 splx(s); 1267 } 1268 1269 /* 1270 * Do soft update processing. 1271 */ 1272 if (bioops.io_sync) 1273 (*bioops.io_sync)(NULL); 1274 1275 /* 1276 * The variable rushjob allows the kernel to speed up the 1277 * processing of the filesystem syncer process. A rushjob 1278 * value of N tells the filesystem syncer to process the next 1279 * N seconds worth of work on its queue ASAP. Currently rushjob 1280 * is used by the soft update code to speed up the filesystem 1281 * syncer process when the incore state is getting so far 1282 * ahead of the disk that the kernel memory pool is being 1283 * threatened with exhaustion. 1284 */ 1285 if (rushjob > 0) { 1286 rushjob -= 1; 1287 continue; 1288 } 1289 /* 1290 * If it has taken us less than a second to process the 1291 * current work, then wait. Otherwise start right over 1292 * again. We can still lose time if any single round 1293 * takes more than two seconds, but it does not really 1294 * matter as we are just trying to generally pace the 1295 * filesystem activity. 1296 */ 1297 if (time_second == starttime) 1298 tsleep(&lbolt, 0, "syncer", 0); 1299 } 1300 } 1301 1302 /* 1303 * Request the syncer daemon to speed up its work. 1304 * We never push it to speed up more than half of its 1305 * normal turn time, otherwise it could take over the cpu. 1306 * 1307 * YYY wchan field protected by the BGL. 1308 */ 1309 int 1310 speedup_syncer(void) 1311 { 1312 crit_enter(); 1313 if (updatethread->td_wchan == &lbolt) { /* YYY */ 1314 unsleep(updatethread); 1315 lwkt_schedule(updatethread); 1316 } 1317 crit_exit(); 1318 if (rushjob < syncdelay / 2) { 1319 rushjob += 1; 1320 stat_rush_requests += 1; 1321 return (1); 1322 } 1323 return(0); 1324 } 1325 1326 /* 1327 * Associate a p-buffer with a vnode. 1328 * 1329 * Also sets B_PAGING flag to indicate that vnode is not fully associated 1330 * with the buffer. i.e. the bp has not been linked into the vnode or 1331 * ref-counted. 1332 */ 1333 void 1334 pbgetvp(struct vnode *vp, struct buf *bp) 1335 { 1336 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free")); 1337 1338 bp->b_vp = vp; 1339 bp->b_flags |= B_PAGING; 1340 bp->b_dev = vn_todev(vp); 1341 } 1342 1343 /* 1344 * Disassociate a p-buffer from a vnode. 
1345 */ 1346 void 1347 pbrelvp(struct buf *bp) 1348 { 1349 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL")); 1350 1351 /* XXX REMOVE ME */ 1352 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) { 1353 panic( 1354 "relpbuf(): b_vp was probably reassignbuf()d %p %x", 1355 bp, 1356 (int)bp->b_flags 1357 ); 1358 } 1359 bp->b_vp = (struct vnode *) 0; 1360 bp->b_flags &= ~B_PAGING; 1361 } 1362 1363 void 1364 pbreassignbuf(struct buf *bp, struct vnode *newvp) 1365 { 1366 if ((bp->b_flags & B_PAGING) == 0) { 1367 panic( 1368 "pbreassignbuf() on non phys bp %p", 1369 bp 1370 ); 1371 } 1372 bp->b_vp = newvp; 1373 } 1374 1375 /* 1376 * Reassign a buffer from one vnode to another. 1377 * Used to assign file specific control information 1378 * (indirect blocks) to the vnode to which they belong. 1379 */ 1380 void 1381 reassignbuf(struct buf *bp, struct vnode *newvp) 1382 { 1383 struct buflists *listheadp; 1384 int delay; 1385 int s; 1386 1387 if (newvp == NULL) { 1388 printf("reassignbuf: NULL"); 1389 return; 1390 } 1391 ++reassignbufcalls; 1392 1393 /* 1394 * B_PAGING flagged buffers cannot be reassigned because their vp 1395 * is not fully linked in. 1396 */ 1397 if (bp->b_flags & B_PAGING) 1398 panic("cannot reassign paging buffer"); 1399 1400 s = splbio(); 1401 /* 1402 * Delete from old vnode list, if on one. 1403 */ 1404 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1405 if (bp->b_xflags & BX_VNDIRTY) 1406 listheadp = &bp->b_vp->v_dirtyblkhd; 1407 else 1408 listheadp = &bp->b_vp->v_cleanblkhd; 1409 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1410 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1411 if (bp->b_vp != newvp) { 1412 vdrop(bp->b_vp); 1413 bp->b_vp = NULL; /* for clarification */ 1414 } 1415 } 1416 /* 1417 * If dirty, put on list of dirty buffers; otherwise insert onto list 1418 * of clean buffers. 1419 */ 1420 if (bp->b_flags & B_DELWRI) { 1421 struct buf *tbp; 1422 1423 listheadp = &newvp->v_dirtyblkhd; 1424 if ((newvp->v_flag & VONWORKLST) == 0) { 1425 switch (newvp->v_type) { 1426 case VDIR: 1427 delay = dirdelay; 1428 break; 1429 case VCHR: 1430 case VBLK: 1431 if (newvp->v_rdev && 1432 newvp->v_rdev->si_mountpoint != NULL) { 1433 delay = metadelay; 1434 break; 1435 } 1436 /* fall through */ 1437 default: 1438 delay = filedelay; 1439 } 1440 vn_syncer_add_to_worklist(newvp, delay); 1441 } 1442 bp->b_xflags |= BX_VNDIRTY; 1443 tbp = TAILQ_FIRST(listheadp); 1444 if (tbp == NULL || 1445 bp->b_lblkno == 0 || 1446 (bp->b_lblkno > 0 && tbp->b_lblkno < 0) || 1447 (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) { 1448 TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs); 1449 ++reassignbufsortgood; 1450 } else if (bp->b_lblkno < 0) { 1451 TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs); 1452 ++reassignbufsortgood; 1453 } else if (reassignbufmethod == 1) { 1454 /* 1455 * New sorting algorithm, only handle sequential case, 1456 * otherwise append to end (but before metadata) 1457 */ 1458 if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL && 1459 (tbp->b_xflags & BX_VNDIRTY)) { 1460 /* 1461 * Found the best place to insert the buffer 1462 */ 1463 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1464 ++reassignbufsortgood; 1465 } else { 1466 /* 1467 * Missed, append to end, but before meta-data. 1468 * We know that the head buffer in the list is 1469 * not meta-data due to prior conditionals. 1470 * 1471 * Indirect effects: NFS second stage write 1472 * tends to wind up here, giving maximum 1473 * distance between the unstable write and the 1474 * commit rpc. 
1475 */ 1476 tbp = TAILQ_LAST(listheadp, buflists); 1477 while (tbp && tbp->b_lblkno < 0) 1478 tbp = TAILQ_PREV(tbp, buflists, b_vnbufs); 1479 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1480 ++reassignbufsortbad; 1481 } 1482 } else { 1483 /* 1484 * Old sorting algorithm, scan queue and insert 1485 */ 1486 struct buf *ttbp; 1487 while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) && 1488 (ttbp->b_lblkno < bp->b_lblkno)) { 1489 ++reassignbufloops; 1490 tbp = ttbp; 1491 } 1492 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1493 } 1494 } else { 1495 bp->b_xflags |= BX_VNCLEAN; 1496 TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs); 1497 if ((newvp->v_flag & VONWORKLST) && 1498 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) { 1499 newvp->v_flag &= ~VONWORKLST; 1500 LIST_REMOVE(newvp, v_synclist); 1501 } 1502 } 1503 if (bp->b_vp != newvp) { 1504 bp->b_vp = newvp; 1505 vhold(bp->b_vp); 1506 } 1507 splx(s); 1508 } 1509 1510 /* 1511 * Create a vnode for a block device. 1512 * Used for mounting the root file system. 1513 */ 1514 int 1515 bdevvp(dev_t dev, struct vnode **vpp) 1516 { 1517 struct vnode *vp; 1518 struct vnode *nvp; 1519 int error; 1520 1521 if (dev == NODEV) { 1522 *vpp = NULLVP; 1523 return (ENXIO); 1524 } 1525 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp); 1526 if (error) { 1527 *vpp = NULLVP; 1528 return (error); 1529 } 1530 vp = nvp; 1531 vp->v_type = VCHR; 1532 vp->v_udev = dev->si_udev; 1533 *vpp = vp; 1534 return (0); 1535 } 1536 1537 int 1538 v_associate_rdev(struct vnode *vp, dev_t dev) 1539 { 1540 lwkt_tokref ilock; 1541 1542 if (dev == NULL || dev == NODEV) 1543 return(ENXIO); 1544 if (dev_is_good(dev) == 0) 1545 return(ENXIO); 1546 KKASSERT(vp->v_rdev == NULL); 1547 if (dev_ref_debug) 1548 printf("Z1"); 1549 vp->v_rdev = reference_dev(dev); 1550 lwkt_gettoken(&ilock, &spechash_token); 1551 SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_specnext); 1552 lwkt_reltoken(&ilock); 1553 return(0); 1554 } 1555 1556 void 1557 v_release_rdev(struct vnode *vp) 1558 { 1559 lwkt_tokref ilock; 1560 dev_t dev; 1561 1562 if ((dev = vp->v_rdev) != NULL) { 1563 lwkt_gettoken(&ilock, &spechash_token); 1564 SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_specnext); 1565 if (dev_ref_debug && vp->v_opencount != 0) { 1566 printf("releasing rdev with non-0 " 1567 "v_opencount(%d) (revoked?)\n", 1568 vp->v_opencount); 1569 } 1570 vp->v_rdev = NULL; 1571 vp->v_opencount = 0; 1572 release_dev(dev); 1573 lwkt_reltoken(&ilock); 1574 } 1575 } 1576 1577 /* 1578 * Add a vnode to the alias list hung off the dev_t. We only associate 1579 * the device number with the vnode. The actual device is not associated 1580 * until the vnode is opened (usually in spec_open()), and will be 1581 * disassociated on last close. 1582 */ 1583 void 1584 addaliasu(struct vnode *nvp, udev_t nvp_udev) 1585 { 1586 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1587 panic("addaliasu on non-special vnode"); 1588 nvp->v_udev = nvp_udev; 1589 } 1590 1591 /* 1592 * Grab a particular vnode from the free list, increment its 1593 * reference count and lock it. The vnode lock bit is set if the 1594 * vnode is being eliminated in vgone. The process is awakened 1595 * when the transition is completed, and an error returned to 1596 * indicate that the vnode is no longer usable (possibly having 1597 * been changed to a new file system type). 1598 * 1599 * This code is very sensitive. 
We are depending on the vnode interlock 1600 * to be maintained through to the vn_lock() call, which means that we 1601 * cannot block which means that we cannot call vbusy() until after vn_lock(). 1602 * If the interlock is not maintained, the VXLOCK check will not properly 1603 * interlock against a vclean()'s LK_DRAIN operation on the lock. 1604 */ 1605 int 1606 vget(struct vnode *vp, lwkt_tokref_t vlock, int flags, thread_t td) 1607 { 1608 int error; 1609 lwkt_tokref vvlock; 1610 1611 /* 1612 * We need the interlock to safely modify the v_ fields. ZZZ it is 1613 * only legal to pass (1) the vnode's interlock and (2) only pass 1614 * NULL w/o LK_INTERLOCK if the vnode is *ALREADY* referenced or 1615 * held. 1616 */ 1617 if ((flags & LK_INTERLOCK) == 0) { 1618 lwkt_gettoken(&vvlock, vp->v_interlock); 1619 vlock = &vvlock; 1620 } 1621 1622 /* 1623 * If the vnode is in the process of being cleaned out for 1624 * another use, we wait for the cleaning to finish and then 1625 * return failure. Cleaning is determined by checking that 1626 * the VXLOCK flag is set. It is possible for the vnode to be 1627 * self-referenced during the cleaning operation. 1628 */ 1629 if (vp->v_flag & VXLOCK) { 1630 if (vp->v_vxthread == curthread) { 1631 #if 0 1632 /* this can now occur in normal operation */ 1633 log(LOG_INFO, "VXLOCK interlock avoided\n"); 1634 #endif 1635 } else { 1636 vp->v_flag |= VXWANT; 1637 lwkt_reltoken(vlock); 1638 tsleep((caddr_t)vp, 0, "vget", 0); 1639 return (ENOENT); 1640 } 1641 } 1642 1643 /* 1644 * Bump v_usecount to prevent the vnode from being recycled. The 1645 * usecount needs to be bumped before we successfully get our lock. 1646 */ 1647 vp->v_usecount++; 1648 if (flags & LK_TYPE_MASK) { 1649 if ((error = vn_lock(vp, vlock, flags | LK_INTERLOCK, td)) != 0) { 1650 /* 1651 * must expand vrele here because we do not want 1652 * to call VOP_INACTIVE if the reference count 1653 * drops back to zero since it was never really 1654 * active. We must remove it from the free list 1655 * before sleeping so that multiple processes do 1656 * not try to recycle it. 1657 */ 1658 lwkt_gettokref(vlock); 1659 vp->v_usecount--; 1660 vmaybefree(vp); 1661 lwkt_reltoken(vlock); 1662 } 1663 return (error); 1664 } 1665 if (VSHOULDBUSY(vp)) 1666 vbusy(vp); /* interlock must be held on call */ 1667 lwkt_reltoken(vlock); 1668 return (0); 1669 } 1670 1671 void 1672 vref(struct vnode *vp) 1673 { 1674 crit_enter(); /* YYY use crit section for moment / BGL protected */ 1675 vp->v_usecount++; 1676 crit_exit(); 1677 } 1678 1679 /* 1680 * Vnode put/release. 1681 * If count drops to zero, call inactive routine and return to freelist. 1682 */ 1683 void 1684 vrele(struct vnode *vp) 1685 { 1686 struct thread *td = curthread; /* XXX */ 1687 lwkt_tokref vlock; 1688 1689 KASSERT(vp != NULL && vp->v_usecount >= 0, 1690 ("vrele: null vp or <=0 v_usecount")); 1691 1692 lwkt_gettoken(&vlock, vp->v_interlock); 1693 1694 if (vp->v_usecount > 1) { 1695 vp->v_usecount--; 1696 lwkt_reltoken(&vlock); 1697 return; 1698 } 1699 1700 if (vp->v_usecount == 1) { 1701 vp->v_usecount--; 1702 /* 1703 * We must call VOP_INACTIVE with the node locked and the 1704 * usecount 0. If we are doing a vpu, the node is already 1705 * locked, but, in the case of vrele, we must explicitly lock 1706 * the vnode before calling VOP_INACTIVE. 
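 *
 * vput(), by contrast, is entered with the vnode already locked, so
 * it simply calls VOP_INACTIVE(), which is expected to release the
 * lock on its way out.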
1707 */ 1708 1709 if (vn_lock(vp, NULL, LK_EXCLUSIVE, td) == 0) 1710 VOP_INACTIVE(vp, td); 1711 vmaybefree(vp); 1712 lwkt_reltoken(&vlock); 1713 } else { 1714 #ifdef DIAGNOSTIC 1715 vprint("vrele: negative ref count", vp); 1716 #endif 1717 lwkt_reltoken(&vlock); 1718 panic("vrele: negative ref cnt"); 1719 } 1720 } 1721 1722 void 1723 vput(struct vnode *vp) 1724 { 1725 struct thread *td = curthread; /* XXX */ 1726 lwkt_tokref vlock; 1727 1728 KASSERT(vp != NULL, ("vput: null vp")); 1729 1730 lwkt_gettoken(&vlock, vp->v_interlock); 1731 1732 if (vp->v_usecount > 1) { 1733 vp->v_usecount--; 1734 VOP_UNLOCK(vp, &vlock, LK_INTERLOCK, td); 1735 return; 1736 } 1737 1738 if (vp->v_usecount == 1) { 1739 vp->v_usecount--; 1740 /* 1741 * We must call VOP_INACTIVE with the node locked. 1742 * If we are doing a vpu, the node is already locked, 1743 * so we just need to release the vnode mutex. 1744 */ 1745 VOP_INACTIVE(vp, td); 1746 vmaybefree(vp); 1747 lwkt_reltoken(&vlock); 1748 } else { 1749 #ifdef DIAGNOSTIC 1750 vprint("vput: negative ref count", vp); 1751 #endif 1752 lwkt_reltoken(&vlock); 1753 panic("vput: negative ref cnt"); 1754 } 1755 } 1756 1757 /* 1758 * Somebody doesn't want the vnode recycled. ZZZ vnode interlock should 1759 * be held but isn't. 1760 */ 1761 void 1762 vhold(struct vnode *vp) 1763 { 1764 int s; 1765 1766 s = splbio(); 1767 vp->v_holdcnt++; 1768 if (VSHOULDBUSY(vp)) 1769 vbusy(vp); /* interlock must be held on call */ 1770 splx(s); 1771 } 1772 1773 /* 1774 * One less who cares about this vnode. 1775 */ 1776 void 1777 vdrop(struct vnode *vp) 1778 { 1779 lwkt_tokref vlock; 1780 1781 lwkt_gettoken(&vlock, vp->v_interlock); 1782 if (vp->v_holdcnt <= 0) 1783 panic("vdrop: holdcnt"); 1784 vp->v_holdcnt--; 1785 vmaybefree(vp); 1786 lwkt_reltoken(&vlock); 1787 } 1788 1789 int 1790 vmntvnodescan( 1791 struct mount *mp, 1792 int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data), 1793 int (*slowfunc)(struct mount *mp, struct vnode *vp, 1794 lwkt_tokref_t vlock, void *data), 1795 void *data 1796 ) { 1797 lwkt_tokref ilock; 1798 lwkt_tokref vlock; 1799 struct vnode *pvp; 1800 struct vnode *vp; 1801 int r = 0; 1802 1803 /* 1804 * Scan the vnodes on the mount's vnode list. Use a placemarker 1805 */ 1806 pvp = zalloc(vnode_zone); 1807 pvp->v_flag |= VPLACEMARKER; 1808 1809 lwkt_gettoken(&ilock, &mntvnode_token); 1810 TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1811 1812 while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) { 1813 /* 1814 * Move the placemarker and skip other placemarkers we 1815 * encounter. The nothing can get in our way so the 1816 * mount point on the vp must be valid. 1817 */ 1818 TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1819 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes); 1820 if (vp->v_flag & VPLACEMARKER) 1821 continue; 1822 KKASSERT(vp->v_mount == mp); 1823 1824 /* 1825 * Quick test 1826 */ 1827 if (fastfunc) { 1828 if ((r = fastfunc(mp, vp, data)) < 0) 1829 continue; 1830 if (r) 1831 break; 1832 } 1833 1834 /* 1835 * Get the vnodes interlock and make sure it is still on the 1836 * mount list. Skip it if it has moved (we may encounter it 1837 * later). Then do the with-interlock test. The callback 1838 * is responsible for releasing the vnode interlock. 1839 * 1840 * The interlock is type-stable. 
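 *
 * The callback must release the interlock on every return path, and a
 * non-zero return aborts the scan (vflush_scan() below is the in-tree
 * example).  A minimal sketch, with my_scan_cb() as a hypothetical
 * callback that is not part of this file:
 *
 *	static int
 *	my_scan_cb(struct mount *mp, struct vnode *vp,
 *		   lwkt_tokref_t vlock, void *data)
 *	{
 *		int *counter = data;
 *
 *		if (vp->v_usecount > 0)
 *			++*counter;
 *		lwkt_reltoken(vlock);
 *		return (0);
 *	}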
1841 */ 1842 if (slowfunc) { 1843 lwkt_gettoken(&vlock, vp->v_interlock); 1844 if (vp != TAILQ_PREV(pvp, vnodelst, v_nmntvnodes)) { 1845 printf("vmntvnodescan (debug info only): f=%p vp=%p vnode ripped out from under us\n", slowfunc, vp); 1846 lwkt_reltoken(&vlock); 1847 continue; 1848 } 1849 if ((r = slowfunc(mp, vp, &vlock, data)) != 0) { 1850 KKASSERT(lwkt_havetokref(&vlock) == 0); 1851 break; 1852 } 1853 KKASSERT(lwkt_havetokref(&vlock) == 0); 1854 } 1855 } 1856 TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1857 zfree(vnode_zone, pvp); 1858 lwkt_reltoken(&ilock); 1859 return(r); 1860 } 1861 1862 /* 1863 * Remove any vnodes in the vnode table belonging to mount point mp. 1864 * 1865 * If FORCECLOSE is not specified, there should not be any active ones, 1866 * return error if any are found (nb: this is a user error, not a 1867 * system error). If FORCECLOSE is specified, detach any active vnodes 1868 * that are found. 1869 * 1870 * If WRITECLOSE is set, only flush out regular file vnodes open for 1871 * writing. 1872 * 1873 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped. 1874 * 1875 * `rootrefs' specifies the base reference count for the root vnode 1876 * of this filesystem. The root vnode is considered busy if its 1877 * v_usecount exceeds this value. On a successful return, vflush() 1878 * will call vrele() on the root vnode exactly rootrefs times. 1879 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 1880 * be zero. 1881 */ 1882 #ifdef DIAGNOSTIC 1883 static int busyprt = 0; /* print out busy vnodes */ 1884 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1885 #endif 1886 1887 static int vflush_scan(struct mount *mp, struct vnode *vp, 1888 lwkt_tokref_t vlock, void *data); 1889 1890 struct vflush_info { 1891 int flags; 1892 int busy; 1893 thread_t td; 1894 }; 1895 1896 int 1897 vflush(struct mount *mp, int rootrefs, int flags) 1898 { 1899 struct thread *td = curthread; /* XXX */ 1900 struct vnode *rootvp = NULL; 1901 int error; 1902 lwkt_tokref vlock; 1903 struct vflush_info vflush_info; 1904 1905 if (rootrefs > 0) { 1906 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 1907 ("vflush: bad args")); 1908 /* 1909 * Get the filesystem root vnode. We can vput() it 1910 * immediately, since with rootrefs > 0, it won't go away. 1911 */ 1912 if ((error = VFS_ROOT(mp, &rootvp)) != 0) 1913 return (error); 1914 vput(rootvp); 1915 } 1916 1917 vflush_info.busy = 0; 1918 vflush_info.flags = flags; 1919 vflush_info.td = td; 1920 vmntvnodescan(mp, NULL, vflush_scan, &vflush_info); 1921 1922 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 1923 /* 1924 * If just the root vnode is busy, and if its refcount 1925 * is equal to `rootrefs', then go ahead and kill it. 1926 */ 1927 lwkt_gettoken(&vlock, rootvp->v_interlock); 1928 KASSERT(vflush_info.busy > 0, ("vflush: not busy")); 1929 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs")); 1930 if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) { 1931 vgonel(rootvp, &vlock, td); 1932 vflush_info.busy = 0; 1933 } else { 1934 lwkt_reltoken(&vlock); 1935 } 1936 } 1937 if (vflush_info.busy) 1938 return (EBUSY); 1939 for (; rootrefs > 0; rootrefs--) 1940 vrele(rootvp); 1941 return (0); 1942 } 1943 1944 /* 1945 * The scan callback is made with an interlocked vnode. 
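 *
 * Every return path below either releases vlock explicitly or hands
 * it to vgonel()/vclean(), which drop it before they return.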
1946 */ 1947 static int 1948 vflush_scan(struct mount *mp, struct vnode *vp, 1949 lwkt_tokref_t vlock, void *data) 1950 { 1951 struct vflush_info *info = data; 1952 struct vattr vattr; 1953 1954 /* 1955 * Skip over a vnodes marked VSYSTEM. 1956 */ 1957 if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1958 lwkt_reltoken(vlock); 1959 return(0); 1960 } 1961 1962 /* 1963 * If WRITECLOSE is set, flush out unlinked but still open 1964 * files (even if open only for reading) and regular file 1965 * vnodes open for writing. 1966 */ 1967 if ((info->flags & WRITECLOSE) && 1968 (vp->v_type == VNON || 1969 (VOP_GETATTR(vp, &vattr, info->td) == 0 && 1970 vattr.va_nlink > 0)) && 1971 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1972 lwkt_reltoken(vlock); 1973 return(0); 1974 } 1975 1976 /* 1977 * With v_usecount == 0, all we need to do is clear out the 1978 * vnode data structures and we are done. 1979 */ 1980 if (vp->v_usecount == 0) { 1981 vgonel(vp, vlock, info->td); 1982 return(0); 1983 } 1984 1985 /* 1986 * If FORCECLOSE is set, forcibly close the vnode. For block 1987 * or character devices, revert to an anonymous device. For 1988 * all other files, just kill them. 1989 */ 1990 if (info->flags & FORCECLOSE) { 1991 if (vp->v_type != VBLK && vp->v_type != VCHR) { 1992 vgonel(vp, vlock, info->td); 1993 } else { 1994 vclean(vp, vlock, 0, info->td); 1995 vp->v_op = spec_vnodeop_p; 1996 insmntque(vp, (struct mount *) 0); 1997 } 1998 return(0); 1999 } 2000 #ifdef DIAGNOSTIC 2001 if (busyprt) 2002 vprint("vflush: busy vnode", vp); 2003 #endif 2004 lwkt_reltoken(vlock); 2005 ++info->busy; 2006 return(0); 2007 } 2008 2009 /* 2010 * Disassociate the underlying file system from a vnode. 2011 */ 2012 static void 2013 vclean(struct vnode *vp, lwkt_tokref_t vlock, int flags, struct thread *td) 2014 { 2015 int active; 2016 2017 /* 2018 * Check to see if the vnode is in use. If so we have to reference it 2019 * before we clean it out so that its count cannot fall to zero and 2020 * generate a race against ourselves to recycle it. 2021 */ 2022 if ((active = vp->v_usecount)) 2023 vp->v_usecount++; 2024 2025 /* 2026 * Prevent the vnode from being recycled or brought into use while we 2027 * clean it out. 2028 */ 2029 if (vp->v_flag & VXLOCK) 2030 panic("vclean: deadlock"); 2031 vp->v_flag |= VXLOCK; 2032 vp->v_vxthread = curthread; 2033 2034 /* 2035 * Even if the count is zero, the VOP_INACTIVE routine may still 2036 * have the object locked while it cleans it out. The VOP_LOCK 2037 * ensures that the VOP_INACTIVE routine is done with its work. 2038 * For active vnodes, it ensures that no other activity can 2039 * occur while the underlying object is being cleaned out. 2040 * 2041 * NOTE: we continue to hold the vnode interlock through to the 2042 * end of vclean(). 2043 */ 2044 VOP_LOCK(vp, NULL, LK_DRAIN, td); 2045 2046 /* 2047 * Clean out any buffers associated with the vnode. 2048 */ 2049 vinvalbuf(vp, V_SAVE, td, 0, 0); 2050 VOP_DESTROYVOBJECT(vp); 2051 2052 /* 2053 * If purging an active vnode, it must be closed and 2054 * deactivated before being reclaimed. Note that the 2055 * VOP_INACTIVE will unlock the vnode. 2056 */ 2057 if (active) { 2058 if (flags & DOCLOSE) 2059 VOP_CLOSE(vp, FNONBLOCK, td); 2060 VOP_INACTIVE(vp, td); 2061 } else { 2062 /* 2063 * Any other processes trying to obtain this lock must first 2064 * wait for VXLOCK to clear, then call the new lock operation. 2065 */ 2066 VOP_UNLOCK(vp, NULL, 0, td); 2067 } 2068 /* 2069 * Reclaim the vnode. 
2070 */ 2071 if (VOP_RECLAIM(vp, td)) 2072 panic("vclean: cannot reclaim"); 2073 2074 if (active) { 2075 /* 2076 * Inline copy of vrele() since VOP_INACTIVE 2077 * has already been called. 2078 */ 2079 if (--vp->v_usecount <= 0) { 2080 #ifdef DIAGNOSTIC 2081 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2082 vprint("vclean: bad ref count", vp); 2083 panic("vclean: ref cnt"); 2084 } 2085 #endif 2086 vfree(vp); 2087 } 2088 } 2089 2090 cache_purge(vp); 2091 vp->v_vnlock = NULL; 2092 vmaybefree(vp); 2093 2094 /* 2095 * Done with purge, notify sleepers of the grim news. 2096 */ 2097 vp->v_op = dead_vnodeop_p; 2098 vn_pollgone(vp); 2099 vp->v_tag = VT_NON; 2100 vp->v_flag &= ~VXLOCK; 2101 vp->v_vxthread = NULL; 2102 if (vp->v_flag & VXWANT) { 2103 vp->v_flag &= ~VXWANT; 2104 wakeup((caddr_t) vp); 2105 } 2106 lwkt_reltoken(vlock); 2107 } 2108 2109 /* 2110 * Eliminate all activity associated with the requested vnode 2111 * and with all vnodes aliased to the requested vnode. 2112 * 2113 * revoke { struct vnode *a_vp, int a_flags } 2114 */ 2115 int 2116 vop_revoke(struct vop_revoke_args *ap) 2117 { 2118 struct vnode *vp, *vq; 2119 lwkt_tokref ilock; 2120 dev_t dev; 2121 2122 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2123 2124 vp = ap->a_vp; 2125 /* 2126 * If a vgone (or vclean) is already in progress, 2127 * wait until it is done and return. 2128 */ 2129 if (vp->v_flag & VXLOCK) { 2130 vp->v_flag |= VXWANT; 2131 /*lwkt_reltoken(vlock); ZZZ */ 2132 tsleep((caddr_t)vp, 0, "vop_revokeall", 0); 2133 return (0); 2134 } 2135 2136 /* 2137 * If the vnode has a device association, scrap all vnodes associated 2138 * with the device. Don't let the device disappear on us while we 2139 * are scrapping the vnodes. 2140 */ 2141 if (vp->v_type != VCHR && vp->v_type != VBLK) 2142 return(0); 2143 if ((dev = vp->v_rdev) == NULL) { 2144 if ((dev = udev2dev(vp->v_udev, vp->v_type == VBLK)) == NODEV) 2145 return(0); 2146 } 2147 reference_dev(dev); 2148 for (;;) { 2149 lwkt_gettoken(&ilock, &spechash_token); 2150 vq = SLIST_FIRST(&dev->si_hlist); 2151 lwkt_reltoken(&ilock); 2152 if (vq == NULL) 2153 break; 2154 vgone(vq); 2155 } 2156 release_dev(dev); 2157 return (0); 2158 } 2159 2160 /* 2161 * Recycle an unused vnode to the front of the free list. 2162 * Release the passed interlock if the vnode will be recycled. 2163 */ 2164 int 2165 vrecycle(struct vnode *vp, lwkt_tokref_t inter_lkp, struct thread *td) 2166 { 2167 lwkt_tokref vlock; 2168 2169 lwkt_gettoken(&vlock, vp->v_interlock); 2170 if (vp->v_usecount == 0) { 2171 if (inter_lkp) 2172 lwkt_reltoken(inter_lkp); 2173 vgonel(vp, &vlock, td); 2174 return (1); 2175 } 2176 lwkt_reltoken(&vlock); 2177 return (0); 2178 } 2179 2180 /* 2181 * Eliminate all activity associated with a vnode 2182 * in preparation for reuse. 2183 */ 2184 void 2185 vgone(struct vnode *vp) 2186 { 2187 struct thread *td = curthread; /* XXX */ 2188 lwkt_tokref vlock; 2189 2190 lwkt_gettoken(&vlock, vp->v_interlock); 2191 vgonel(vp, &vlock, td); 2192 } 2193 2194 /* 2195 * vgone, with the vp interlock held. 2196 */ 2197 void 2198 vgonel(struct vnode *vp, lwkt_tokref_t vlock, struct thread *td) 2199 { 2200 lwkt_tokref ilock; 2201 int s; 2202 2203 /* 2204 * If a vgone (or vclean) is already in progress, 2205 * wait until it is done and return. 2206 */ 2207 if (vp->v_flag & VXLOCK) { 2208 vp->v_flag |= VXWANT; 2209 lwkt_reltoken(vlock); 2210 tsleep((caddr_t)vp, 0, "vgone", 0); 2211 return; 2212 } 2213 2214 /* 2215 * Clean out the filesystem specific data. 
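 *
 * A common way to arrive here: a filesystem's inactive routine notices
 * that the underlying file is gone and calls vrecycle(), above, which
 * in turn calls vgonel().  A hedged, hypothetical sketch of such a
 * caller; the examplefs names and the n_nlink field are made up:
 *
 *     static int
 *     examplefs_inactive(struct vop_inactive_args *ap)
 *     {
 *         struct vnode *vp = ap->a_vp;
 *         struct examplefs_node *np = vp->v_data;
 *         struct thread *td = curthread;
 *
 *         VOP_UNLOCK(vp, NULL, 0, td);    (inactive leaves vp unlocked)
 *         if (np == NULL || np->n_nlink == 0)
 *             vrecycle(vp, NULL, td);
 *         return (0);
 *     }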
2216 */ 2217 vclean(vp, vlock, DOCLOSE, td); 2218 lwkt_gettokref(vlock); 2219 2220 /* 2221 * Delete from old mount point vnode list, if on one. 2222 */ 2223 if (vp->v_mount != NULL) 2224 insmntque(vp, (struct mount *)0); 2225 2226 /* 2227 * If special device, remove it from special device alias list 2228 * if it is on one. This should normally only occur if a vnode is 2229 * being revoked as the device should otherwise have been released 2230 * naturally. 2231 */ 2232 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) { 2233 v_release_rdev(vp); 2234 } 2235 2236 /* 2237 * If it is on the freelist and not already at the head, 2238 * move it to the head of the list. The test of the 2239 * VDOOMED flag and the reference count of zero is because 2240 * it will be removed from the free list by getnewvnode, 2241 * but will not have its reference count incremented until 2242 * after calling vgone. If the reference count were 2243 * incremented first, vgone would (incorrectly) try to 2244 * close the previous instance of the underlying object. 2245 */ 2246 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 2247 s = splbio(); 2248 lwkt_gettoken(&ilock, &vnode_free_list_token); 2249 if (vp->v_flag & VFREE) 2250 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2251 else 2252 freevnodes++; 2253 vp->v_flag |= VFREE; 2254 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2255 lwkt_reltoken(&ilock); 2256 splx(s); 2257 } 2258 vp->v_type = VBAD; 2259 lwkt_reltoken(vlock); 2260 } 2261 2262 /* 2263 * Lookup a vnode by device number. 2264 */ 2265 int 2266 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp) 2267 { 2268 lwkt_tokref ilock; 2269 struct vnode *vp; 2270 2271 lwkt_gettoken(&ilock, &spechash_token); 2272 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2273 if (type == vp->v_type) { 2274 *vpp = vp; 2275 lwkt_reltoken(&ilock); 2276 return (1); 2277 } 2278 } 2279 lwkt_reltoken(&ilock); 2280 return (0); 2281 } 2282 2283 /* 2284 * Calculate the total number of references to a special device. This 2285 * routine may only be called for VBLK and VCHR vnodes since v_rdev is 2286 * an overloaded field. Since udev2dev can now return NODEV, we have 2287 * to check for a NULL v_rdev. 2288 */ 2289 int 2290 count_dev(dev_t dev) 2291 { 2292 lwkt_tokref ilock; 2293 struct vnode *vp; 2294 int count = 0; 2295 2296 if (SLIST_FIRST(&dev->si_hlist)) { 2297 lwkt_gettoken(&ilock, &spechash_token); 2298 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2299 count += vp->v_usecount; 2300 } 2301 lwkt_reltoken(&ilock); 2302 } 2303 return(count); 2304 } 2305 2306 int 2307 count_udev(udev_t udev) 2308 { 2309 dev_t dev; 2310 2311 if ((dev = udev2dev(udev, 0)) == NODEV) 2312 return(0); 2313 return(count_dev(dev)); 2314 } 2315 2316 int 2317 vcount(struct vnode *vp) 2318 { 2319 if (vp->v_rdev == NULL) 2320 return(0); 2321 return(count_dev(vp->v_rdev)); 2322 } 2323 2324 /* 2325 * Print out a description of a vnode. 
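 *
 * For reference, a line produced by this routine looks roughly like the
 * following (shown wrapped; pointer value, counts and flags are invented
 * for the example):
 *
 *     vflush: busy vnode: 0xc12dbe00: type VDIR, usecount 2,
 *     writecount 0, refcount 3, flags (VROOT|VOBJBUF)
 *
 * followed, when v_data is set, by a filesystem-specific VOP_PRINT line.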
2326 */ 2327 static char *typename[] = 2328 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2329 2330 void 2331 vprint(char *label, struct vnode *vp) 2332 { 2333 char buf[96]; 2334 2335 if (label != NULL) 2336 printf("%s: %p: ", label, (void *)vp); 2337 else 2338 printf("%p: ", (void *)vp); 2339 printf("type %s, usecount %d, writecount %d, refcount %d,", 2340 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 2341 vp->v_holdcnt); 2342 buf[0] = '\0'; 2343 if (vp->v_flag & VROOT) 2344 strcat(buf, "|VROOT"); 2345 if (vp->v_flag & VTEXT) 2346 strcat(buf, "|VTEXT"); 2347 if (vp->v_flag & VSYSTEM) 2348 strcat(buf, "|VSYSTEM"); 2349 if (vp->v_flag & VXLOCK) 2350 strcat(buf, "|VXLOCK"); 2351 if (vp->v_flag & VXWANT) 2352 strcat(buf, "|VXWANT"); 2353 if (vp->v_flag & VBWAIT) 2354 strcat(buf, "|VBWAIT"); 2355 if (vp->v_flag & VDOOMED) 2356 strcat(buf, "|VDOOMED"); 2357 if (vp->v_flag & VFREE) 2358 strcat(buf, "|VFREE"); 2359 if (vp->v_flag & VOBJBUF) 2360 strcat(buf, "|VOBJBUF"); 2361 if (buf[0] != '\0') 2362 printf(" flags (%s)", &buf[1]); 2363 if (vp->v_data == NULL) { 2364 printf("\n"); 2365 } else { 2366 printf("\n\t"); 2367 VOP_PRINT(vp); 2368 } 2369 } 2370 2371 #ifdef DDB 2372 #include <ddb/ddb.h> 2373 /* 2374 * List all of the locked vnodes in the system. 2375 * Called when debugging the kernel. 2376 */ 2377 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 2378 { 2379 struct thread *td = curthread; /* XXX */ 2380 lwkt_tokref ilock; 2381 struct mount *mp, *nmp; 2382 struct vnode *vp; 2383 2384 printf("Locked vnodes\n"); 2385 lwkt_gettoken(&ilock, &mountlist_token); 2386 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2387 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) { 2388 nmp = TAILQ_NEXT(mp, mnt_list); 2389 continue; 2390 } 2391 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2392 if (VOP_ISLOCKED(vp, NULL)) 2393 vprint((char *)0, vp); 2394 } 2395 lwkt_gettokref(&ilock); 2396 nmp = TAILQ_NEXT(mp, mnt_list); 2397 vfs_unbusy(mp, td); 2398 } 2399 lwkt_reltoken(&ilock); 2400 } 2401 #endif 2402 2403 /* 2404 * Top level filesystem related information gathering. 2405 */ 2406 static int sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS); 2407 2408 static int 2409 vfs_sysctl(SYSCTL_HANDLER_ARGS) 2410 { 2411 int *name = (int *)arg1 - 1; /* XXX */ 2412 u_int namelen = arg2 + 1; /* XXX */ 2413 struct vfsconf *vfsp; 2414 2415 #if 1 || defined(COMPAT_PRELITE2) 2416 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
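 *
 * A hedged illustration of how this handler is normally reached from
 * userland; the code below parses name[]/namelen exactly as such a MIB
 * vector supplies them.  Example only, not part of this file:
 *
 *     int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, 0 };
 *     struct vfsconf vfc;
 *     size_t len = sizeof(vfc);
 *
 *     mib[3] = typenum;          (vfc_typenum of the fs of interest)
 *     if (sysctl(mib, 4, &vfc, &len, NULL, 0) == 0)
 *         printf("%s: refcount %d\n", vfc.vfc_name, vfc.vfc_refcount);
 *
 * With only { CTL_VFS, VFS_GENERIC } supplied, namelen works out to 1
 * and the old-style ovfs_conf dump below is returned instead.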
*/ 2417 if (namelen == 1) 2418 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2419 #endif 2420 2421 #ifdef notyet 2422 /* all sysctl names at this level are at least name and field */ 2423 if (namelen < 2) 2424 return (ENOTDIR); /* overloaded */ 2425 if (name[0] != VFS_GENERIC) { 2426 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2427 if (vfsp->vfc_typenum == name[0]) 2428 break; 2429 if (vfsp == NULL) 2430 return (EOPNOTSUPP); 2431 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2432 oldp, oldlenp, newp, newlen, p)); 2433 } 2434 #endif 2435 switch (name[1]) { 2436 case VFS_MAXTYPENUM: 2437 if (namelen != 2) 2438 return (ENOTDIR); 2439 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2440 case VFS_CONF: 2441 if (namelen != 3) 2442 return (ENOTDIR); /* overloaded */ 2443 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2444 if (vfsp->vfc_typenum == name[2]) 2445 break; 2446 if (vfsp == NULL) 2447 return (EOPNOTSUPP); 2448 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2449 } 2450 return (EOPNOTSUPP); 2451 } 2452 2453 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2454 "Generic filesystem"); 2455 2456 #if 1 || defined(COMPAT_PRELITE2) 2457 2458 static int 2459 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2460 { 2461 int error; 2462 struct vfsconf *vfsp; 2463 struct ovfsconf ovfs; 2464 2465 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2466 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2467 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2468 ovfs.vfc_index = vfsp->vfc_typenum; 2469 ovfs.vfc_refcount = vfsp->vfc_refcount; 2470 ovfs.vfc_flags = vfsp->vfc_flags; 2471 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2472 if (error) 2473 return error; 2474 } 2475 return 0; 2476 } 2477 2478 #endif /* 1 || COMPAT_PRELITE2 */ 2479 2480 #if 0 2481 #define KINFO_VNODESLOP 10 2482 /* 2483 * Dump vnode list (via sysctl). 2484 * Copyout address of vnode followed by vnode. 2485 */ 2486 /* ARGSUSED */ 2487 static int 2488 sysctl_vnode(SYSCTL_HANDLER_ARGS) 2489 { 2490 struct proc *p = curproc; /* XXX */ 2491 struct mount *mp, *nmp; 2492 struct vnode *nvp, *vp; 2493 lwkt_tokref ilock; 2494 lwkt_tokref jlock; 2495 int error; 2496 2497 #define VPTRSZ sizeof (struct vnode *) 2498 #define VNODESZ sizeof (struct vnode) 2499 2500 req->lock = 0; 2501 if (!req->oldptr) /* Make an estimate */ 2502 return (SYSCTL_OUT(req, 0, 2503 (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 2504 2505 lwkt_gettoken(&ilock, &mountlist_token); 2506 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2507 if (vfs_busy(mp, LK_NOWAIT, &ilock, p)) { 2508 nmp = TAILQ_NEXT(mp, mnt_list); 2509 continue; 2510 } 2511 lwkt_gettoken(&jlock, &mntvnode_token); 2512 again: 2513 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 2514 vp != NULL; 2515 vp = nvp) { 2516 /* 2517 * Check that the vp is still associated with 2518 * this filesystem. RACE: could have been 2519 * recycled onto the same filesystem. 2520 */ 2521 if (vp->v_mount != mp) 2522 goto again; 2523 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2524 if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 2525 (error = SYSCTL_OUT(req, vp, VNODESZ))) { 2526 lwkt_reltoken(&jlock); 2527 return (error); 2528 } 2529 } 2530 lwkt_reltoken(&jlock); 2531 lwkt_gettokref(&ilock); 2532 nmp = TAILQ_NEXT(mp, mnt_list); /* ZZZ */ 2533 vfs_unbusy(mp, p); 2534 } 2535 lwkt_reltoken(&ilock); 2536 2537 return (0); 2538 } 2539 #endif 2540 2541 /* 2542 * XXX 2543 * Exporting the vnode list on large systems causes them to crash. 
2544 * Exporting the vnode list on medium systems causes sysctl to coredump. 2545 */ 2546 #if 0 2547 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2548 0, 0, sysctl_vnode, "S,vnode", ""); 2549 #endif 2550 2551 /* 2552 * Check to see if a filesystem is mounted on a block device. 2553 */ 2554 int 2555 vfs_mountedon(struct vnode *vp) 2556 { 2557 dev_t dev; 2558 2559 if ((dev = vp->v_rdev) == NULL) 2560 dev = udev2dev(vp->v_udev, (vp->v_type == VBLK)); 2561 if (dev != NODEV && dev->si_mountpoint) 2562 return (EBUSY); 2563 return (0); 2564 } 2565 2566 /* 2567 * Unmount all filesystems. The list is traversed in reverse order 2568 * of mounting to avoid dependencies. 2569 */ 2570 void 2571 vfs_unmountall(void) 2572 { 2573 struct mount *mp; 2574 struct thread *td = curthread; 2575 int error; 2576 2577 if (td->td_proc == NULL) 2578 td = initproc->p_thread; /* XXX XXX use proc0 instead? */ 2579 2580 /* 2581 * Since this only runs when rebooting, it is not interlocked. 2582 */ 2583 while(!TAILQ_EMPTY(&mountlist)) { 2584 mp = TAILQ_LAST(&mountlist, mntlist); 2585 error = dounmount(mp, MNT_FORCE, td); 2586 if (error) { 2587 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2588 printf("unmount of %s failed (", 2589 mp->mnt_stat.f_mntonname); 2590 if (error == EBUSY) 2591 printf("BUSY)\n"); 2592 else 2593 printf("%d)\n", error); 2594 } else { 2595 /* The unmount has removed mp from the mountlist */ 2596 } 2597 } 2598 } 2599 2600 /* 2601 * Build hash lists of net addresses and hang them off the mount point. 2602 * Called by ufs_mount() to set up the lists of export addresses. 2603 */ 2604 static int 2605 vfs_hang_addrlist(struct mount *mp, struct netexport *nep, 2606 struct export_args *argp) 2607 { 2608 struct netcred *np; 2609 struct radix_node_head *rnh; 2610 int i; 2611 struct radix_node *rn; 2612 struct sockaddr *saddr, *smask = 0; 2613 struct domain *dom; 2614 int error; 2615 2616 if (argp->ex_addrlen == 0) { 2617 if (mp->mnt_flag & MNT_DEFEXPORTED) 2618 return (EPERM); 2619 np = &nep->ne_defexported; 2620 np->netc_exflags = argp->ex_flags; 2621 np->netc_anon = argp->ex_anon; 2622 np->netc_anon.cr_ref = 1; 2623 mp->mnt_flag |= MNT_DEFEXPORTED; 2624 return (0); 2625 } 2626 2627 if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN) 2628 return (EINVAL); 2629 if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN) 2630 return (EINVAL); 2631 2632 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 2633 np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK); 2634 bzero((caddr_t) np, i); 2635 saddr = (struct sockaddr *) (np + 1); 2636 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 2637 goto out; 2638 if (saddr->sa_len > argp->ex_addrlen) 2639 saddr->sa_len = argp->ex_addrlen; 2640 if (argp->ex_masklen) { 2641 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen); 2642 error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen); 2643 if (error) 2644 goto out; 2645 if (smask->sa_len > argp->ex_masklen) 2646 smask->sa_len = argp->ex_masklen; 2647 } 2648 i = saddr->sa_family; 2649 if ((rnh = nep->ne_rtable[i]) == 0) { 2650 /* 2651 * Seems silly to initialize every AF when most are not used, 2652 * do so on demand here 2653 */ 2654 for (dom = domains; dom; dom = dom->dom_next) 2655 if (dom->dom_family == i && dom->dom_rtattach) { 2656 dom->dom_rtattach((void **) &nep->ne_rtable[i], 2657 dom->dom_rtoffset); 2658 break; 2659 } 2660 if ((rnh = nep->ne_rtable[i]) == 0) { 2661 error = ENOBUFS; 2662 goto out; 2663 } 2664 } 2665 rn = (*rnh->rnh_addaddr) 
((caddr_t) saddr, (caddr_t) smask, rnh, 2666 np->netc_rnodes); 2667 if (rn == 0 || np != (struct netcred *) rn) { /* already exists */ 2668 error = EPERM; 2669 goto out; 2670 } 2671 np->netc_exflags = argp->ex_flags; 2672 np->netc_anon = argp->ex_anon; 2673 np->netc_anon.cr_ref = 1; 2674 return (0); 2675 out: 2676 free(np, M_NETADDR); 2677 return (error); 2678 } 2679 2680 /* ARGSUSED */ 2681 static int 2682 vfs_free_netcred(struct radix_node *rn, void *w) 2683 { 2684 struct radix_node_head *rnh = (struct radix_node_head *) w; 2685 2686 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2687 free((caddr_t) rn, M_NETADDR); 2688 return (0); 2689 } 2690 2691 /* 2692 * Free the net address hash lists that are hanging off the mount points. 2693 */ 2694 static void 2695 vfs_free_addrlist(struct netexport *nep) 2696 { 2697 int i; 2698 struct radix_node_head *rnh; 2699 2700 for (i = 0; i <= AF_MAX; i++) 2701 if ((rnh = nep->ne_rtable[i])) { 2702 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2703 (caddr_t) rnh); 2704 free((caddr_t) rnh, M_RTABLE); 2705 nep->ne_rtable[i] = 0; 2706 } 2707 } 2708 2709 int 2710 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp) 2711 { 2712 int error; 2713 2714 if (argp->ex_flags & MNT_DELEXPORT) { 2715 if (mp->mnt_flag & MNT_EXPUBLIC) { 2716 vfs_setpublicfs(NULL, NULL, NULL); 2717 mp->mnt_flag &= ~MNT_EXPUBLIC; 2718 } 2719 vfs_free_addrlist(nep); 2720 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2721 } 2722 if (argp->ex_flags & MNT_EXPORTED) { 2723 if (argp->ex_flags & MNT_EXPUBLIC) { 2724 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2725 return (error); 2726 mp->mnt_flag |= MNT_EXPUBLIC; 2727 } 2728 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2729 return (error); 2730 mp->mnt_flag |= MNT_EXPORTED; 2731 } 2732 return (0); 2733 } 2734 2735 2736 /* 2737 * Set the publicly exported filesystem (WebNFS). Currently, only 2738 * one public filesystem is possible in the spec (RFC 2054 and 2055) 2739 */ 2740 int 2741 vfs_setpublicfs(struct mount *mp, struct netexport *nep, 2742 struct export_args *argp) 2743 { 2744 int error; 2745 struct vnode *rvp; 2746 char *cp; 2747 2748 /* 2749 * mp == NULL -> invalidate the current info, the FS is 2750 * no longer exported. May be called from either vfs_export 2751 * or unmount, so check if it hasn't already been done. 2752 */ 2753 if (mp == NULL) { 2754 if (nfs_pub.np_valid) { 2755 nfs_pub.np_valid = 0; 2756 if (nfs_pub.np_index != NULL) { 2757 FREE(nfs_pub.np_index, M_TEMP); 2758 nfs_pub.np_index = NULL; 2759 } 2760 } 2761 return (0); 2762 } 2763 2764 /* 2765 * Only one allowed at a time. 2766 */ 2767 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2768 return (EBUSY); 2769 2770 /* 2771 * Get real filehandle for root of exported FS. 2772 */ 2773 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2774 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2775 2776 if ((error = VFS_ROOT(mp, &rvp))) 2777 return (error); 2778 2779 if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2780 return (error); 2781 2782 vput(rvp); 2783 2784 /* 2785 * If an indexfile was specified, pull it in. 2786 */ 2787 if (argp->ex_indexfile != NULL) { 2788 MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP, 2789 M_WAITOK); 2790 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2791 MAXNAMLEN, (size_t *)0); 2792 if (!error) { 2793 /* 2794 * Check for illegal filenames. 
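 * For example, an index file name of "index.html" is accepted by the
 * loop below, while anything containing a path separator, such as
 * "docs/index.html", is rejected with EINVAL.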
2795 */ 2796 for (cp = nfs_pub.np_index; *cp; cp++) { 2797 if (*cp == '/') { 2798 error = EINVAL; 2799 break; 2800 } 2801 } 2802 } 2803 if (error) { 2804 FREE(nfs_pub.np_index, M_TEMP); 2805 return (error); 2806 } 2807 } 2808 2809 nfs_pub.np_mount = mp; 2810 nfs_pub.np_valid = 1; 2811 return (0); 2812 } 2813 2814 struct netcred * 2815 vfs_export_lookup(struct mount *mp, struct netexport *nep, 2816 struct sockaddr *nam) 2817 { 2818 struct netcred *np; 2819 struct radix_node_head *rnh; 2820 struct sockaddr *saddr; 2821 2822 np = NULL; 2823 if (mp->mnt_flag & MNT_EXPORTED) { 2824 /* 2825 * Lookup in the export list first. 2826 */ 2827 if (nam != NULL) { 2828 saddr = nam; 2829 rnh = nep->ne_rtable[saddr->sa_family]; 2830 if (rnh != NULL) { 2831 np = (struct netcred *) 2832 (*rnh->rnh_matchaddr)((caddr_t)saddr, 2833 rnh); 2834 if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2835 np = NULL; 2836 } 2837 } 2838 /* 2839 * If no address match, use the default if it exists. 2840 */ 2841 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2842 np = &nep->ne_defexported; 2843 } 2844 return (np); 2845 } 2846 2847 /* 2848 * perform msync on all vnodes under a mount point. The mount point must 2849 * be locked. This code is also responsible for lazy-freeing unreferenced 2850 * vnodes whos VM objects no longer contain pages. 2851 * 2852 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state. 2853 */ 2854 static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data); 2855 static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, 2856 lwkt_tokref_t vlock, void *data); 2857 2858 void 2859 vfs_msync(struct mount *mp, int flags) 2860 { 2861 vmntvnodescan(mp, vfs_msync_scan1, vfs_msync_scan2, (void *)flags); 2862 } 2863 2864 /* 2865 * scan1 is a fast pre-check. There could be hundreds of thousands of 2866 * vnodes, we cannot afford to do anything heavy weight until we have a 2867 * fairly good indication that there is work to do. 2868 */ 2869 static 2870 int 2871 vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data) 2872 { 2873 int flags = (int)data; 2874 2875 if ((vp->v_flag & VXLOCK) == 0) { 2876 if (VSHOULDFREE(vp)) 2877 return(0); 2878 if ((mp->mnt_flag & MNT_RDONLY) == 0 && 2879 (vp->v_flag & VOBJDIRTY) && 2880 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2881 return(0); 2882 } 2883 } 2884 return(-1); 2885 } 2886 2887 static 2888 int 2889 vfs_msync_scan2(struct mount *mp, struct vnode *vp, 2890 lwkt_tokref_t vlock, void *data) 2891 { 2892 vm_object_t obj; 2893 int error; 2894 int flags = (int)data; 2895 2896 if (vp->v_flag & VXLOCK) 2897 return(0); 2898 2899 if ((mp->mnt_flag & MNT_RDONLY) == 0 && 2900 (vp->v_flag & VOBJDIRTY) && 2901 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2902 error = vget(vp, vlock, LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ | LK_INTERLOCK, curthread); 2903 if (error == 0) { 2904 if (VOP_GETVOBJECT(vp, &obj) == 0) { 2905 vm_object_page_clean(obj, 0, 0, 2906 flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC); 2907 } 2908 vput(vp); 2909 } 2910 return(0); 2911 } 2912 vmaybefree(vp); 2913 lwkt_reltoken(vlock); 2914 return(0); 2915 } 2916 2917 /* 2918 * Create the VM object needed for VMIO and mmap support. This 2919 * is done for all VREG files in the system. Some filesystems might 2920 * afford the additional metadata buffering capability of the 2921 * VMIO code by making the device node be VMIO mode also. 2922 * 2923 * vp must be locked when vfs_object_create is called. 
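 *
 * A hedged sketch of a typical call site (hypothetical; the usual
 * callers are the vnode open/vget paths).  VOP_LOCK/VOP_UNLOCK are used
 * in the same form as elsewhere in this file:
 *
 *     VOP_LOCK(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
 *     if (vp->v_type == VREG)
 *         error = vfs_object_create(vp, td);
 *     VOP_UNLOCK(vp, NULL, 0, td);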
2924 */ 2925 int 2926 vfs_object_create(struct vnode *vp, struct thread *td) 2927 { 2928 return (VOP_CREATEVOBJECT(vp, td)); 2929 } 2930 2931 /* 2932 * NOTE: the vnode interlock must be held during the call. We have to recheck 2933 * the VFREE flag since the vnode may have been removed from the free list 2934 * while we were blocked on vnode_free_list_token. The use or hold count 2935 * must have already been bumped by the caller. 2936 */ 2937 static void 2938 vbusy(struct vnode *vp) 2939 { 2940 lwkt_tokref ilock; 2941 2942 lwkt_gettoken(&ilock, &vnode_free_list_token); 2943 if ((vp->v_flag & VFREE) != 0) { 2944 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2945 freevnodes--; 2946 vp->v_flag &= ~(VFREE|VAGE); 2947 } 2948 lwkt_reltoken(&ilock); 2949 } 2950 2951 /* 2952 * NOTE: the vnode interlock must be held during the call. The use or hold 2953 * count must have already been bumped by the caller. We use a VINFREE to 2954 * interlock against other calls to vfree() which might occur while we 2955 * are blocked. The vnode cannot be reused until it has actually been 2956 * placed on the free list, so there are no other races even though the 2957 * use and hold counts are 0. 2958 */ 2959 static void 2960 vfree(struct vnode *vp) 2961 { 2962 lwkt_tokref ilock; 2963 2964 if ((vp->v_flag & VINFREE) == 0) { 2965 vp->v_flag |= VINFREE; 2966 lwkt_gettoken(&ilock, &vnode_free_list_token); /* can block */ 2967 KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); 2968 if (vp->v_flag & VAGE) { 2969 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2970 } else { 2971 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2972 } 2973 freevnodes++; 2974 vp->v_flag &= ~(VAGE|VINFREE); 2975 vp->v_flag |= VFREE; 2976 lwkt_reltoken(&ilock); /* can block */ 2977 } 2978 } 2979 2980 2981 /* 2982 * Record a process's interest in events which might happen to 2983 * a vnode. Because poll uses the historic select-style interface 2984 * internally, this routine serves as both the ``check for any 2985 * pending events'' and the ``record my interest in future events'' 2986 * functions. (These are done together, while the lock is held, 2987 * to avoid race conditions.) 2988 */ 2989 int 2990 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 2991 { 2992 lwkt_tokref ilock; 2993 2994 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 2995 if (vp->v_pollinfo.vpi_revents & events) { 2996 /* 2997 * This leaves events we are not interested 2998 * in available for the other process which 2999 * which presumably had requested them 3000 * (otherwise they would never have been 3001 * recorded). 3002 */ 3003 events &= vp->v_pollinfo.vpi_revents; 3004 vp->v_pollinfo.vpi_revents &= ~events; 3005 3006 lwkt_reltoken(&ilock); 3007 return events; 3008 } 3009 vp->v_pollinfo.vpi_events |= events; 3010 selrecord(td, &vp->v_pollinfo.vpi_selinfo); 3011 lwkt_reltoken(&ilock); 3012 return 0; 3013 } 3014 3015 /* 3016 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 3017 * it is possible for us to miss an event due to race conditions, but 3018 * that condition is expected to be rare, so for the moment it is the 3019 * preferred interface. 3020 */ 3021 void 3022 vn_pollevent(struct vnode *vp, int events) 3023 { 3024 lwkt_tokref ilock; 3025 3026 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 3027 if (vp->v_pollinfo.vpi_events & events) { 3028 /* 3029 * We clear vpi_events so that we don't 3030 * call selwakeup() twice if two events are 3031 * posted before the polling process(es) is 3032 * awakened. 
This also ensures that we take at 3033 * most one selwakeup() if the polling process 3034 * is no longer interested. However, it does 3035 * mean that only one event can be noticed at 3036 * a time. (Perhaps we should only clear those 3037 * event bits which we note?) XXX 3038 */ 3039 vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? */ 3040 vp->v_pollinfo.vpi_revents |= events; 3041 selwakeup(&vp->v_pollinfo.vpi_selinfo); 3042 } 3043 lwkt_reltoken(&ilock); 3044 } 3045 3046 /* 3047 * Wake up anyone polling on vp because it is being revoked. 3048 * This depends on dead_poll() returning POLLHUP for correct 3049 * behavior. 3050 */ 3051 void 3052 vn_pollgone(struct vnode *vp) 3053 { 3054 lwkt_tokref ilock; 3055 3056 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 3057 if (vp->v_pollinfo.vpi_events) { 3058 vp->v_pollinfo.vpi_events = 0; 3059 selwakeup(&vp->v_pollinfo.vpi_selinfo); 3060 } 3061 lwkt_reltoken(&ilock); 3062 } 3063 3064 3065 3066 /* 3067 * Routine to create and manage a filesystem syncer vnode. 3068 */ 3069 #define sync_close ((int (*) (struct vop_close_args *))nullop) 3070 static int sync_fsync (struct vop_fsync_args *); 3071 static int sync_inactive (struct vop_inactive_args *); 3072 static int sync_reclaim (struct vop_reclaim_args *); 3073 #define sync_lock ((int (*) (struct vop_lock_args *))vop_nolock) 3074 #define sync_unlock ((int (*) (struct vop_unlock_args *))vop_nounlock) 3075 static int sync_print (struct vop_print_args *); 3076 #define sync_islocked ((int(*) (struct vop_islocked_args *))vop_noislocked) 3077 3078 static vop_t **sync_vnodeop_p; 3079 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 3080 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 3081 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 3082 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 3083 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 3084 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 3085 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 3086 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 3087 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 3088 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 3089 { NULL, NULL } 3090 }; 3091 static struct vnodeopv_desc sync_vnodeop_opv_desc = 3092 { &sync_vnodeop_p, sync_vnodeop_entries }; 3093 3094 VNODEOP_SET(sync_vnodeop_opv_desc); 3095 3096 /* 3097 * Create a new filesystem syncer vnode for the specified mount point. 3098 * This vnode is placed on the worklist and is responsible for sync'ing 3099 * the filesystem. 3100 * 3101 * NOTE: read-only mounts are also placed on the worklist. The filesystem 3102 * sync code is also responsible for cleaning up vnodes. 3103 */ 3104 int 3105 vfs_allocate_syncvnode(struct mount *mp) 3106 { 3107 struct vnode *vp; 3108 static long start, incr, next; 3109 int error; 3110 3111 /* Allocate a new vnode */ 3112 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 3113 mp->mnt_syncer = NULL; 3114 return (error); 3115 } 3116 vp->v_type = VNON; 3117 /* 3118 * Place the vnode onto the syncer worklist. We attempt to 3119 * scatter them about on the list so that they will go off 3120 * at evenly distributed times even if all the filesystems 3121 * are mounted at once. 
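 *
 * As a worked illustration, assuming syncer_maxdelay == 32 and
 * syncdelay == 30 (both are tunables, so the real values may differ),
 * the static start/incr/next state below hands successive syncer
 * vnodes the slots
 *
 *     16, 8, 24, 4, 12, 20, 28, 2, 6, 10, ...
 *
 * i.e. a binary subdivision of the delay range, so filesystems mounted
 * back to back do not all fire in the same second.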
3122 */ 3123 next += incr; 3124 if (next == 0 || next > syncer_maxdelay) { 3125 start /= 2; 3126 incr /= 2; 3127 if (start == 0) { 3128 start = syncer_maxdelay / 2; 3129 incr = syncer_maxdelay; 3130 } 3131 next = start; 3132 } 3133 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 3134 mp->mnt_syncer = vp; 3135 return (0); 3136 } 3137 3138 /* 3139 * Do a lazy sync of the filesystem. 3140 * 3141 * sync_fsync { struct vnode *a_vp, struct ucred *a_cred, int a_waitfor, 3142 * struct thread *a_td } 3143 */ 3144 static int 3145 sync_fsync(struct vop_fsync_args *ap) 3146 { 3147 struct vnode *syncvp = ap->a_vp; 3148 struct mount *mp = syncvp->v_mount; 3149 struct thread *td = ap->a_td; 3150 lwkt_tokref ilock; 3151 int asyncflag; 3152 3153 /* 3154 * We only need to do something if this is a lazy evaluation. 3155 */ 3156 if (ap->a_waitfor != MNT_LAZY) 3157 return (0); 3158 3159 /* 3160 * Move ourselves to the back of the sync list. 3161 */ 3162 vn_syncer_add_to_worklist(syncvp, syncdelay); 3163 3164 /* 3165 * Walk the list of vnodes pushing all that are dirty and 3166 * not already on the sync list, and freeing vnodes which have 3167 * no refs and whos VM objects are empty. vfs_msync() handles 3168 * the VM issues and must be called whether the mount is readonly 3169 * or not. 3170 */ 3171 lwkt_gettoken(&ilock, &mountlist_token); 3172 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &ilock, td) != 0) { 3173 lwkt_reltoken(&ilock); 3174 return (0); 3175 } 3176 if (mp->mnt_flag & MNT_RDONLY) { 3177 vfs_msync(mp, MNT_NOWAIT); 3178 } else { 3179 asyncflag = mp->mnt_flag & MNT_ASYNC; 3180 mp->mnt_flag &= ~MNT_ASYNC; /* ZZZ hack */ 3181 vfs_msync(mp, MNT_NOWAIT); 3182 VFS_SYNC(mp, MNT_LAZY, td); 3183 if (asyncflag) 3184 mp->mnt_flag |= MNT_ASYNC; 3185 } 3186 vfs_unbusy(mp, td); 3187 return (0); 3188 } 3189 3190 /* 3191 * The syncer vnode is no referenced. 3192 * 3193 * sync_inactive { struct vnode *a_vp, struct proc *a_p } 3194 */ 3195 static int 3196 sync_inactive(struct vop_inactive_args *ap) 3197 { 3198 vgone(ap->a_vp); 3199 return (0); 3200 } 3201 3202 /* 3203 * The syncer vnode is no longer needed and is being decommissioned. 3204 * 3205 * Modifications to the worklist must be protected at splbio(). 3206 * 3207 * sync_reclaim { struct vnode *a_vp } 3208 */ 3209 static int 3210 sync_reclaim(struct vop_reclaim_args *ap) 3211 { 3212 struct vnode *vp = ap->a_vp; 3213 int s; 3214 3215 s = splbio(); 3216 vp->v_mount->mnt_syncer = NULL; 3217 if (vp->v_flag & VONWORKLST) { 3218 LIST_REMOVE(vp, v_synclist); 3219 vp->v_flag &= ~VONWORKLST; 3220 } 3221 splx(s); 3222 3223 return (0); 3224 } 3225 3226 /* 3227 * Print out a syncer vnode. 3228 * 3229 * sync_print { struct vnode *a_vp } 3230 */ 3231 static int 3232 sync_print(struct vop_print_args *ap) 3233 { 3234 struct vnode *vp = ap->a_vp; 3235 3236 printf("syncer vnode"); 3237 if (vp->v_vnlock != NULL) 3238 lockmgr_printinfo(vp->v_vnlock); 3239 printf("\n"); 3240 return (0); 3241 } 3242 3243 /* 3244 * extract the dev_t from a VBLK or VCHR. The vnode must have been opened 3245 * (or v_rdev might be NULL). 3246 */ 3247 dev_t 3248 vn_todev(struct vnode *vp) 3249 { 3250 if (vp->v_type != VBLK && vp->v_type != VCHR) 3251 return (NODEV); 3252 KKASSERT(vp->v_rdev != NULL); 3253 return (vp->v_rdev); 3254 } 3255 3256 /* 3257 * Check if vnode represents a disk device. The vnode does not need to be 3258 * opened. 
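 *
 * Typical (illustrative) use in a mount path, checking a device vnode
 * before mounting on it; "devvp" and the surrounding code are
 * hypothetical:
 *
 *     if (!vn_isdisk(devvp, &error))
 *         return (error);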
3259 */ 3260 int 3261 vn_isdisk(struct vnode *vp, int *errp) 3262 { 3263 dev_t dev; 3264 3265 if (vp->v_type != VBLK && vp->v_type != VCHR) { 3266 if (errp != NULL) 3267 *errp = ENOTBLK; 3268 return (0); 3269 } 3270 3271 if ((dev = vp->v_rdev) == NULL) 3272 dev = udev2dev(vp->v_udev, (vp->v_type == VBLK)); 3273 if (dev == NULL || dev == NODEV) { 3274 if (errp != NULL) 3275 *errp = ENXIO; 3276 return (0); 3277 } 3278 if (dev_is_good(dev) == 0) { 3279 if (errp != NULL) 3280 *errp = ENXIO; 3281 return (0); 3282 } 3283 if ((dev_dflags(dev) & D_DISK) == 0) { 3284 if (errp != NULL) 3285 *errp = ENOTBLK; 3286 return (0); 3287 } 3288 if (errp != NULL) 3289 *errp = 0; 3290 return (1); 3291 } 3292 3293 void 3294 NDFREE(struct nameidata *ndp, const uint flags) 3295 { 3296 if (!(flags & NDF_NO_FREE_PNBUF) && 3297 (ndp->ni_cnd.cn_flags & CNP_HASBUF)) { 3298 zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3299 ndp->ni_cnd.cn_flags &= ~CNP_HASBUF; 3300 } 3301 if (!(flags & NDF_NO_DNCP_RELE) && 3302 (ndp->ni_cnd.cn_flags & CNP_WANTDNCP) && 3303 ndp->ni_dncp) { 3304 cache_drop(ndp->ni_dncp); 3305 ndp->ni_dncp = NULL; 3306 } 3307 if (!(flags & NDF_NO_NCP_RELE) && 3308 (ndp->ni_cnd.cn_flags & CNP_WANTNCP) && 3309 ndp->ni_ncp) { 3310 cache_drop(ndp->ni_ncp); 3311 ndp->ni_ncp = NULL; 3312 } 3313 if (!(flags & NDF_NO_DVP_UNLOCK) && 3314 (ndp->ni_cnd.cn_flags & CNP_LOCKPARENT) && 3315 ndp->ni_dvp != ndp->ni_vp) { 3316 VOP_UNLOCK(ndp->ni_dvp, NULL, 0, ndp->ni_cnd.cn_td); 3317 } 3318 if (!(flags & NDF_NO_DVP_RELE) && 3319 (ndp->ni_cnd.cn_flags & (CNP_LOCKPARENT|CNP_WANTPARENT))) { 3320 vrele(ndp->ni_dvp); 3321 ndp->ni_dvp = NULL; 3322 } 3323 if (!(flags & NDF_NO_VP_UNLOCK) && 3324 (ndp->ni_cnd.cn_flags & CNP_LOCKLEAF) && ndp->ni_vp) { 3325 VOP_UNLOCK(ndp->ni_vp, NULL, 0, ndp->ni_cnd.cn_td); 3326 } 3327 if (!(flags & NDF_NO_VP_RELE) && 3328 ndp->ni_vp) { 3329 vrele(ndp->ni_vp); 3330 ndp->ni_vp = NULL; 3331 } 3332 if (!(flags & NDF_NO_STARTDIR_RELE) && 3333 (ndp->ni_cnd.cn_flags & CNP_SAVESTART)) { 3334 vrele(ndp->ni_startdir); 3335 ndp->ni_startdir = NULL; 3336 } 3337 } 3338 3339 #ifdef DEBUG_VFS_LOCKS 3340 3341 void 3342 assert_vop_locked(struct vnode *vp, const char *str) 3343 { 3344 if (vp && IS_LOCKING_VFS(vp) && !VOP_ISLOCKED(vp, NULL)) { 3345 panic("%s: %p is not locked shared but should be", str, vp); 3346 } 3347 } 3348 3349 void 3350 assert_vop_unlocked(struct vnode *vp, const char *str) 3351 { 3352 if (vp && IS_LOCKING_VFS(vp)) { 3353 if (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE) { 3354 panic("%s: %p is locked but should not be", str, vp); 3355 } 3356 } 3357 } 3358 3359 #endif 3360
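
/*
 * Example (not compiled): the assert_vop_locked()/assert_vop_unlocked()
 * routines above are normally reached through ASSERT_VOP_LOCKED() and
 * ASSERT_VOP_UNLOCKED() wrappers, assumed here to be provided by
 * <sys/vnode.h> when DEBUG_VFS_LOCKS is defined.  A hypothetical vnode
 * operation might sanity check its argument like this:
 */
#if 0
static int
examplefs_fsync(struct vop_fsync_args *ap)
{
	ASSERT_VOP_LOCKED(ap->a_vp, "examplefs_fsync");
	/* flush examplefs's dirty buffers for ap->a_vp here */
	return (0);
}
#endif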