1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 35 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $ 36 */ 37 38 /* 39 * External virtual filesystem routines 40 */ 41 #include "opt_ddb.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/buf.h> 46 #include <sys/conf.h> 47 #include <sys/dirent.h> 48 #include <sys/domain.h> 49 #include <sys/eventhandler.h> 50 #include <sys/fcntl.h> 51 #include <sys/file.h> 52 #include <sys/kernel.h> 53 #include <sys/kthread.h> 54 #include <sys/malloc.h> 55 #include <sys/mbuf.h> 56 #include <sys/mount.h> 57 #include <sys/priv.h> 58 #include <sys/proc.h> 59 #include <sys/reboot.h> 60 #include <sys/socket.h> 61 #include <sys/stat.h> 62 #include <sys/sysctl.h> 63 #include <sys/syslog.h> 64 #include <sys/unistd.h> 65 #include <sys/vmmeter.h> 66 #include <sys/vnode.h> 67 68 #include <machine/limits.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_object.h> 72 #include <vm/vm_extern.h> 73 #include <vm/vm_kern.h> 74 #include <vm/pmap.h> 75 #include <vm/vm_map.h> 76 #include <vm/vm_page.h> 77 #include <vm/vm_pager.h> 78 #include <vm/vnode_pager.h> 79 #include <vm/vm_zone.h> 80 81 #include <sys/buf2.h> 82 #include <sys/thread2.h> 83 #include <sys/sysref2.h> 84 #include <sys/mplock2.h> 85 86 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); 87 88 int numvnodes; 89 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 90 "Number of vnodes allocated"); 91 int verbose_reclaims; 92 SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0, 93 "Output filename of reclaimed vnode(s)"); 94 95 enum vtype iftovt_tab[16] = { 96 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 97 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 98 }; 99 int vttoif_tab[9] = { 100 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 101 S_IFSOCK, S_IFIFO, S_IFMT, 102 }; 103 104 static int reassignbufcalls; 105 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 106 0, "Number of times buffers have been reassigned to the proper list"); 107 108 static int check_buf_overlap = 2; /* invasive check */ 109 SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap, 110 0, "Enable overlapping buffer checks"); 111 112 int nfs_mount_type = -1; 113 static struct lwkt_token spechash_token; 114 struct nfs_public nfs_pub; /* publicly exported FS */ 115 116 int desiredvnodes; 117 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, 118 &desiredvnodes, 0, "Maximum number of vnodes"); 119 120 static void vfs_free_addrlist (struct netexport *nep); 121 static int vfs_free_netcred (struct radix_node *rn, void *w); 122 static int vfs_hang_addrlist (struct mount *mp, struct netexport *nep, 123 const struct export_args *argp); 124 125 /* 126 * Red black tree functions 127 */ 128 static int rb_buf_compare(struct buf *b1, struct buf *b2); 129 RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset); 130 RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset); 131 132 static int 133 rb_buf_compare(struct buf *b1, struct buf *b2) 134 { 135 if (b1->b_loffset < b2->b_loffset) 136 return(-1); 137 if (b1->b_loffset > b2->b_loffset) 138 return(1); 139 return(0); 140 } 141 142 /* 143 * Returns non-zero if the vnode is a candidate for lazy msyncing. 144 * 145 * NOTE: v_object is not stable (this scan can race), however the 146 * mntvnodescan code holds vmobj_token so any VM object we 147 * do find will remain stable storage. 
148 */ 149 static __inline int 150 vshouldmsync(struct vnode *vp) 151 { 152 vm_object_t object; 153 154 if (vp->v_auxrefs != 0 || VREFCNT(vp) > 0) 155 return (0); /* other holders */ 156 object = vp->v_object; 157 cpu_ccfence(); 158 if (object && (object->ref_count || object->resident_page_count)) 159 return(0); 160 return (1); 161 } 162 163 /* 164 * Initialize the vnode management data structures. 165 * 166 * Called from vfsinit() 167 */ 168 void 169 vfs_subr_init(void) 170 { 171 int factor1; 172 int factor2; 173 174 /* 175 * Desiredvnodes is kern.maxvnodes. We want to scale it 176 * according to available system memory but we may also have 177 * to limit it based on available KVM, which is capped on 32 bit 178 * systems, to ~80K vnodes or so. 179 * 180 * WARNING! For machines with 64-256M of ram we have to be sure 181 * that the default limit scales down well due to HAMMER 182 * taking up significantly more memory per-vnode vs UFS. 183 * We want around ~5800 on a 128M machine. 184 */ 185 factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode)); 186 factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode)); 187 desiredvnodes = 188 imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1, 189 KvaSize / factor2); 190 desiredvnodes = imax(desiredvnodes, maxproc * 8); 191 192 lwkt_token_init(&spechash_token, "spechash"); 193 } 194 195 /* 196 * Knob to control the precision of file timestamps: 197 * 198 * 0 = seconds only; nanoseconds zeroed. 199 * 1 = seconds and nanoseconds, accurate within 1/HZ. 200 * 2 = seconds and nanoseconds, truncated to microseconds. 201 * >=3 = seconds and nanoseconds, maximum precision. 202 */ 203 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 204 205 static int timestamp_precision = TSP_SEC; 206 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 207 ×tamp_precision, 0, "Precision of file timestamps"); 208 209 /* 210 * Get a current timestamp. 211 * 212 * MPSAFE 213 */ 214 void 215 vfs_timestamp(struct timespec *tsp) 216 { 217 struct timeval tv; 218 219 switch (timestamp_precision) { 220 case TSP_SEC: 221 tsp->tv_sec = time_second; 222 tsp->tv_nsec = 0; 223 break; 224 case TSP_HZ: 225 getnanotime(tsp); 226 break; 227 case TSP_USEC: 228 microtime(&tv); 229 TIMEVAL_TO_TIMESPEC(&tv, tsp); 230 break; 231 case TSP_NSEC: 232 default: 233 nanotime(tsp); 234 break; 235 } 236 } 237 238 /* 239 * Set vnode attributes to VNOVAL 240 */ 241 void 242 vattr_null(struct vattr *vap) 243 { 244 vap->va_type = VNON; 245 vap->va_size = VNOVAL; 246 vap->va_bytes = VNOVAL; 247 vap->va_mode = VNOVAL; 248 vap->va_nlink = VNOVAL; 249 vap->va_uid = VNOVAL; 250 vap->va_gid = VNOVAL; 251 vap->va_fsid = VNOVAL; 252 vap->va_fileid = VNOVAL; 253 vap->va_blocksize = VNOVAL; 254 vap->va_rmajor = VNOVAL; 255 vap->va_rminor = VNOVAL; 256 vap->va_atime.tv_sec = VNOVAL; 257 vap->va_atime.tv_nsec = VNOVAL; 258 vap->va_mtime.tv_sec = VNOVAL; 259 vap->va_mtime.tv_nsec = VNOVAL; 260 vap->va_ctime.tv_sec = VNOVAL; 261 vap->va_ctime.tv_nsec = VNOVAL; 262 vap->va_flags = VNOVAL; 263 vap->va_gen = VNOVAL; 264 vap->va_vaflags = 0; 265 /* va_*_uuid fields are only valid if related flags are set */ 266 } 267 268 /* 269 * Flush out and invalidate all buffers associated with a vnode. 270 * 271 * vp must be locked. 
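 *
 * Illustrative usage (sketch only): the reclaim path in this file calls
 *
 *	vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * to fsync and write out any remaining dirty buffers before invalidating
 * them, while a flags argument of 0 simply discards all buffers without
 * attempting to save the data.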
272 */ 273 static int vinvalbuf_bp(struct buf *bp, void *data); 274 275 struct vinvalbuf_bp_info { 276 struct vnode *vp; 277 int slptimeo; 278 int lkflags; 279 int flags; 280 int clean; 281 }; 282 283 int 284 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 285 { 286 struct vinvalbuf_bp_info info; 287 vm_object_t object; 288 int error; 289 290 lwkt_gettoken(&vp->v_token); 291 292 /* 293 * If we are being asked to save, call fsync to ensure that the inode 294 * is updated. 295 */ 296 if (flags & V_SAVE) { 297 error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo); 298 if (error) 299 goto done; 300 if (!RB_EMPTY(&vp->v_rbdirty_tree)) { 301 if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0) 302 goto done; 303 #if 0 304 /* 305 * Dirty bufs may be left or generated via races 306 * in circumstances where vinvalbuf() is called on 307 * a vnode not undergoing reclamation. Only 308 * panic if we are trying to reclaim the vnode. 309 */ 310 if ((vp->v_flag & VRECLAIMED) && 311 (bio_track_active(&vp->v_track_write) || 312 !RB_EMPTY(&vp->v_rbdirty_tree))) { 313 panic("vinvalbuf: dirty bufs"); 314 } 315 #endif 316 } 317 } 318 info.slptimeo = slptimeo; 319 info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 320 if (slpflag & PCATCH) 321 info.lkflags |= LK_PCATCH; 322 info.flags = flags; 323 info.vp = vp; 324 325 /* 326 * Flush the buffer cache until nothing is left, wait for all I/O 327 * to complete. At least one pass is required. We might block 328 * in the pip code so we have to re-check. Order is important. 329 */ 330 do { 331 /* 332 * Flush buffer cache 333 */ 334 if (!RB_EMPTY(&vp->v_rbclean_tree)) { 335 info.clean = 1; 336 error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, 337 NULL, vinvalbuf_bp, &info); 338 } 339 if (!RB_EMPTY(&vp->v_rbdirty_tree)) { 340 info.clean = 0; 341 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 342 NULL, vinvalbuf_bp, &info); 343 } 344 345 /* 346 * Wait for I/O completion. 347 */ 348 bio_track_wait(&vp->v_track_write, 0, 0); 349 if ((object = vp->v_object) != NULL) 350 refcount_wait(&object->paging_in_progress, "vnvlbx"); 351 } while (bio_track_active(&vp->v_track_write) || 352 !RB_EMPTY(&vp->v_rbclean_tree) || 353 !RB_EMPTY(&vp->v_rbdirty_tree)); 354 355 /* 356 * Destroy the copy in the VM cache, too. 357 */ 358 if ((object = vp->v_object) != NULL) { 359 vm_object_page_remove(object, 0, 0, 360 (flags & V_SAVE) ? TRUE : FALSE); 361 } 362 363 if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree)) 364 panic("vinvalbuf: flush failed"); 365 if (!RB_EMPTY(&vp->v_rbhash_tree)) 366 panic("vinvalbuf: flush failed, buffers still present"); 367 error = 0; 368 done: 369 lwkt_reltoken(&vp->v_token); 370 return (error); 371 } 372 373 static int 374 vinvalbuf_bp(struct buf *bp, void *data) 375 { 376 struct vinvalbuf_bp_info *info = data; 377 int error; 378 379 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 380 atomic_add_int(&bp->b_refs, 1); 381 error = BUF_TIMELOCK(bp, info->lkflags, 382 "vinvalbuf", info->slptimeo); 383 atomic_subtract_int(&bp->b_refs, 1); 384 if (error == 0) { 385 BUF_UNLOCK(bp); 386 error = ENOLCK; 387 } 388 if (error == ENOLCK) 389 return(0); 390 return (-error); 391 } 392 KKASSERT(bp->b_vp == info->vp); 393 394 /* 395 * Must check clean/dirty status after successfully locking as 396 * it may race. 397 */ 398 if ((info->clean && (bp->b_flags & B_DELWRI)) || 399 (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) { 400 BUF_UNLOCK(bp); 401 return(0); 402 } 403 404 /* 405 * NOTE: NO B_LOCKED CHECK. Also no buf_checkwrite() 406 * check. 
This code will write out the buffer, period. 407 */ 408 bremfree(bp); 409 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 410 (info->flags & V_SAVE)) { 411 cluster_awrite(bp); 412 } else if (info->flags & V_SAVE) { 413 /* 414 * Cannot set B_NOCACHE on a clean buffer as this will 415 * destroy the VM backing store which might actually 416 * be dirty (and unsynchronized). 417 */ 418 bp->b_flags |= (B_INVAL | B_RELBUF); 419 brelse(bp); 420 } else { 421 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 422 brelse(bp); 423 } 424 return(0); 425 } 426 427 /* 428 * Truncate a file's buffer and pages to a specified length. This 429 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 430 * sync activity. 431 * 432 * The vnode must be locked. 433 */ 434 static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data); 435 static int vtruncbuf_bp_trunc(struct buf *bp, void *data); 436 static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data); 437 static int vtruncbuf_bp_metasync(struct buf *bp, void *data); 438 439 struct vtruncbuf_info { 440 struct vnode *vp; 441 off_t truncloffset; 442 int clean; 443 }; 444 445 int 446 vtruncbuf(struct vnode *vp, off_t length, int blksize) 447 { 448 struct vtruncbuf_info info; 449 const char *filename; 450 int count; 451 452 /* 453 * Round up to the *next* block, then destroy the buffers in question. 454 * Since we are only removing some of the buffers we must rely on the 455 * scan count to determine whether a loop is necessary. 456 */ 457 if ((count = (int)(length % blksize)) != 0) 458 info.truncloffset = length + (blksize - count); 459 else 460 info.truncloffset = length; 461 info.vp = vp; 462 463 lwkt_gettoken(&vp->v_token); 464 do { 465 info.clean = 1; 466 count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, 467 vtruncbuf_bp_trunc_cmp, 468 vtruncbuf_bp_trunc, &info); 469 info.clean = 0; 470 count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 471 vtruncbuf_bp_trunc_cmp, 472 vtruncbuf_bp_trunc, &info); 473 } while(count); 474 475 /* 476 * For safety, fsync any remaining metadata if the file is not being 477 * truncated to 0. Since the metadata does not represent the entire 478 * dirty list we have to rely on the hit count to ensure that we get 479 * all of it. 480 */ 481 if (length > 0) { 482 do { 483 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 484 vtruncbuf_bp_metasync_cmp, 485 vtruncbuf_bp_metasync, &info); 486 } while (count); 487 } 488 489 /* 490 * Clean out any left over VM backing store. 491 * 492 * It is possible to have in-progress I/O from buffers that were 493 * not part of the truncation. This should not happen if we 494 * are truncating to 0-length. 495 */ 496 vnode_pager_setsize(vp, length); 497 bio_track_wait(&vp->v_track_write, 0, 0); 498 499 /* 500 * Debugging only 501 */ 502 spin_lock(&vp->v_spin); 503 filename = TAILQ_FIRST(&vp->v_namecache) ? 504 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"; 505 spin_unlock(&vp->v_spin); 506 507 /* 508 * Make sure no buffers were instantiated while we were trying 509 * to clean out the remaining VM pages. This could occur due 510 * to busy dirty VM pages being flushed out to disk. 
 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to a non-zero length.  Only
 * metadata blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT, MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
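 *
 * Illustrative usage (sketch only): a filesystem fsync VOP can often be
 * a thin wrapper around this routine, e.g.
 *
 *	error = vfsync(vp, waitfor, 1, NULL, NULL);
 *
 * where waitfor stands for the caller's MNT_WAIT/MNT_NOWAIT/MNT_LAZY
 * argument.  Passing a NULL checkdef causes vfsync() to write buffers
 * regardless of dependencies (syncdeps is forced on).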
 */
static int vfsync_wait_output(struct vnode *vp,
		    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (typically the filesystem syncer).  Asynchronous,
		 * plus limit the number of data (not meta) pages we try
		 * to flush to 1MB.  A non-zero return means that the lazy
		 * limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
702 */ 703 info.cmpfunc = vfsync_data_only_cmp; 704 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp, 705 vfsync_bp, &info); 706 error = vfsync_wait_output(vp, waitoutput); 707 if (error == 0) { 708 info.skippedbufs = 0; 709 info.cmpfunc = vfsync_dummy_cmp; 710 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 711 vfsync_bp, &info); 712 error = vfsync_wait_output(vp, waitoutput); 713 if (info.skippedbufs) { 714 kprintf("Warning: vfsync skipped %d dirty " 715 "bufs in pass2!\n", info.skippedbufs); 716 } 717 } 718 while (error == 0 && passes > 0 && 719 !RB_EMPTY(&vp->v_rbdirty_tree) 720 ) { 721 if (--passes == 0) { 722 info.synchronous = 1; 723 info.syncdeps = 1; 724 } 725 info.cmpfunc = vfsync_dummy_cmp; 726 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 727 vfsync_bp, &info); 728 if (error < 0) 729 error = -error; 730 info.syncdeps = 1; 731 if (error == 0) 732 error = vfsync_wait_output(vp, waitoutput); 733 } 734 break; 735 } 736 lwkt_reltoken(&vp->v_token); 737 return(error); 738 } 739 740 static int 741 vfsync_wait_output(struct vnode *vp, 742 int (*waitoutput)(struct vnode *, struct thread *)) 743 { 744 int error; 745 746 error = bio_track_wait(&vp->v_track_write, 0, 0); 747 if (waitoutput) 748 error = waitoutput(vp, curthread); 749 return(error); 750 } 751 752 static int 753 vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused) 754 { 755 return(0); 756 } 757 758 static int 759 vfsync_data_only_cmp(struct buf *bp, void *data) 760 { 761 if (bp->b_loffset < 0) 762 return(-1); 763 return(0); 764 } 765 766 static int 767 vfsync_meta_only_cmp(struct buf *bp, void *data) 768 { 769 if (bp->b_loffset < 0) 770 return(0); 771 return(1); 772 } 773 774 static int 775 vfsync_lazy_range_cmp(struct buf *bp, void *data) 776 { 777 struct vfsync_info *info = data; 778 779 if (bp->b_loffset < info->vp->v_lazyw) 780 return(-1); 781 return(0); 782 } 783 784 static int 785 vfsync_bp(struct buf *bp, void *data) 786 { 787 struct vfsync_info *info = data; 788 struct vnode *vp = info->vp; 789 int error; 790 791 /* 792 * Ignore buffers that we cannot immediately lock. 793 */ 794 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 795 ++info->skippedbufs; 796 return(0); 797 } 798 799 /* 800 * We must revalidate the buffer after locking. 801 */ 802 if ((bp->b_flags & B_DELWRI) == 0 || 803 bp->b_vp != info->vp || 804 info->cmpfunc(bp, data)) { 805 BUF_UNLOCK(bp); 806 return(0); 807 } 808 809 /* 810 * If syncdeps is not set we do not try to write buffers which have 811 * dependancies. 812 */ 813 if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) { 814 BUF_UNLOCK(bp); 815 return(0); 816 } 817 818 /* 819 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer 820 * has been written but an additional handshake with the device 821 * is required before we can dispose of the buffer. We have no idea 822 * how to do this so we have to skip these buffers. 823 */ 824 if (bp->b_flags & B_NEEDCOMMIT) { 825 BUF_UNLOCK(bp); 826 return(0); 827 } 828 829 /* 830 * Ask bioops if it is ok to sync. If not the VFS may have 831 * set B_LOCKED so we have to cycle the buffer. 832 */ 833 if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) { 834 bremfree(bp); 835 brelse(bp); 836 return(0); 837 } 838 839 if (info->synchronous) { 840 /* 841 * Synchronous flushing. An error may be returned. 842 */ 843 bremfree(bp); 844 error = bwrite(bp); 845 } else { 846 /* 847 * Asynchronous flushing. A negative return value simply 848 * stops the scan and is not considered an error. 
We use 849 * this to support limited MNT_LAZY flushes. 850 */ 851 vp->v_lazyw = bp->b_loffset; 852 bremfree(bp); 853 info->lazycount += cluster_awrite(bp); 854 waitrunningbufspace(); 855 vm_wait_nominal(); 856 if (info->lazylimit && info->lazycount >= info->lazylimit) 857 error = 1; 858 else 859 error = 0; 860 } 861 return(-error); 862 } 863 864 /* 865 * Associate a buffer with a vnode. 866 * 867 * MPSAFE 868 */ 869 int 870 bgetvp(struct vnode *vp, struct buf *bp, int testsize) 871 { 872 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 873 KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0); 874 875 /* 876 * Insert onto list for new vnode. 877 */ 878 lwkt_gettoken(&vp->v_token); 879 880 if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) { 881 lwkt_reltoken(&vp->v_token); 882 return (EEXIST); 883 } 884 885 /* 886 * Diagnostics (mainly for HAMMER debugging). Check for 887 * overlapping buffers. 888 */ 889 if (check_buf_overlap) { 890 struct buf *bx; 891 bx = buf_rb_hash_RB_PREV(bp); 892 if (bx) { 893 if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) { 894 kprintf("bgetvp: overlapl %016jx/%d %016jx " 895 "bx %p bp %p\n", 896 (intmax_t)bx->b_loffset, 897 bx->b_bufsize, 898 (intmax_t)bp->b_loffset, 899 bx, bp); 900 if (check_buf_overlap > 1) 901 panic("bgetvp - overlapping buffer"); 902 } 903 } 904 bx = buf_rb_hash_RB_NEXT(bp); 905 if (bx) { 906 if (bp->b_loffset + testsize > bx->b_loffset) { 907 kprintf("bgetvp: overlapr %016jx/%d %016jx " 908 "bp %p bx %p\n", 909 (intmax_t)bp->b_loffset, 910 testsize, 911 (intmax_t)bx->b_loffset, 912 bp, bx); 913 if (check_buf_overlap > 1) 914 panic("bgetvp - overlapping buffer"); 915 } 916 } 917 } 918 bp->b_vp = vp; 919 bp->b_flags |= B_HASHED; 920 bp->b_flags |= B_VNCLEAN; 921 if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) 922 panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp); 923 /*vhold(vp);*/ 924 lwkt_reltoken(&vp->v_token); 925 return(0); 926 } 927 928 /* 929 * Disassociate a buffer from a vnode. 930 * 931 * MPSAFE 932 */ 933 void 934 brelvp(struct buf *bp) 935 { 936 struct vnode *vp; 937 938 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 939 940 /* 941 * Delete from old vnode list, if on one. 942 */ 943 vp = bp->b_vp; 944 lwkt_gettoken(&vp->v_token); 945 if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) { 946 if (bp->b_flags & B_VNDIRTY) 947 buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp); 948 else 949 buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp); 950 bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN); 951 } 952 if (bp->b_flags & B_HASHED) { 953 buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp); 954 bp->b_flags &= ~B_HASHED; 955 } 956 957 /* 958 * Only remove from synclist when no dirty buffers are left AND 959 * the VFS has not flagged the vnode's inode as being dirty. 960 */ 961 if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST && 962 RB_EMPTY(&vp->v_rbdirty_tree)) { 963 vn_syncer_remove(vp); 964 } 965 bp->b_vp = NULL; 966 967 lwkt_reltoken(&vp->v_token); 968 969 /*vdrop(vp);*/ 970 } 971 972 /* 973 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI. 974 * This routine is called when the state of the B_DELWRI bit is changed. 975 * 976 * Must be called with vp->v_token held. 977 * MPSAFE 978 */ 979 void 980 reassignbuf(struct buf *bp) 981 { 982 struct vnode *vp = bp->b_vp; 983 int delay; 984 985 ASSERT_LWKT_TOKEN_HELD(&vp->v_token); 986 ++reassignbufcalls; 987 988 /* 989 * B_PAGING flagged buffers cannot be reassigned because their vp 990 * is not fully linked in. 
991 */ 992 if (bp->b_flags & B_PAGING) 993 panic("cannot reassign paging buffer"); 994 995 if (bp->b_flags & B_DELWRI) { 996 /* 997 * Move to the dirty list, add the vnode to the worklist 998 */ 999 if (bp->b_flags & B_VNCLEAN) { 1000 buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp); 1001 bp->b_flags &= ~B_VNCLEAN; 1002 } 1003 if ((bp->b_flags & B_VNDIRTY) == 0) { 1004 if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) { 1005 panic("reassignbuf: dup lblk vp %p bp %p", 1006 vp, bp); 1007 } 1008 bp->b_flags |= B_VNDIRTY; 1009 } 1010 if ((vp->v_flag & VONWORKLST) == 0) { 1011 switch (vp->v_type) { 1012 case VDIR: 1013 delay = dirdelay; 1014 break; 1015 case VCHR: 1016 case VBLK: 1017 if (vp->v_rdev && 1018 vp->v_rdev->si_mountpoint != NULL) { 1019 delay = metadelay; 1020 break; 1021 } 1022 /* fall through */ 1023 default: 1024 delay = filedelay; 1025 } 1026 vn_syncer_add(vp, delay); 1027 } 1028 } else { 1029 /* 1030 * Move to the clean list, remove the vnode from the worklist 1031 * if no dirty blocks remain. 1032 */ 1033 if (bp->b_flags & B_VNDIRTY) { 1034 buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp); 1035 bp->b_flags &= ~B_VNDIRTY; 1036 } 1037 if ((bp->b_flags & B_VNCLEAN) == 0) { 1038 if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) { 1039 panic("reassignbuf: dup lblk vp %p bp %p", 1040 vp, bp); 1041 } 1042 bp->b_flags |= B_VNCLEAN; 1043 } 1044 1045 /* 1046 * Only remove from synclist when no dirty buffers are left 1047 * AND the VFS has not flagged the vnode's inode as being 1048 * dirty. 1049 */ 1050 if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == 1051 VONWORKLST && 1052 RB_EMPTY(&vp->v_rbdirty_tree)) { 1053 vn_syncer_remove(vp); 1054 } 1055 } 1056 } 1057 1058 /* 1059 * Create a vnode for a block device. Used for mounting the root file 1060 * system. 1061 * 1062 * A vref()'d vnode is returned. 1063 */ 1064 extern struct vop_ops *devfs_vnode_dev_vops_p; 1065 int 1066 bdevvp(cdev_t dev, struct vnode **vpp) 1067 { 1068 struct vnode *vp; 1069 struct vnode *nvp; 1070 int error; 1071 1072 if (dev == NULL) { 1073 *vpp = NULLVP; 1074 return (ENXIO); 1075 } 1076 error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p, 1077 &nvp, 0, 0); 1078 if (error) { 1079 *vpp = NULLVP; 1080 return (error); 1081 } 1082 vp = nvp; 1083 vp->v_type = VCHR; 1084 #if 0 1085 vp->v_rdev = dev; 1086 #endif 1087 v_associate_rdev(vp, dev); 1088 vp->v_umajor = dev->si_umajor; 1089 vp->v_uminor = dev->si_uminor; 1090 vx_unlock(vp); 1091 *vpp = vp; 1092 return (0); 1093 } 1094 1095 int 1096 v_associate_rdev(struct vnode *vp, cdev_t dev) 1097 { 1098 if (dev == NULL) 1099 return(ENXIO); 1100 if (dev_is_good(dev) == 0) 1101 return(ENXIO); 1102 KKASSERT(vp->v_rdev == NULL); 1103 vp->v_rdev = reference_dev(dev); 1104 lwkt_gettoken(&spechash_token); 1105 SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext); 1106 lwkt_reltoken(&spechash_token); 1107 return(0); 1108 } 1109 1110 void 1111 v_release_rdev(struct vnode *vp) 1112 { 1113 cdev_t dev; 1114 1115 if ((dev = vp->v_rdev) != NULL) { 1116 lwkt_gettoken(&spechash_token); 1117 SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext); 1118 vp->v_rdev = NULL; 1119 release_dev(dev); 1120 lwkt_reltoken(&spechash_token); 1121 } 1122 } 1123 1124 /* 1125 * Add a vnode to the alias list hung off the cdev_t. We only associate 1126 * the device number with the vnode. The actual device is not associated 1127 * until the vnode is opened (usually in spec_open()), and will be 1128 * disassociated on last close. 
1129 */ 1130 void 1131 addaliasu(struct vnode *nvp, int x, int y) 1132 { 1133 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1134 panic("addaliasu on non-special vnode"); 1135 nvp->v_umajor = x; 1136 nvp->v_uminor = y; 1137 } 1138 1139 /* 1140 * Simple call that a filesystem can make to try to get rid of a 1141 * vnode. It will fail if anyone is referencing the vnode (including 1142 * the caller). 1143 * 1144 * The filesystem can check whether its in-memory inode structure still 1145 * references the vp on return. 1146 * 1147 * May only be called if the vnode is in a known state (i.e. being prevented 1148 * from being deallocated by some other condition such as a vfs inode hold). 1149 */ 1150 void 1151 vclean_unlocked(struct vnode *vp) 1152 { 1153 vx_get(vp); 1154 if (VREFCNT(vp) <= 0) 1155 vgone_vxlocked(vp); 1156 vx_put(vp); 1157 } 1158 1159 /* 1160 * Disassociate a vnode from its underlying filesystem. 1161 * 1162 * The vnode must be VX locked and referenced. In all normal situations 1163 * there are no active references. If vclean_vxlocked() is called while 1164 * there are active references, the vnode is being ripped out and we have 1165 * to call VOP_CLOSE() as appropriate before we can reclaim it. 1166 */ 1167 void 1168 vclean_vxlocked(struct vnode *vp, int flags) 1169 { 1170 int active; 1171 int n; 1172 vm_object_t object; 1173 struct namecache *ncp; 1174 1175 /* 1176 * If the vnode has already been reclaimed we have nothing to do. 1177 */ 1178 if (vp->v_flag & VRECLAIMED) 1179 return; 1180 1181 /* 1182 * Set flag to interlock operation, flag finalization to ensure 1183 * that the vnode winds up on the inactive list, and set v_act to 0. 1184 */ 1185 vsetflags(vp, VRECLAIMED); 1186 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 1187 vp->v_act = 0; 1188 1189 if (verbose_reclaims) { 1190 if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) 1191 kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name); 1192 } 1193 1194 /* 1195 * Scrap the vfs cache 1196 */ 1197 while (cache_inval_vp(vp, 0) != 0) { 1198 kprintf("Warning: vnode %p clean/cache_resolution " 1199 "race detected\n", vp); 1200 tsleep(vp, 0, "vclninv", 2); 1201 } 1202 1203 /* 1204 * Check to see if the vnode is in use. If so we have to reference it 1205 * before we clean it out so that its count cannot fall to zero and 1206 * generate a race against ourselves to recycle it. 1207 */ 1208 active = (VREFCNT(vp) > 0); 1209 1210 /* 1211 * Clean out any buffers associated with the vnode and destroy its 1212 * object, if it has one. 1213 */ 1214 vinvalbuf(vp, V_SAVE, 0, 0); 1215 KKASSERT(lockcountnb(&vp->v_lock) == 1); 1216 1217 /* 1218 * If purging an active vnode (typically during a forced unmount 1219 * or reboot), it must be closed and deactivated before being 1220 * reclaimed. This isn't really all that safe, but what can 1221 * we do? XXX. 1222 * 1223 * Note that neither of these routines unlocks the vnode. 1224 */ 1225 if (active && (flags & DOCLOSE)) { 1226 while ((n = vp->v_opencount) != 0) { 1227 if (vp->v_writecount) 1228 VOP_CLOSE(vp, FWRITE|FNONBLOCK); 1229 else 1230 VOP_CLOSE(vp, FNONBLOCK); 1231 if (vp->v_opencount == n) { 1232 kprintf("Warning: unable to force-close" 1233 " vnode %p\n", vp); 1234 break; 1235 } 1236 } 1237 } 1238 1239 /* 1240 * If the vnode has not been deactivated, deactivated it. Deactivation 1241 * can create new buffers and VM pages so we have to call vinvalbuf() 1242 * again to make sure they all get flushed. 1243 * 1244 * This can occur if a file with a link count of 0 needs to be 1245 * truncated. 
1246 * 1247 * If the vnode is already dead don't try to deactivate it. 1248 */ 1249 if ((vp->v_flag & VINACTIVE) == 0) { 1250 vsetflags(vp, VINACTIVE); 1251 if (vp->v_mount) 1252 VOP_INACTIVE(vp); 1253 vinvalbuf(vp, V_SAVE, 0, 0); 1254 } 1255 KKASSERT(lockcountnb(&vp->v_lock) == 1); 1256 1257 /* 1258 * If the vnode has an object, destroy it. 1259 */ 1260 while ((object = vp->v_object) != NULL) { 1261 vm_object_hold(object); 1262 if (object == vp->v_object) 1263 break; 1264 vm_object_drop(object); 1265 } 1266 1267 if (object != NULL) { 1268 if (object->ref_count == 0) { 1269 if ((object->flags & OBJ_DEAD) == 0) 1270 vm_object_terminate(object); 1271 vm_object_drop(object); 1272 vclrflags(vp, VOBJBUF); 1273 } else { 1274 vm_pager_deallocate(object); 1275 vclrflags(vp, VOBJBUF); 1276 vm_object_drop(object); 1277 } 1278 } 1279 KKASSERT((vp->v_flag & VOBJBUF) == 0); 1280 1281 /* 1282 * Reclaim the vnode if not already dead. 1283 */ 1284 if (vp->v_mount && VOP_RECLAIM(vp)) 1285 panic("vclean: cannot reclaim"); 1286 1287 /* 1288 * Done with purge, notify sleepers of the grim news. 1289 */ 1290 vp->v_ops = &dead_vnode_vops_p; 1291 vn_gone(vp); 1292 vp->v_tag = VT_NON; 1293 1294 /* 1295 * If we are destroying an active vnode, reactivate it now that 1296 * we have reassociated it with deadfs. This prevents the system 1297 * from crashing on the vnode due to it being unexpectedly marked 1298 * as inactive or reclaimed. 1299 */ 1300 if (active && (flags & DOCLOSE)) { 1301 vclrflags(vp, VINACTIVE | VRECLAIMED); 1302 } 1303 } 1304 1305 /* 1306 * Eliminate all activity associated with the requested vnode 1307 * and with all vnodes aliased to the requested vnode. 1308 * 1309 * The vnode must be referenced but should not be locked. 1310 */ 1311 int 1312 vrevoke(struct vnode *vp, struct ucred *cred) 1313 { 1314 struct vnode *vq; 1315 struct vnode *vqn; 1316 cdev_t dev; 1317 int error; 1318 1319 /* 1320 * If the vnode has a device association, scrap all vnodes associated 1321 * with the device. Don't let the device disappear on us while we 1322 * are scrapping the vnodes. 1323 * 1324 * The passed vp will probably show up in the list, do not VX lock 1325 * it twice! 1326 * 1327 * Releasing the vnode's rdev here can mess up specfs's call to 1328 * device close, so don't do it. The vnode has been disassociated 1329 * and the device will be closed after the last ref on the related 1330 * fp goes away (if not still open by e.g. the kernel). 1331 */ 1332 if (vp->v_type != VCHR) { 1333 error = fdrevoke(vp, DTYPE_VNODE, cred); 1334 return (error); 1335 } 1336 if ((dev = vp->v_rdev) == NULL) { 1337 return(0); 1338 } 1339 reference_dev(dev); 1340 lwkt_gettoken(&spechash_token); 1341 1342 restart: 1343 vqn = SLIST_FIRST(&dev->si_hlist); 1344 if (vqn) 1345 vhold(vqn); 1346 while ((vq = vqn) != NULL) { 1347 if (VREFCNT(vq) > 0) { 1348 vref(vq); 1349 fdrevoke(vq, DTYPE_VNODE, cred); 1350 /*v_release_rdev(vq);*/ 1351 vrele(vq); 1352 if (vq->v_rdev != dev) { 1353 vdrop(vq); 1354 goto restart; 1355 } 1356 } 1357 vqn = SLIST_NEXT(vq, v_cdevnext); 1358 if (vqn) 1359 vhold(vqn); 1360 vdrop(vq); 1361 } 1362 lwkt_reltoken(&spechash_token); 1363 dev_drevoke(dev); 1364 release_dev(dev); 1365 return (0); 1366 } 1367 1368 /* 1369 * This is called when the object underlying a vnode is being destroyed, 1370 * such as in a remove(). Try to recycle the vnode immediately if the 1371 * only active reference is our reference. 
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * destruction.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already
 *	 been reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * Assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
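 *
 * Illustrative usage (sketch only):
 *
 *	struct vnode *vp;
 *
 *	if (vfinddev(dev, VCHR, &vp)) {
 *		... use vp ...
 *		vrele(vp);
 *	}
 *
 * The reference obtained via vref() must be released with vrele() when
 * the caller is finished with the vnode.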
1477 */ 1478 int 1479 vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp) 1480 { 1481 struct vnode *vp; 1482 1483 lwkt_gettoken(&spechash_token); 1484 SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { 1485 if (type == vp->v_type) { 1486 *vpp = vp; 1487 vref(vp); 1488 lwkt_reltoken(&spechash_token); 1489 return (1); 1490 } 1491 } 1492 lwkt_reltoken(&spechash_token); 1493 return (0); 1494 } 1495 1496 /* 1497 * Calculate the total number of references to a special device. This 1498 * routine may only be called for VBLK and VCHR vnodes since v_rdev is 1499 * an overloaded field. Since udev2dev can now return NULL, we have 1500 * to check for a NULL v_rdev. 1501 */ 1502 int 1503 count_dev(cdev_t dev) 1504 { 1505 struct vnode *vp; 1506 int count = 0; 1507 1508 if (SLIST_FIRST(&dev->si_hlist)) { 1509 lwkt_gettoken(&spechash_token); 1510 SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { 1511 count += vp->v_opencount; 1512 } 1513 lwkt_reltoken(&spechash_token); 1514 } 1515 return(count); 1516 } 1517 1518 int 1519 vcount(struct vnode *vp) 1520 { 1521 if (vp->v_rdev == NULL) 1522 return(0); 1523 return(count_dev(vp->v_rdev)); 1524 } 1525 1526 /* 1527 * Initialize VMIO for a vnode. This routine MUST be called before a 1528 * VFS can issue buffer cache ops on a vnode. It is typically called 1529 * when a vnode is initialized from its inode. 1530 */ 1531 int 1532 vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff) 1533 { 1534 vm_object_t object; 1535 int error = 0; 1536 1537 retry: 1538 while ((object = vp->v_object) != NULL) { 1539 vm_object_hold(object); 1540 if (object == vp->v_object) 1541 break; 1542 vm_object_drop(object); 1543 } 1544 1545 if (object == NULL) { 1546 object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff); 1547 1548 /* 1549 * Dereference the reference we just created. This assumes 1550 * that the object is associated with the vp. 1551 */ 1552 vm_object_hold(object); 1553 atomic_add_int(&object->ref_count, -1); 1554 vrele(vp); 1555 } else { 1556 if (object->flags & OBJ_DEAD) { 1557 vn_unlock(vp); 1558 if (vp->v_object == object) 1559 vm_object_dead_sleep(object, "vodead"); 1560 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1561 vm_object_drop(object); 1562 goto retry; 1563 } 1564 } 1565 KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object")); 1566 vsetflags(vp, VOBJBUF); 1567 vm_object_drop(object); 1568 1569 return (error); 1570 } 1571 1572 1573 /* 1574 * Print out a description of a vnode. 1575 */ 1576 static char *typename[] = 1577 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 1578 1579 void 1580 vprint(char *label, struct vnode *vp) 1581 { 1582 char buf[96]; 1583 1584 if (label != NULL) 1585 kprintf("%s: %p: ", label, (void *)vp); 1586 else 1587 kprintf("%p: ", (void *)vp); 1588 kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,", 1589 typename[vp->v_type], 1590 vp->v_refcnt, vp->v_writecount, vp->v_auxrefs); 1591 buf[0] = '\0'; 1592 if (vp->v_flag & VROOT) 1593 strcat(buf, "|VROOT"); 1594 if (vp->v_flag & VPFSROOT) 1595 strcat(buf, "|VPFSROOT"); 1596 if (vp->v_flag & VTEXT) 1597 strcat(buf, "|VTEXT"); 1598 if (vp->v_flag & VSYSTEM) 1599 strcat(buf, "|VSYSTEM"); 1600 if (vp->v_flag & VOBJBUF) 1601 strcat(buf, "|VOBJBUF"); 1602 if (buf[0] != '\0') 1603 kprintf(" flags (%s)", &buf[1]); 1604 if (vp->v_data == NULL) { 1605 kprintf("\n"); 1606 } else { 1607 kprintf("\n\t"); 1608 VOP_PRINT(vp); 1609 } 1610 } 1611 1612 /* 1613 * Do the usual access checking. 
1614 * file_mode, uid and gid are from the vnode in question, 1615 * while acc_mode and cred are from the VOP_ACCESS parameter list 1616 */ 1617 int 1618 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid, 1619 mode_t acc_mode, struct ucred *cred) 1620 { 1621 mode_t mask; 1622 int ismember; 1623 1624 /* 1625 * Super-user always gets read/write access, but execute access depends 1626 * on at least one execute bit being set. 1627 */ 1628 if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) { 1629 if ((acc_mode & VEXEC) && type != VDIR && 1630 (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0) 1631 return (EACCES); 1632 return (0); 1633 } 1634 1635 mask = 0; 1636 1637 /* Otherwise, check the owner. */ 1638 if (cred->cr_uid == uid) { 1639 if (acc_mode & VEXEC) 1640 mask |= S_IXUSR; 1641 if (acc_mode & VREAD) 1642 mask |= S_IRUSR; 1643 if (acc_mode & VWRITE) 1644 mask |= S_IWUSR; 1645 return ((file_mode & mask) == mask ? 0 : EACCES); 1646 } 1647 1648 /* Otherwise, check the groups. */ 1649 ismember = groupmember(gid, cred); 1650 if (cred->cr_svgid == gid || ismember) { 1651 if (acc_mode & VEXEC) 1652 mask |= S_IXGRP; 1653 if (acc_mode & VREAD) 1654 mask |= S_IRGRP; 1655 if (acc_mode & VWRITE) 1656 mask |= S_IWGRP; 1657 return ((file_mode & mask) == mask ? 0 : EACCES); 1658 } 1659 1660 /* Otherwise, check everyone else. */ 1661 if (acc_mode & VEXEC) 1662 mask |= S_IXOTH; 1663 if (acc_mode & VREAD) 1664 mask |= S_IROTH; 1665 if (acc_mode & VWRITE) 1666 mask |= S_IWOTH; 1667 return ((file_mode & mask) == mask ? 0 : EACCES); 1668 } 1669 1670 #ifdef DDB 1671 #include <ddb/ddb.h> 1672 1673 static int db_show_locked_vnodes(struct mount *mp, void *data); 1674 1675 /* 1676 * List all of the locked vnodes in the system. 1677 * Called when debugging the kernel. 1678 */ 1679 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 1680 { 1681 kprintf("Locked vnodes\n"); 1682 mountlist_scan(db_show_locked_vnodes, NULL, 1683 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 1684 } 1685 1686 static int 1687 db_show_locked_vnodes(struct mount *mp, void *data __unused) 1688 { 1689 struct vnode *vp; 1690 1691 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 1692 if (vn_islocked(vp)) 1693 vprint(NULL, vp); 1694 } 1695 return(0); 1696 } 1697 #endif 1698 1699 /* 1700 * Top level filesystem related information gathering. 1701 */ 1702 static int sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS); 1703 1704 static int 1705 vfs_sysctl(SYSCTL_HANDLER_ARGS) 1706 { 1707 int *name = (int *)arg1 - 1; /* XXX */ 1708 u_int namelen = arg2 + 1; /* XXX */ 1709 struct vfsconf *vfsp; 1710 int maxtypenum; 1711 1712 #if 1 || defined(COMPAT_PRELITE2) 1713 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 1714 if (namelen == 1) 1715 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 1716 #endif 1717 1718 #ifdef notyet 1719 /* all sysctl names at this level are at least name and field */ 1720 if (namelen < 2) 1721 return (ENOTDIR); /* overloaded */ 1722 if (name[0] != VFS_GENERIC) { 1723 vfsp = vfsconf_find_by_typenum(name[0]); 1724 if (vfsp == NULL) 1725 return (EOPNOTSUPP); 1726 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 1727 oldp, oldlenp, newp, newlen, p)); 1728 } 1729 #endif 1730 switch (name[1]) { 1731 case VFS_MAXTYPENUM: 1732 if (namelen != 2) 1733 return (ENOTDIR); 1734 maxtypenum = vfsconf_get_maxtypenum(); 1735 return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum))); 1736 case VFS_CONF: 1737 if (namelen != 3) 1738 return (ENOTDIR); /* overloaded */ 1739 vfsp = vfsconf_find_by_typenum(name[2]); 1740 if (vfsp == NULL) 1741 return (EOPNOTSUPP); 1742 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 1743 } 1744 return (EOPNOTSUPP); 1745 } 1746 1747 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 1748 "Generic filesystem"); 1749 1750 #if 1 || defined(COMPAT_PRELITE2) 1751 1752 static int 1753 sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data) 1754 { 1755 int error; 1756 struct ovfsconf ovfs; 1757 struct sysctl_req *req = (struct sysctl_req*) data; 1758 1759 bzero(&ovfs, sizeof(ovfs)); 1760 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 1761 strcpy(ovfs.vfc_name, vfsp->vfc_name); 1762 ovfs.vfc_index = vfsp->vfc_typenum; 1763 ovfs.vfc_refcount = vfsp->vfc_refcount; 1764 ovfs.vfc_flags = vfsp->vfc_flags; 1765 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 1766 if (error) 1767 return error; /* abort iteration with error code */ 1768 else 1769 return 0; /* continue iterating with next element */ 1770 } 1771 1772 static int 1773 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 1774 { 1775 return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req); 1776 } 1777 1778 #endif /* 1 || COMPAT_PRELITE2 */ 1779 1780 /* 1781 * Check to see if a filesystem is mounted on a block device. 1782 */ 1783 int 1784 vfs_mountedon(struct vnode *vp) 1785 { 1786 cdev_t dev; 1787 1788 if ((dev = vp->v_rdev) == NULL) { 1789 /* if (vp->v_type != VBLK) 1790 dev = get_dev(vp->v_uminor, vp->v_umajor); */ 1791 } 1792 if (dev != NULL && dev->si_mountpoint) 1793 return (EBUSY); 1794 return (0); 1795 } 1796 1797 /* 1798 * Unmount all filesystems. The list is traversed in reverse order 1799 * of mounting to avoid dependencies. 1800 */ 1801 1802 static int vfs_umountall_callback(struct mount *mp, void *data); 1803 1804 void 1805 vfs_unmountall(void) 1806 { 1807 int count; 1808 1809 do { 1810 count = mountlist_scan(vfs_umountall_callback, 1811 NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY); 1812 } while (count); 1813 } 1814 1815 static 1816 int 1817 vfs_umountall_callback(struct mount *mp, void *data) 1818 { 1819 int error; 1820 1821 error = dounmount(mp, MNT_FORCE); 1822 if (error) { 1823 mountlist_remove(mp); 1824 kprintf("unmount of filesystem mounted from %s failed (", 1825 mp->mnt_stat.f_mntfromname); 1826 if (error == EBUSY) 1827 kprintf("BUSY)\n"); 1828 else 1829 kprintf("%d)\n", error); 1830 } 1831 return(1); 1832 } 1833 1834 /* 1835 * Checks the mount flags for parameter mp and put the names comma-separated 1836 * into a string buffer buf with a size limit specified by len. 1837 * 1838 * It returns the number of bytes written into buf, and (*errorp) will be 1839 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was 1840 * not large enough). 
The buffer will be 0-terminated if len was not 0. 1841 */ 1842 size_t 1843 vfs_flagstostr(int flags, const struct mountctl_opt *optp, 1844 char *buf, size_t len, int *errorp) 1845 { 1846 static const struct mountctl_opt optnames[] = { 1847 { MNT_ASYNC, "asynchronous" }, 1848 { MNT_EXPORTED, "NFS exported" }, 1849 { MNT_LOCAL, "local" }, 1850 { MNT_NOATIME, "noatime" }, 1851 { MNT_NODEV, "nodev" }, 1852 { MNT_NOEXEC, "noexec" }, 1853 { MNT_NOSUID, "nosuid" }, 1854 { MNT_NOSYMFOLLOW, "nosymfollow" }, 1855 { MNT_QUOTA, "with-quotas" }, 1856 { MNT_RDONLY, "read-only" }, 1857 { MNT_SYNCHRONOUS, "synchronous" }, 1858 { MNT_UNION, "union" }, 1859 { MNT_NOCLUSTERR, "noclusterr" }, 1860 { MNT_NOCLUSTERW, "noclusterw" }, 1861 { MNT_SUIDDIR, "suiddir" }, 1862 { MNT_SOFTDEP, "soft-updates" }, 1863 { MNT_IGNORE, "ignore" }, 1864 { 0, NULL} 1865 }; 1866 int bwritten; 1867 int bleft; 1868 int optlen; 1869 int actsize; 1870 1871 *errorp = 0; 1872 bwritten = 0; 1873 bleft = len - 1; /* leave room for trailing \0 */ 1874 1875 /* 1876 * Checks the size of the string. If it contains 1877 * any data, then we will append the new flags to 1878 * it. 1879 */ 1880 actsize = strlen(buf); 1881 if (actsize > 0) 1882 buf += actsize; 1883 1884 /* Default flags if no flags passed */ 1885 if (optp == NULL) 1886 optp = optnames; 1887 1888 if (bleft < 0) { /* degenerate case, 0-length buffer */ 1889 *errorp = EINVAL; 1890 return(0); 1891 } 1892 1893 for (; flags && optp->o_opt; ++optp) { 1894 if ((flags & optp->o_opt) == 0) 1895 continue; 1896 optlen = strlen(optp->o_name); 1897 if (bwritten || actsize > 0) { 1898 if (bleft < 2) { 1899 *errorp = ENOSPC; 1900 break; 1901 } 1902 buf[bwritten++] = ','; 1903 buf[bwritten++] = ' '; 1904 bleft -= 2; 1905 } 1906 if (bleft < optlen) { 1907 *errorp = ENOSPC; 1908 break; 1909 } 1910 bcopy(optp->o_name, buf + bwritten, optlen); 1911 bwritten += optlen; 1912 bleft -= optlen; 1913 flags &= ~optp->o_opt; 1914 } 1915 1916 /* 1917 * Space already reserved for trailing \0 1918 */ 1919 buf[bwritten] = 0; 1920 return (bwritten); 1921 } 1922 1923 /* 1924 * Build hash lists of net addresses and hang them off the mount point. 1925 * Called by ufs_mount() to set up the lists of export addresses. 
1926 */ 1927 static int 1928 vfs_hang_addrlist(struct mount *mp, struct netexport *nep, 1929 const struct export_args *argp) 1930 { 1931 struct netcred *np; 1932 struct radix_node_head *rnh; 1933 int i; 1934 struct radix_node *rn; 1935 struct sockaddr *saddr, *smask = NULL; 1936 struct domain *dom; 1937 int error; 1938 1939 if (argp->ex_addrlen == 0) { 1940 if (mp->mnt_flag & MNT_DEFEXPORTED) 1941 return (EPERM); 1942 np = &nep->ne_defexported; 1943 np->netc_exflags = argp->ex_flags; 1944 np->netc_anon = argp->ex_anon; 1945 np->netc_anon.cr_ref = 1; 1946 mp->mnt_flag |= MNT_DEFEXPORTED; 1947 return (0); 1948 } 1949 1950 if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN) 1951 return (EINVAL); 1952 if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN) 1953 return (EINVAL); 1954 1955 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 1956 np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO); 1957 saddr = (struct sockaddr *) (np + 1); 1958 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 1959 goto out; 1960 if (saddr->sa_len > argp->ex_addrlen) 1961 saddr->sa_len = argp->ex_addrlen; 1962 if (argp->ex_masklen) { 1963 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen); 1964 error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen); 1965 if (error) 1966 goto out; 1967 if (smask->sa_len > argp->ex_masklen) 1968 smask->sa_len = argp->ex_masklen; 1969 } 1970 i = saddr->sa_family; 1971 if ((rnh = nep->ne_rtable[i]) == NULL) { 1972 /* 1973 * Seems silly to initialize every AF when most are not used, 1974 * do so on demand here 1975 */ 1976 SLIST_FOREACH(dom, &domains, dom_next) 1977 if (dom->dom_family == i && dom->dom_rtattach) { 1978 dom->dom_rtattach((void **) &nep->ne_rtable[i], 1979 dom->dom_rtoffset); 1980 break; 1981 } 1982 if ((rnh = nep->ne_rtable[i]) == NULL) { 1983 error = ENOBUFS; 1984 goto out; 1985 } 1986 } 1987 rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh, 1988 np->netc_rnodes); 1989 if (rn == NULL || np != (struct netcred *) rn) { /* already exists */ 1990 error = EPERM; 1991 goto out; 1992 } 1993 np->netc_exflags = argp->ex_flags; 1994 np->netc_anon = argp->ex_anon; 1995 np->netc_anon.cr_ref = 1; 1996 return (0); 1997 out: 1998 kfree(np, M_NETADDR); 1999 return (error); 2000 } 2001 2002 /* ARGSUSED */ 2003 static int 2004 vfs_free_netcred(struct radix_node *rn, void *w) 2005 { 2006 struct radix_node_head *rnh = (struct radix_node_head *) w; 2007 2008 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2009 kfree((caddr_t) rn, M_NETADDR); 2010 return (0); 2011 } 2012 2013 /* 2014 * Free the net address hash lists that are hanging off the mount points. 
2015 */ 2016 static void 2017 vfs_free_addrlist(struct netexport *nep) 2018 { 2019 int i; 2020 struct radix_node_head *rnh; 2021 2022 for (i = 0; i <= AF_MAX; i++) 2023 if ((rnh = nep->ne_rtable[i])) { 2024 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2025 (caddr_t) rnh); 2026 kfree((caddr_t) rnh, M_RTABLE); 2027 nep->ne_rtable[i] = 0; 2028 } 2029 } 2030 2031 int 2032 vfs_export(struct mount *mp, struct netexport *nep, 2033 const struct export_args *argp) 2034 { 2035 int error; 2036 2037 if (argp->ex_flags & MNT_DELEXPORT) { 2038 if (mp->mnt_flag & MNT_EXPUBLIC) { 2039 vfs_setpublicfs(NULL, NULL, NULL); 2040 mp->mnt_flag &= ~MNT_EXPUBLIC; 2041 } 2042 vfs_free_addrlist(nep); 2043 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2044 } 2045 if (argp->ex_flags & MNT_EXPORTED) { 2046 if (argp->ex_flags & MNT_EXPUBLIC) { 2047 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2048 return (error); 2049 mp->mnt_flag |= MNT_EXPUBLIC; 2050 } 2051 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2052 return (error); 2053 mp->mnt_flag |= MNT_EXPORTED; 2054 } 2055 return (0); 2056 } 2057 2058 2059 /* 2060 * Set the publicly exported filesystem (WebNFS). Currently, only 2061 * one public filesystem is possible in the spec (RFC 2054 and 2055) 2062 */ 2063 int 2064 vfs_setpublicfs(struct mount *mp, struct netexport *nep, 2065 const struct export_args *argp) 2066 { 2067 int error; 2068 struct vnode *rvp; 2069 char *cp; 2070 2071 /* 2072 * mp == NULL -> invalidate the current info, the FS is 2073 * no longer exported. May be called from either vfs_export 2074 * or unmount, so check if it hasn't already been done. 2075 */ 2076 if (mp == NULL) { 2077 if (nfs_pub.np_valid) { 2078 nfs_pub.np_valid = 0; 2079 if (nfs_pub.np_index != NULL) { 2080 kfree(nfs_pub.np_index, M_TEMP); 2081 nfs_pub.np_index = NULL; 2082 } 2083 } 2084 return (0); 2085 } 2086 2087 /* 2088 * Only one allowed at a time. 2089 */ 2090 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2091 return (EBUSY); 2092 2093 /* 2094 * Get real filehandle for root of exported FS. 2095 */ 2096 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2097 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2098 2099 if ((error = VFS_ROOT(mp, &rvp))) 2100 return (error); 2101 2102 if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2103 return (error); 2104 2105 vput(rvp); 2106 2107 /* 2108 * If an indexfile was specified, pull it in. 2109 */ 2110 if (argp->ex_indexfile != NULL) { 2111 int namelen; 2112 2113 error = vn_get_namelen(rvp, &namelen); 2114 if (error) 2115 return (error); 2116 nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK); 2117 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2118 namelen, NULL); 2119 if (!error) { 2120 /* 2121 * Check for illegal filenames. 2122 */ 2123 for (cp = nfs_pub.np_index; *cp; cp++) { 2124 if (*cp == '/') { 2125 error = EINVAL; 2126 break; 2127 } 2128 } 2129 } 2130 if (error) { 2131 kfree(nfs_pub.np_index, M_TEMP); 2132 return (error); 2133 } 2134 } 2135 2136 nfs_pub.np_mount = mp; 2137 nfs_pub.np_valid = 1; 2138 return (0); 2139 } 2140 2141 struct netcred * 2142 vfs_export_lookup(struct mount *mp, struct netexport *nep, 2143 struct sockaddr *nam) 2144 { 2145 struct netcred *np; 2146 struct radix_node_head *rnh; 2147 struct sockaddr *saddr; 2148 2149 np = NULL; 2150 if (mp->mnt_flag & MNT_EXPORTED) { 2151 /* 2152 * Lookup in the export list first. 
/*
 * Look up the netcred (export flags plus anonymous credential) that
 * applies to the client address nam, falling back to the default
 * export entry if one was configured.
 */
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
			np = &nep->ne_defexported;
	}
	return (np);
}
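
/*
 * Hedged usage sketch (not part of the original file): an NFS-style
 * export check translates the client's socket address into export
 * flags and an anonymous credential via vfs_export_lookup().  The
 * function name "examplefs_checkexp" is hypothetical and the sketch
 * reuses the hypothetical examplefs_mount structure from the sketch
 * above; only the vfs_export_lookup() call itself comes from this file.
 */
#if 0
static int
examplefs_checkexp(struct mount *mp, struct sockaddr *nam,
		   int *exflagsp, struct ucred **credanonp)
{
	struct examplefs_mount *emp = (struct examplefs_mount *)mp->mnt_data;
	struct netcred *np;

	np = vfs_export_lookup(mp, &emp->em_export, nam);
	if (np == NULL)
		return (EACCES);	/* address not covered by any export */
	*exflagsp = np->netc_exflags;
	*credanonp = &np->netc_anon;
	return (0);
}
#endif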
/*
 * Perform msync on all vnodes under a mount point.  The mount point
 * must be locked.  This code is also responsible for lazy-freeing
 * unreferenced vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to
 *	 do it way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.  If the filesystem is using the
	 * syncer thread feature we can use vsyncscan() instead of
	 * vmntvnodescan(), which is much faster.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;

	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
		vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
			  (void *)(intptr_t)flags);
	} else {
		vmntvnodescan(mp, vmsc_flags,
			      vfs_msync_scan1, vfs_msync_scan2,
			      (void *)(intptr_t)flags);
	}
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, so we cannot afford to do anything heavyweight until we have
 * a fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been
 * opened (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to
 * be opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

/*
 * Retrieve the maximum file name length for the filesystem backing vp
 * via VOP_PATHCONF(_PC_NAME_MAX).
 */
int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

/*
 * Write a struct dirent for the given entry into the uio.  Returns
 * non-zero if there is not enough space left in the uio; otherwise
 * returns 0 with any uiomove() error stored in *error.
 */
int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

/*
 * Mark the vnode for an access-time update unless the mount is
 * read-only or mounted with MNT_NOATIME.
 */
void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
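
/*
 * Hedged usage sketch (not part of the original file): a VOP_READDIR
 * implementation emits one entry at a time through vop_write_dirent(),
 * stopping the directory scan when the routine reports that the uio is
 * out of space.  The "examplefs_dirent" structure and the helper name
 * are hypothetical; only vop_write_dirent() and its calling convention
 * are taken from above.
 */
#if 0
struct examplefs_dirent {
	ino_t		exd_ino;
	uint8_t		exd_type;	/* DT_REG, DT_DIR, ... */
	uint16_t	exd_namlen;
	const char	*exd_name;
};

/*
 * Returns non-zero when the uio is full and the directory scan should
 * stop; otherwise *errorp holds the uiomove() result for this entry.
 */
static int
examplefs_emit_dirent(struct uio *uio, const struct examplefs_dirent *de,
		      int *errorp)
{
	return (vop_write_dirent(errorp, uio, de->exd_ino, de->exd_type,
				 de->exd_namlen, de->exd_name));
}
#endif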