/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems, to ~80K vnodes or so.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, "spechash");
}

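/*
 * Illustrative arithmetic for the sizing above (added note, not from the
 * original source): on a machine with 128MB of RAM, v_page_count * PAGE_SIZE
 * is roughly 128 * 1024 * 1024 bytes.  Assuming, purely for the sake of
 * example, that sizeof(struct vm_object) + sizeof(struct vnode) is on the
 * order of 1KB, factor1 is about 20KB and the memory-based term works out
 * to roughly 128M / 20K ~= 6500 vnodes, in the same ballpark as the ~5800
 * target mentioned in the comment.  The KVM-based term (factor2) and the
 * maxproc * 8 floor then clamp or raise that estimate.
 */
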
/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}

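/*
 * Illustrative use (added note, not from the original source): a caller
 * that wants to change only selected attributes typically clears a vattr
 * with vattr_null() before filling in just the fields it cares about,
 * roughly like this:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = new_size;
 *	error = VOP_SETATTR(vp, &va, cred);
 *
 * Fields left at VNOVAL are then ignored by the filesystem.
 */
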
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;
#if 0
			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
#endif
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left, wait for all I/O
	 * to complete.  At least one pass is required.  We might block
	 * in the pip code so we have to re-check.  Order is important.
	 */
	do {
		/*
		 * Flush buffer cache
		 */
		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
			info.clean = 1;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
					NULL, vinvalbuf_bp, &info);
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					NULL, vinvalbuf_bp, &info);
		}

		/*
		 * Wait for I/O completion.
		 */
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL)
			refcount_wait(&object->paging_in_progress, "vnvlbx");
	} while (bio_track_active(&vp->v_track_write) ||
		 !RB_EMPTY(&vp->v_rbclean_tree) ||
		 !RB_EMPTY(&vp->v_rbdirty_tree));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	bremfree(bp);
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		cluster_awrite(bp);
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t truncloffset;
	int clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				 vtruncbuf_bp_trunc_cmp,
				 vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				 vtruncbuf_bp_trunc_cmp,
				 vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
				"left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

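/*
 * Illustrative sketch (added note, not from the original source): a
 * filesystem's truncate path might use vtruncbuf() to throw away buffers
 * beyond the new EOF, while a reclaim path uses vinvalbuf() to flush and
 * destroy everything, e.g.:
 *
 *	error = vtruncbuf(vp, new_length, blksize);	truncate to new EOF
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);		full flush+invalidate
 *
 * Both calls require the caller to hold the vnode lock.
 */
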
/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
			      int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
       int (*checkdef)(struct buf *),
       int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer typical): asynchronous, plus limit
		 * the number of data (not meta) pages we try to flush to 1MB.
		 * A non-zero return means that the lazy limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"bufs in pass2!\n", info.skippedbufs);
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			info.cmpfunc = vfsync_dummy_cmp;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vp->v_token);
	return(error);
}

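/*
 * Illustrative sketch (added note, not from the original source): a VFS
 * fsync implementation might funnel into vfsync() roughly as follows,
 * where xxx_fsync and xxx_checkdef are hypothetical filesystem-specific
 * names and the vop_fsync_args fields are assumed:
 *
 *	static int
 *	xxx_fsync(struct vop_fsync_args *ap)
 *	{
 *		return (vfsync(ap->a_vp, ap->a_waitfor, 1,
 *			       xxx_checkdef, NULL));
 *	}
 *
 * Passing NULL for checkdef makes vfsync() ignore dependencies entirely
 * (syncdeps is forced on, as can be seen at the top of vfsync()).
 */
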
static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
{
	return(0);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * Ignore buffers that we cannot immediately lock.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		++info->skippedbufs;
		return(0);
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		bremfree(bp);
		info->lazycount += cluster_awrite(bp);
		waitrunningbufspace();
		vm_wait_nominal();
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	/*vhold(vp);*/
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}

	/*
	 * Only remove from synclist when no dirty buffers are left AND
	 * the VFS has not flagged the vnode's inode as being dirty.
	 */
	if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vn_syncer_remove(vp);
	}
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	/*vdrop(vp);*/
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}

		/*
		 * Only remove from synclist when no dirty buffers are left
		 * AND the VFS has not flagged the vnode's inode as being
		 * dirty.
		 */
		if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) ==
		     VONWORKLST &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp);
		}
	}
}

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 *
 * May only be called if the vnode is in a known state (i.e. being prevented
 * from being deallocated by some other condition such as a vfs inode hold).
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (VREFCNT(vp) <= 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;

	/*
	 * Set flag to interlock operation, flag finalization to ensure
	 * that the vnode winds up on the inactive list, and set v_act to 0.
	 */
	vsetflags(vp, VRECLAIMED);
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	vp->v_act = 0;

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = (VREFCNT(vp) > 0);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * If the vnode has an object, destroy it.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (VREFCNT(vq) > 0) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * destruction.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already been
 *	 reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT(vp->v_object == object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.  Allow it to
		 * have zero refs.  It cannot be destroyed as long as it
		 * is associated with the vnode.
		 */
		vm_object_hold(object);
		atomic_add_int(&object->ref_count, -1);
		vrele(vp);
	} else {
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}


/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

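/*
 * Worked example (added note, not from the original source): for a file
 * with file_mode 0640 owned by uid 1001, gid 20, a non-root caller with
 * cr_uid 1001 requesting VREAD|VWRITE builds mask = S_IRUSR|S_IWUSR
 * (0600); since (0640 & 0600) == 0600 matches the mask, access is
 * granted.  A caller who is only a member of group 20 requesting VWRITE
 * builds mask = S_IWGRP (0020); since (0640 & 0020) == 0, the result
 * is EACCES.
 */
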
#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error; /* abort iteration with error code */
	else
		return 0; /* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
				       NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL}
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string. If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}

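/*
 * Illustrative example (added note, not from the original source): with
 * optp == NULL and flags == (MNT_RDONLY | MNT_NOATIME), an empty buffer
 * of sufficient size ends up containing "noatime, read-only" (the output
 * order follows the optnames[] table above, not the flag order), *errorp
 * is 0, and the return value is the number of bytes written.
 */
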
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == NULL) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == NULL) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
				  np->netc_rnodes);
	if (rn == NULL || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				kfree(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

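/*
 * Illustrative sketch (added note, not from the original source): an
 * NFS-style server, given a client address 'nam', might resolve the
 * credentials to use roughly like this:
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);	host is not exported to
 *	cred = &np->netc_anon;		e.g. for anonymous/squashed requests
 *
 * where nep is the netexport structure the filesystem hung off its mount
 * via vfs_export().
 */
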
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
                  struct sockaddr *nam)
{
        struct netcred *np;
        struct radix_node_head *rnh;
        struct sockaddr *saddr;

        np = NULL;
        if (mp->mnt_flag & MNT_EXPORTED) {
                /*
                 * Lookup in the export list first.
                 */
                if (nam != NULL) {
                        saddr = nam;
                        rnh = nep->ne_rtable[saddr->sa_family];
                        if (rnh != NULL) {
                                np = (struct netcred *)
                                        (*rnh->rnh_matchaddr)((char *)saddr,
                                                              rnh);
                                if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
                                        np = NULL;
                        }
                }
                /*
                 * If no address match, use the default if it exists.
                 */
                if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
                        np = &nep->ne_defexported;
        }
        return (np);
}

/*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to do
 *	 it way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
        int vmsc_flags;

        /*
         * tmpfs sets this flag to prevent msync(), sync, and the
         * filesystem periodic syncer from trying to flush VM pages
         * to swap.  Only pure memory pressure flushes tmpfs VM pages
         * to swap.
         */
        if (mp->mnt_kern_flag & MNTK_NOMSYNC)
                return;

        /*
         * Ok, scan the vnodes for work.  If the filesystem is using the
         * syncer thread feature we can use vsyncscan() instead of
         * vmntvnodescan(), which is much faster.
         */
        vmsc_flags = VMSC_GETVP;
        if (flags != MNT_WAIT)
                vmsc_flags |= VMSC_NOWAIT;

        if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
                vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
                          (void *)(intptr_t)flags);
        } else {
                vmntvnodescan(mp, vmsc_flags,
                              vfs_msync_scan1, vfs_msync_scan2,
                              (void *)(intptr_t)flags);
        }
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes; we cannot afford to do anything heavy-weight until we have a
 * fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        int flags = (int)(intptr_t)data;

        if ((vp->v_flag & VRECLAIMED) == 0) {
                if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 &&
                    vp->v_object) {
                        return(0);	/* call scan2 */
                }
                if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
                    (vp->v_flag & VOBJDIRTY) &&
                    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
                        return(0);	/* call scan2 */
                }
        }

        /*
         * do not call scan2, continue the loop
         */
        return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        vm_object_t obj;
        int flags = (int)(intptr_t)data;

        if (vp->v_flag & VRECLAIMED)
                return(0);

        if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
                if ((obj = vp->v_object) != NULL) {
                        vm_object_page_clean(obj, 0, 0,
                            flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
                }
        }
        return(0);
}
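
/*
 * Note on the scan1/scan2 split above (added for clarity): when
 * vmntvnodescan() is used, the fast callback (scan1) is invoked on
 * every vnode without obtaining it, so it limits itself to cheap flag
 * and reference-count tests and returns -1 to skip the vnode.  Only
 * when scan1 returns 0 is the vnode obtained (VMSC_GETVP) and passed
 * to the slow callback (scan2), which may then safely inspect and
 * clean the backing VM object.
 */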

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
        lwkt_gettoken(&vp->v_token);
        KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
        lwkt_reltoken(&vp->v_token);
}

/*
 * extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
        if (vp->v_type != VBLK && vp->v_type != VCHR)
                return (NULL);
        KKASSERT(vp->v_rdev != NULL);
        return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
        cdev_t dev;

        if (vp->v_type != VCHR) {
                if (errp != NULL)
                        *errp = ENOTBLK;
                return (0);
        }

        dev = vp->v_rdev;

        if (dev == NULL) {
                if (errp != NULL)
                        *errp = ENXIO;
                return (0);
        }
        if (dev_is_good(dev) == 0) {
                if (errp != NULL)
                        *errp = ENXIO;
                return (0);
        }
        if ((dev_dflags(dev) & D_DISK) == 0) {
                if (errp != NULL)
                        *errp = ENOTBLK;
                return (0);
        }
        if (errp != NULL)
                *errp = 0;
        return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
        int error;
        register_t retval[2];

        error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
        if (error)
                return (error);
        *namelen = (int)retval[0];
        return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
                 uint16_t d_namlen, const char *d_name)
{
        struct dirent *dp;
        size_t len;

        len = _DIRENT_RECLEN(d_namlen);
        if (len > uio->uio_resid)
                return(1);

        dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

        dp->d_ino = d_ino;
        dp->d_namlen = d_namlen;
        dp->d_type = d_type;
        bcopy(d_name, dp->d_name, d_namlen);

        *error = uiomove((caddr_t)dp, len, uio);

        kfree(dp, M_TEMP);

        return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
        struct proc *p = td->td_proc;
        struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

        if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
                VOP_MARKATIME(vp, cred);
        }
}
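
/*
 * Illustrative sketch (added; kept under #if 0 so it is never compiled):
 * how a filesystem's VOP_READDIR implementation typically drives
 * vop_write_dirent() above.  The example_* names and the node fields
 * are hypothetical and are not part of this file or of any particular
 * filesystem.
 */
#if 0
static int
example_readdir(struct vop_readdir_args *ap)
{
        struct uio *uio = ap->a_uio;
        struct example_node *node;
        int error = 0;

        for (node = example_first_entry(ap->a_vp);
             node != NULL;
             node = example_next_entry(node)) {
                /*
                 * vop_write_dirent() returns non-zero when the entry no
                 * longer fits in the user buffer; that simply ends this
                 * pass of the directory read, it is not an error.
                 */
                if (vop_write_dirent(&error, uio, node->ino, node->type,
                                     node->namelen, node->name))
                        break;
                if (error)
                        break;
        }
        return (error);
}
#endif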