/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <netinet/in.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int	nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static struct radix_node_head *vfs_create_addrlist_af (
		    struct radix_node_head **prnh, int off,
		    struct radix_node_head *maskhead);
static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static void	vfs_free_addrlist_af (struct radix_node_head **prnh);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

int	prtactive = 0;		/* 1 => print out reclaim of active vnodes */

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems, to ~80K vnodes or so.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
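	 * (V_SAVE requests that dirty data be written back before the
	 * buffers are invalidated, rather than being thrown away.)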
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;
#if 0
			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			    !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
#endif
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left, wait for all I/O
	 * to complete.  At least one pass is required.  We might block
	 * in the pip code so we have to re-check.  Order is important.
	 */
	do {
		/*
		 * Flush buffer cache
		 */
		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
			info.clean = 1;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
					NULL, vinvalbuf_bp, &info);
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					NULL, vinvalbuf_bp, &info);
		}

		/*
		 * Wait for I/O completion.
		 */
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL)
			refcount_wait(&object->paging_in_progress, "vnvlbx");
	} while (bio_track_active(&vp->v_track_write) ||
		 !RB_EMPTY(&vp->v_rbclean_tree) ||
		 !RB_EMPTY(&vp->v_rbdirty_tree));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	bremfree(bp);
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		cluster_awrite(bp);
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
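		 * Instead the buffer is invalidated and released, leaving
		 * the VM backing store intact.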
		 */
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t	truncloffset;
	int	clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
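 * Roughly: a negative return indicates the buffer lies before the range
 * of interest, zero means the scan callback should be invoked on it, and
 * a positive return indicates it lies beyond the range.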
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 *
 * Caller must ref the vnode but does not have to lock it.
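 *
 * The vfsync_info structure below carries per-call state for the helper
 * callbacks: the pass limits, whether dependencies are forcibly flushed,
 * and the comparison function currently in effect.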
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (typically the filesystem syncer).  Asynchronous,
		 * plus limit the number of data (not meta) pages we try
		 * to flush to 1MB.  A non-zero return means that the lazy
		 * limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
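		 * Each additional pass decrements 'passes'; the final pass
		 * is forced fully synchronous with dependency flushing.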
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"bufs in pass2!\n", info.skippedbufs);
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			info.cmpfunc = vfsync_dummy_cmp;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vp->v_token);
	return(error);
}

static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
{
	return(0);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * Ignore buffers that we cannot immediately lock.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		++info->skippedbufs;
		return(0);
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.
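		 * (vfsync() converts such a negative scan return back into
		 * a positive value.)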
		 * We use this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		bremfree(bp);
		info->lazycount += cluster_awrite(bp);
		waitrunningbufspace();
		vm_wait_nominal();
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	/*vhold(vp);*/
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}

	/*
	 * Only remove from synclist when no dirty buffers are left AND
	 * the VFS has not flagged the vnode's inode as being dirty.
	 */
	if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vn_syncer_remove(vp);
	}
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	/*vdrop(vp);*/
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}

		/*
		 * Only remove from synclist when no dirty buffers are left
		 * AND the VFS has not flagged the vnode's inode as being
		 * dirty.
		 */
		if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) ==
		     VONWORKLST &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp);
		}
	}
}

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 *
 * May only be called if the vnode is in a known state (i.e. being prevented
 * from being deallocated by some other condition such as a vfs inode hold).
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (VREFCNT(vp) <= 1)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;

	/*
	 * Set flag to interlock operation, flag finalization to ensure
	 * that the vnode winds up on the inactive list, and set v_act to 0.
	 */
	vsetflags(vp, VRECLAIMED);
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	vp->v_act = 0;

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = (VREFCNT(vp) > 0);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK, NULL);
			else
				VOP_CLOSE(vp, FNONBLOCK, NULL);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
				       " vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * If the vnode has an object, destroy it.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (VREFCNT(vq) > 0) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * destruction.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already been
 *	 reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(lockcountnb(&vp->v_lock) == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 *
	 * The vnode should have automatically been removed from the syncer
	 * list as syncer/dirty flags cleared during the cleaning.
	 */
	vclean_vxlocked(vp, DOCLOSE);
	KKASSERT((vp->v_flag & VONWORKLST) == 0);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT(vp->v_object == object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.  Allow it to
		 * have zero refs.  It cannot be destroyed as long as it
		 * is associated with the vnode.
		 */
		vm_object_hold(object);
		atomic_add_int(&object->ref_count, -1);
		vrele(vp);
	} else {
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}


/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
			MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
					NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).
 * The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL}
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Check the current size of the string.  If it already contains
	 * any data, the new flags are appended to it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
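 *
 * One radix tree of export addresses is maintained per supported address
 * family (see vfs_create_addrlist_af()), plus a shared mask tree.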
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	int off;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	if (nep->ne_maskhead == NULL) {
		if (!rn_inithead((void **)&nep->ne_maskhead, NULL, 0)) {
			error = ENOBUFS;
			goto out;
		}
	}
	switch (saddr->sa_family) {
#ifdef INET
	case AF_INET:
		if ((rnh = nep->ne_inethead) == NULL) {
			off = offsetof(struct sockaddr_in, sin_addr) << 3;
			rnh = vfs_create_addrlist_af(&nep->ne_inethead, off,
						     nep->ne_maskhead);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if ((rnh = nep->ne_inet6head) == NULL) {
			off = offsetof(struct sockaddr_in6, sin6_addr) << 3;
			rnh = vfs_create_addrlist_af(&nep->ne_inet6head, off,
						     nep->ne_maskhead);
		}
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}
	if (rnh == NULL) {
		error = ENOBUFS;
		goto out;
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
				  np->netc_rnodes);
	if (rn == NULL || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

static struct radix_node_head *
vfs_create_addrlist_af(struct radix_node_head **prnh, int off,
		       struct radix_node_head *maskhead)
{
	KKASSERT(maskhead != NULL);
	if (!rn_inithead((void **)prnh, maskhead, off))
		return (NULL);
	return (*prnh);
}

static void
vfs_free_addrlist_af(struct radix_node_head **prnh)
{
	struct radix_node_head *rnh = *prnh;

	(*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh);
	kfree(rnh, M_RTABLE);
	*prnh = NULL;
}

/*
 * Free the net address hash lists that are hanging off the mount points.
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	if (nep->ne_inethead != NULL)
		vfs_free_addrlist_af(&nep->ne_inethead);
	if (nep->ne_inet6head != NULL)
		vfs_free_addrlist_af(&nep->ne_inet6head);
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				kfree(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
				  namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}
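/*
 * Usage sketch (illustrative only): to drop every export on a mount point,
 * for example while unmounting, a filesystem can hand vfs_export() an
 * export_args with MNT_DELEXPORT set; vfs_free_addrlist() then tears down
 * the per-AF radix trees built by vfs_hang_addrlist().  "nep" is assumed to
 * be the filesystem's struct netexport.
 *
 *	struct export_args ea;
 *
 *	bzero(&ea, sizeof(ea));
 *	ea.ex_flags = MNT_DELEXPORT;
 *	vfs_export(mp, nep, &ea);
 */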
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			switch (saddr->sa_family) {
#ifdef INET
			case AF_INET:
				rnh = nep->ne_inethead;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				rnh = nep->ne_inet6head;
				break;
#endif
			default:
				rnh = NULL;
			}
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to do
 *	 it way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.  If the filesystem is using the
	 * syncer thread feature we can use vsyncscan() instead of
	 * vmntvnodescan(), which is much faster.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;

	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
		vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
			  (void *)(intptr_t)flags);
	} else {
		vmntvnodescan(mp, vmsc_flags,
			      vfs_msync_scan1, vfs_msync_scan2,
			      (void *)(intptr_t)flags);
	}
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, so we cannot afford to do anything heavy-weight until we have a
 * fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 &&
		    vp->v_object) {
			return(0);	/* call scan2 */
		}
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}
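/*
 * Usage sketch (illustrative only): callers such as the periodic syncer and
 * the sync paths typically invoke this opportunistically, letting scan1 skip
 * vnodes that are currently locked, while a blocking flush passes MNT_WAIT
 * so that scan2 cleans the VM object synchronously.
 *
 *	vfs_msync(mp, MNT_NOWAIT);
 *	vfs_msync(mp, MNT_WAIT);
 */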
/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
					     flags == MNT_WAIT ?
					     OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
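/*
 * Usage sketch (illustrative only): a filesystem's VOP_READDIR loop emits one
 * entry at a time through vop_write_dirent().  A return value of 1 means the
 * uio ran out of room and the loop should stop without error; otherwise the
 * copyout status lands in "error".  The inode number, type, name, and length
 * below are placeholders.
 *
 *	if (vop_write_dirent(&error, uio, ino, DT_REG, namelen, name))
 *		break;
 *	if (error)
 *		break;
 */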