/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.118 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
int vfs_fastdev = 1;
SYSCTL_INT(_vfs, OID_AUTO, fastdev, CTLFLAG_RW, &vfs_fastdev, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW,
	&reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW,
	&reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW,
	&reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW,
	&reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW,
	&reassignbufmethod, 0, "");

int nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
	&desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

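/*
 * Illustrative sketch only (not compiled, hypothetical names): the two
 * RB_GENERATE2 trees above are keyed by b_loffset, so ranged walks over a
 * vnode's buffers follow the RB_SCAN(tree, root, range_cmp, callback, data)
 * pattern used throughout this file (see vinvalbuf() and vtruncbuf()).
 */
#if 0
static int
example_range_cmp(struct buf *bp, void *data)
{
	/* -1: below the range of interest (skip), 0: visit this buffer */
	if (bp->b_loffset < *(off_t *)data)
		return(-1);
	return(0);
}

static int
example_callback(struct buf *bp, void *data)
{
	/* see vinvalbuf_bp() and vtruncbuf_bp_trunc() for return conventions */
	return(0);
}

static void
example_scan(struct vnode *vp, off_t start_loffset)
{
	RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
		example_range_cmp, example_callback, &start_loffset);
}
#endif
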
/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	if (vp->v_object &&
	    (vp->v_object->ref_count || vp->v_object->resident_page_count)) {
		return (0);
	}
	return (1);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 22 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token);
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
	&timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}

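/*
 * Illustrative sketch only (not compiled): a typical use of vattr_null()
 * in a setattr path initializes every field to VNOVAL and then sets only
 * the fields being changed.  The surrounding variables are hypothetical.
 */
#if 0
	struct vattr va;

	vattr_null(&va);		/* all fields start out as VNOVAL */
	va.va_size = new_size;		/* only the size is being changed */
	error = VOP_SETATTR(vp, &va, cred);
#endif
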
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	lwkt_tokref vlock;
	int error;

	lwkt_gettoken(&vlock, &vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;

			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left.
	 */
	while (!RB_EMPTY(&vp->v_rbclean_tree) ||
	       !RB_EMPTY(&vp->v_rbdirty_tree)) {
		error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, NULL,
				vinvalbuf_bp, &info);
		if (error == 0) {
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vinvalbuf_bp, &info);
		}
	}

	/*
	 * Wait for I/O completion.  We may block in the pip code so we have
	 * to re-check.
	 */
	do {
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (bio_track_active(&vp->v_track_write));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vlock);
	return (error);
}

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}

	KKASSERT(bp->b_vp == info->vp);

	/*
	 * XXX Since there are no node locks for NFS, I
	 * believe there is a slight chance that a delayed
	 * write will occur while sleeping just above, so
	 * check for it.  Note that vfs_bio_awrite expects
	 * buffers to reside on a queue, while bwrite() and
	 * brelse() do not.
	 *
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		if (bp->b_vp == info->vp) {
			if (bp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(bp);
			} else {
				bremfree(bp);
				bawrite(bp);
			}
		} else {
			bremfree(bp);
			bwrite(bp);
		}
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	off_t truncloffset;
	const char *filename;
	lwkt_tokref vlock;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		truncloffset = length + (blksize - count);
	else
		truncloffset = length;

	lwkt_gettoken(&vlock, &vp->v_token);
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, vp);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock_wr(&vp->v_spinlock);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock_wr(&vp->v_spinlock);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
				"left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vlock);

	return (0);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset >= *(off_t *)data)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vnode *vp = data;

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Do not try to use a buffer we cannot immediately lock,
		 * but sleep anyway to prevent a livelock.  The code will
		 * loop until all buffers can be acted upon.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
				BUF_UNLOCK(bp);
		} else {
			bremfree(bp);
			if (bp->b_vp == vp)
				bawrite(bp);
			else
				bwrite(bp);
		}
		return(1);
	} else {
		return(0);
	}
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
			int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	lwkt_tokref vlock;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vlock, &vp->v_token);

	switch(waitfor) {
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer typically): asynchronous, plus
		 * limit the number of data (not meta) pages we try to
		 * flush to 1MB.  A non-zero return means the lazy limit
		 * was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add_to_worklist(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs)
				kprintf("Warning: vfsync skipped %d dirty bufs in pass2!\n", info.skippedbufs);
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vlock);
	return(error);
}

static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp))
		return(0);

	/*
	 * Ignore buffers that we cannot immediately lock.  XXX
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		kprintf("Warning: vfsync_bp skipping dirty buffer %p\n", bp);
		++info->skippedbufs;
		return(0);
	}
	if ((bp->b_flags & B_DELWRI) == 0)
		panic("vfsync_bp: buffer not dirty");
	if (vp != bp->b_vp)
		panic("vfsync_bp: buffer vp mismatch");

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			info->lazycount += vfs_bio_awrite(bp);
		} else {
			info->lazycount += bp->b_bufsize;
			bremfree(bp);
			bawrite(bp);
		}
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp)
{
	lwkt_tokref vlock;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vlock, &vp->v_token);
	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vlock);
		return (EEXIST);
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	vhold(vp);
	lwkt_reltoken(&vlock);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;
	lwkt_tokref vlock;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vlock, &vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;
	lwkt_reltoken(&vlock);

	vdrop(vp);
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	lwkt_tokref vlock;
	int delay;

	KKASSERT(vp != NULL);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	lwkt_gettoken(&vlock, &vp->v_token);
	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vclrflags(vp, VONWORKLST);
			LIST_REMOVE(vp, v_synclist);
		}
	}
	lwkt_reltoken(&vlock);
}

/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	lwkt_tokref ilock;

	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&ilock);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	lwkt_tokref ilock;
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&ilock);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vsetflags(vp, VRECLAIMED);

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	if ((object = vp->v_object) != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
		} else {
			vm_pager_deallocate(object);
		}
		vclrflags(vp, VOBJBUF);
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	lwkt_tokref ilock;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);

	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vref(vqn);
	while ((vq = vqn) != NULL) {
		vqn = SLIST_NEXT(vqn, v_cdevnext);
		if (vqn)
			vref(vqn);
		fdrevoke(vq, DTYPE_VNODE, cred);
		/*v_release_rdev(vq);*/
		vrele(vq);
	}
	lwkt_reltoken(&ilock);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 */
int
vmaxiosize(struct vnode *vp)
{
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		return(vp->v_rdev->si_iosize_max);
	} else {
		return(vp->v_mount->mnt_iosize_max);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already been
 *	 reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	get_mplock();

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
	rel_mplock();
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	lwkt_tokref ilock;
	struct vnode *vp;

	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&ilock);
			return (1);
		}
	}
	lwkt_reltoken(&ilock);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	lwkt_tokref ilock;
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&ilock);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	return (error);
}


/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
			MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
					NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL }
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string.  If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}

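/*
 * Illustrative sketch only (not compiled): vfs_flagstostr() expects a
 * 0-terminated buffer on entry (it appends to any existing contents) and
 * a NULL optp selects the built-in flag names.  Variables are hypothetical.
 */
#if 0
	char optbuf[128];
	int error;

	optbuf[0] = '\0';
	vfs_flagstostr(mp->mnt_flag, NULL, optbuf, sizeof(optbuf), &error);
	if (error == 0)
		kprintf("mount flags: %s\n", optbuf);
#endif
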
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		MALLOC(nfs_pub.np_index, char *, namelen, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends requires that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode.  We have to do it
 * way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags, vfs_msync_scan1, vfs_msync_scan2,
			(void *)(intptr_t)flags);
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, we cannot afford to do anything heavy weight until we have a
 * fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(struct vnode *vp, int events)
{
	lwkt_tokref vlock;

	KKASSERT(curthread->td_proc != NULL);

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		lwkt_reltoken(&vlock);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(curthread, &vp->v_pollinfo.vpi_selinfo);
	lwkt_reltoken(&vlock);
	return 0;
}

/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(struct vnode *vp, int events)
{
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&vlock);
}

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(struct vnode *vp)
{
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&vlock);
}
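
/*
 * Illustrative sketch (not part of the build): how the poll helpers
 * above are typically paired.  A filesystem's VOP_POLL method records
 * interest with vn_pollrecord(), and whatever code later produces the
 * condition posts it with vn_pollevent() (or the VN_POLLEVENT() macro
 * mentioned above, which skips the call when nobody is waiting).  The
 * "examplefs_*" functions are hypothetical and assume the usual
 * <sys/poll.h> event bits.
 */
#if 0
static int
examplefs_poll(struct vnode *vp, int events)
{
	/*
	 * Either returns the already-pending events or records our
	 * interest for a later selwakeup().
	 */
	return (vn_pollrecord(vp, events));
}

static void
examplefs_note_readable(struct vnode *vp)
{
	/* producer side: new data arrived on vp, wake any pollers */
	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
}
#endif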
/*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
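
/*
 * Illustrative sketch (not part of the build): how a filesystem's
 * VOP_READDIR loop typically drives vop_write_dirent() above.  A
 * return of 1 means the entry did not fit in the remaining uio space
 * and the loop should stop; a return of 0 means *error holds the
 * uiomove() result.  The "examplefs_dirent" structure and helper are
 * hypothetical.
 */
#if 0
struct examplefs_dirent {
	ino_t		de_inum;
	uint8_t		de_type;	/* DT_REG, DT_DIR, ... */
	uint16_t	de_namelen;
	char		de_name[256];
};

static int
examplefs_readdir_one(struct uio *uio, const struct examplefs_dirent *de,
		      int *stopp)
{
	int error = 0;

	if (vop_write_dirent(&error, uio, de->de_inum, de->de_type,
			     de->de_namelen, de->de_name)) {
		*stopp = 1;	/* out of space, stop the loop */
		return (0);	/* not an error, the buffer is just full */
	}
	return (error);
}
#endif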