/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.118 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW,
	   &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW,
	   &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW,
	   &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW,
	   &reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW,
	   &reassignbufmethod, 0, "");
static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW,
	   &check_buf_overlap, 0, "");

int	nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
	   &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}
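
/*
 * Illustrative sketch (not part of the original file): the RB_GENERATE2()
 * invocations above emit typed helpers keyed on b_loffset, such as
 * buf_rb_hash_RB_LOOKUP(), which the buffer cache uses to find a buffer
 * by its logical offset.  Assuming the caller already holds vp->v_token,
 * a minimal lookup would look roughly like this:
 *
 *	struct buf *
 *	example_findblk(struct vnode *vp, off_t loffset)
 *	{
 *		return (buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset));
 *	}
 */
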
/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	if (vp->v_object &&
	    (vp->v_object->ref_count || vp->v_object->resident_page_count)) {
		return (0);
	}
	return (1);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 22 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, 1, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
	   &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}
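
/*
 * Illustrative sketch (not part of the original file): callers that only
 * want to change a few attributes clear the vattr with vattr_null() first
 * so the VOP_SETATTR() implementation can test each field against VNOVAL.
 * The exact setattr call shown below is an assumption for illustration:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;		(only the size is being changed)
 *	error = VOP_SETATTR(vp, &va, cred);
 */
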
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;

			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.   Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left.
	 */
	while (!RB_EMPTY(&vp->v_rbclean_tree) ||
	       !RB_EMPTY(&vp->v_rbdirty_tree)) {
		error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, NULL,
				vinvalbuf_bp, &info);
		if (error == 0) {
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vinvalbuf_bp, &info);
		}
	}

	/*
	 * Wait for I/O completion.  We may block in the pip code so we have
	 * to re-check.
	 */
	do {
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (bio_track_active(&vp->v_track_write));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}
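
/*
 * Illustrative note (not part of the original file): vclean_vxlocked()
 * below shows the common calling pattern; a reclaim or revoke path flushes
 * and saves any dirty data with
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * while passing 0 instead of V_SAVE simply discards all buffers, e.g. when
 * the backing object is being destroyed anyway.
 */
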
static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}

	KKASSERT(bp->b_vp == info->vp);

	/*
	 * XXX Since there are no node locks for NFS, I
	 * believe there is a slight chance that a delayed
	 * write will occur while sleeping just above, so
	 * check for it.  Note that vfs_bio_awrite expects
	 * buffers to reside on a queue, while bwrite() and
	 * brelse() do not.
	 *
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		if (bp->b_vp == info->vp) {
			if (bp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(bp);
			} else {
				bremfree(bp);
				bawrite(bp);
			}
		} else {
			bremfree(bp);
			bwrite(bp);
		}
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	off_t truncloffset;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		truncloffset = length + (blksize - count);
	else
		truncloffset = length;

	lwkt_gettoken(&vp->v_token);
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, vp);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock_wr(&vp->v_spinlock);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock_wr(&vp->v_spinlock);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		if (count) {
			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}
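
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * shrinks a file from its VOP_SETATTR() path would typically trim the
 * buffer cache first, roughly as follows (the inode fields are
 * hypothetical):
 *
 *	if (vap->va_size < ip->i_size)
 *		error = vtruncbuf(vp, vap->va_size, ip->i_blksize);
 *
 * vnode_pager_setsize() is called by vtruncbuf() itself, so the caller only
 * has to update its own on-media metadata afterwards.
 */
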
/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset >= *(off_t *)data)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vnode *vp = data;

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Do not try to use a buffer we cannot immediately lock,
		 * but sleep anyway to prevent a livelock.  The code will
		 * loop until all buffers can be acted upon.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
				BUF_UNLOCK(bp);
		} else {
			bremfree(bp);
			if (bp->b_vp == vp)
				bawrite(bp);
			else
				bwrite(bp);
		}
		return(1);
	} else {
		return(0);
	}
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT, MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY:
		/*
		 * Lazy (typically the filesystem syncer).  Asynchronous,
		 * plus limit the number of data (not meta) pages we try to
		 * flush to 1MB.  A non-zero return means that the lazy limit
		 * was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add_to_worklist(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs)
				kprintf("Warning: vfsync skipped %d dirty bufs in pass2!\n", info.skippedbufs);
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vp->v_token);
	return(error);
}
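
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_FSYNC() method usually ends up being a thin wrapper around vfsync(),
 * passing its dependency-check callback or NULL.  The function and argument
 * names below are assumptions, not taken from any particular filesystem:
 *
 *	static int
 *	examplefs_fsync(struct vop_fsync_args *ap)
 *	{
 *		return (vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL));
 *	}
 */
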
static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp))
		return(0);

	/*
	 * Ignore buffers that we cannot immediately lock.  XXX
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		kprintf("Warning: vfsync_bp skipping dirty buffer %p\n", bp);
		++info->skippedbufs;
		return(0);
	}
	if ((bp->b_flags & B_DELWRI) == 0)
		panic("vfsync_bp: buffer not dirty");
	if (vp != bp->b_vp)
		panic("vfsync_bp: buffer vp mismatch");

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			info->lazycount += vfs_bio_awrite(bp);
		} else {
			info->lazycount += bp->b_bufsize;
			bremfree(bp);
			bawrite(bp);
		}
		waitrunningbufspace();
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);
	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	vhold(vp);
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;
	lwkt_reltoken(&vp->v_token);

	vdrop(vp);
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	KKASSERT(vp != NULL);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vclrflags(vp, VONWORKLST);
			LIST_REMOVE(vp, v_synclist);
		}
	}
	lwkt_reltoken(&vp->v_token);
}

/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}
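
/*
 * Illustrative sketch (not part of the original file): bdevvp() is only
 * needed very early in the boot path, before the root filesystem (and
 * therefore devfs) is available.  A root mount helper would use it roughly
 * like this, with rootdev/rootvp being the traditional globals:
 *
 *	if (bdevvp(rootdev, &rootvp) != 0)
 *		panic("cannot obtain vnode for root device");
 */
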
/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vsetflags(vp, VRECLAIMED);

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	if ((object = vp->v_object) != NULL) {
		lwkt_gettoken(&vm_token);
		KKASSERT(object == vp->v_object);
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
		} else {
			vm_pager_deallocate(object);
		}
		vclrflags(vp, VOBJBUF);
		lwkt_reltoken(&vm_token);
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vref(vqn);
	while ((vq = vqn) != NULL) {
		vqn = SLIST_NEXT(vqn, v_cdevnext);
		if (vqn)
			vref(vqn);
		fdrevoke(vq, DTYPE_VNODE, cred);
		/*v_release_rdev(vq);*/
		vrele(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}
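
/*
 * Illustrative sketch (not part of the original file): filesystems commonly
 * call vrecycle() from the tail of their VOP_INACTIVE() when the in-memory
 * inode is no longer useful, so the vnode can be reused right away.  The
 * inode field names are hypothetical:
 *
 *	if (ip == NULL || ip->i_nlink == 0)
 *		vrecycle(vp);
 */
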
/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 */
int
vmaxiosize(struct vnode *vp)
{
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		return(vp->v_rdev->si_iosize_max);
	} else {
		return(vp->v_mount->mnt_iosize_max);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 * deactivated (VOP_INACTIVE), or on a vnode which has already been
 * reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	get_mplock();

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
	rel_mplock();
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	return (error);
}


/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
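
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_ACCESS() implementation normally pulls mode/uid/gid out of its
 * in-memory inode and defers the owner/group/other evaluation to vaccess().
 * Inside a hypothetical examplefs_access(vp, acc_mode, cred), with "ip"
 * being that filesystem's inode, the call would look roughly like:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *			acc_mode, cred));
 */
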
#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
			MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error; /* abort iteration with error code */
	else
		return 0; /* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
					NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL}
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string.  If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}
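
/*
 * Illustrative sketch (not part of the original file): callers are expected
 * to pass in a buffer that already holds a valid (possibly empty) string,
 * since vfs_flagstostr() appends to whatever is there:
 *
 *	char buf[128];
 *	int error;
 *
 *	buf[0] = '\0';
 *	vfs_flagstostr(mp->mnt_flag, NULL, buf, sizeof(buf), &error);
 *
 * Passing NULL for the option table selects the default MNT_* name table
 * defined above.
 */
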
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		MALLOC(nfs_pub.np_index, char *, namelen, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}
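
/*
 * Illustrative sketch (not part of the original file): an exportable
 * filesystem usually forwards the export portion of a MNT_UPDATE mount
 * request to vfs_export().  The mount-private structure and argument
 * names here are hypothetical:
 *
 *	if (mp->mnt_flag & MNT_UPDATE)
 *		error = vfs_export(mp, &xmp->x_export, &args.export);
 */
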
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode.  We have to do it
 * way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags, vfs_msync_scan1, vfs_msync_scan2,
			(void *)(intptr_t)flags);
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, we cannot afford to do anything heavy weight until we have a
 * fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}