/*-
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
 */

/*
 * tmpfs vnode interface.
 */

#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vfsops.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <vfs/fifofs/fifo.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include "tmpfs.h"

static void tmpfs_strategy_done(struct bio *bio);
static void tmpfs_move_pages(vm_object_t src, vm_object_t dst);

static int tmpfs_cluster_enable = 1;
SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "TMPFS filesystem");
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &tmpfs_cluster_enable, 0, "");

static __inline
void
tmpfs_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}


/* --------------------------------------------------------------------- */

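/*
 * Resolve a name component against the directory dvp.  On success the
 * resulting vnode is entered into the namecache unlocked; a failed
 * lookup is recorded as a negative namecache entry.
 */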
static int
tmpfs_nresolve(struct vop_nresolve_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = ap->a_nch->ncp;
	struct tmpfs_node *tnode;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;
	int error;

	dnode = VP_TO_TMPFS_DIR(dvp);

	TMPFS_NODE_LOCK_SH(dnode);
	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	TMPFS_NODE_UNLOCK(dnode);

	if ((dnode->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(dnode);
		dnode->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(dnode);
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(ap->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
	return (error);
}

static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
	struct ucred *cred = ap->a_cred;
	int error;

	*vpp = NULL;

	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cred);
	if (error != 0)
		return error;

	if (dnode->tn_dir.tn_parent != NULL) {
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
				       LK_EXCLUSIVE | LK_RETRY, vpp);

		if (*vpp)
			vn_unlock(*vpp);
	}
	return (*vpp == NULL) ? ENOENT : 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_ncreate(struct vop_ncreate_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return (error);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmknod(struct vop_nmknod_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO) {
		return (EINVAL);
	}

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return error;
}

/* --------------------------------------------------------------------- */

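/*
 * Open a file.  Append-only files cannot be opened for plain writing,
 * and any pages previously parked in the swap-backed aobj by
 * tmpfs_inactive() are moved back into the vnode's VM object first.
 */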
static int
tmpfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int mode = ap->a_mode;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

#if 0
	/* The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die. */
	if (node->tn_links < 1)
		return (ENOENT);
#endif

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		error = EPERM;
	} else {
		if (node->tn_reg.tn_pages_in_aobj) {
			TMPFS_NODE_LOCK(node);
			if (node->tn_reg.tn_pages_in_aobj) {
				tmpfs_move_pages(node->tn_reg.tn_aobj,
						 vp->v_object);
				node->tn_reg.tn_pages_in_aobj = 0;
			}
			TMPFS_NODE_UNLOCK(node);
		}
		error = vop_stdopen(ap);
	}

	return (error);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links > 0) {
		/*
		 * Update node times.  No need to do it if the node has
		 * been deleted, because it will vanish after we return.
		 */
		tmpfs_update(vp);
	}

	error = vop_stdclose(ap);

	return (error);
}

/* --------------------------------------------------------------------- */

int
tmpfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	switch (vp->v_type) {
	case VDIR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		if ((ap->a_mode & VWRITE) &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto out;
		}
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		/* FALLTHROUGH */
	case VFIFO:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	if ((ap->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
		error = EPERM;
		goto out;
	}

	error = vop_helper_access(ap, node->tn_uid, node->tn_gid,
				  node->tn_mode, 0);
out:
	return error;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);

	TMPFS_NODE_LOCK_SH(node);
	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime.tv_sec = node->tn_atime;
	vap->va_atime.tv_nsec = node->tn_atimensec;
	vap->va_mtime.tv_sec = node->tn_mtime;
	vap->va_mtime.tv_nsec = node->tn_mtimensec;
	vap->va_ctime.tv_sec = node->tn_ctime;
	vap->va_ctime.tv_nsec = node->tn_ctimensec;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = 0;
	TMPFS_NODE_UNLOCK(node);

	return 0;
}

/* --------------------------------------------------------------------- */

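/*
 * Set node attributes.  Fields the caller left unset are marked with
 * VNOVAL and skipped; each requested change is delegated to the
 * matching tmpfs_ch*() helper.
 */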
int
tmpfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	int error = 0;
	int kflags = 0;

	TMPFS_NODE_LOCK(node);
	if (error == 0 && (vap->va_flags != VNOVAL)) {
		error = tmpfs_chflags(vp, vap->va_flags, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_size != VNOVAL)) {
		/* restore any saved pages before proceeding */
		if (node->tn_reg.tn_pages_in_aobj) {
			tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object);
			node->tn_reg.tn_pages_in_aobj = 0;
		}
		if (vap->va_size > node->tn_size)
			kflags |= NOTE_WRITE | NOTE_EXTEND;
		else
			kflags |= NOTE_WRITE;
		error = tmpfs_chsize(vp, vap->va_size, cred);
	}

	if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
			   vap->va_gid != (gid_t)VNOVAL)) {
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
		error = tmpfs_chmod(vp, vap->va_mode, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
			    vap->va_atime.tv_nsec != VNOVAL) ||
			   (vap->va_mtime.tv_sec != VNOVAL &&
			    vap->va_mtime.tv_nsec != VNOVAL))) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
				      vap->va_vaflags, cred);
		kflags |= NOTE_ATTRIB;
	}

	/*
	 * Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update.
	 */
	tmpfs_update(vp);
	TMPFS_NODE_UNLOCK(node);
	tmpfs_knote(vp, kflags);

	return (error);
}

/* --------------------------------------------------------------------- */

/*
 * fsync is usually a NOP, but we must take action when unmounting or
 * when recycling.
 */
static int
tmpfs_fsync(struct vop_fsync_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);
	if (vp->v_type == VREG) {
		if (vp->v_flag & VRECLAIMED) {
			if (node->tn_links == 0)
				tmpfs_truncate(vp, 0);
			else
				vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
		}
	}
	return 0;
}

/* --------------------------------------------------------------------- */

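/*
 * Read a regular file.  The VM page cache shortcut is tried first; on
 * a miss the data is pulled through the buffer cache in TMPFS_BLKSIZE
 * blocks, clustered when vfs.tmpfs.cluster_enable is set.
 */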
static int
tmpfs_read(struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct tmpfs_node *node;
	off_t base_offset;
	size_t offset;
	size_t len;
	size_t resid;
	int error;
	int seqcount;

	/*
	 * Check the basics
	 */
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Extract node, try to shortcut the operation through
	 * the VM page cache, allowing us to avoid buffer cache
	 * overheads.
	 */
	node = VP_TO_TMPFS_NODE(vp);
	resid = uio->uio_resid;
	seqcount = ap->a_ioflag >> 16;
	error = vop_helper_read_shortcut(ap);
	if (error)
		return error;
	if (uio->uio_resid == 0) {
		if (resid)
			goto finished;
		return error;
	}

	/*
	 * restore any saved pages before proceeding
	 */
	if (node->tn_reg.tn_pages_in_aobj) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_reg.tn_pages_in_aobj) {
			tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object);
			node->tn_reg.tn_pages_in_aobj = 0;
		}
		TMPFS_NODE_UNLOCK(node);
	}

	/*
	 * Fall-through to our normal read code.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset, TMPFS_BLKSIZE, GETBLK_KVABIO);
		if (bp == NULL) {
			if (tmpfs_cluster_enable) {
				error = cluster_readx(vp, node->tn_size,
						      base_offset,
						      TMPFS_BLKSIZE,
						      B_NOTMETA | B_KVABIO,
						      uio->uio_resid,
						      seqcount * MAXBSIZE,
						      &bp);
			} else {
				error = bread_kvabio(vp, base_offset,
						     TMPFS_BLKSIZE, &bp);
			}
			if (error) {
				brelse(bp);
				kprintf("tmpfs_read bread error %d\n", error);
				break;
			}

			/*
			 * tmpfs pretty much fiddles directly with the VM
			 * system, don't let it exhaust it or we won't play
			 * nice with other processes.
			 *
			 * Only do this if the VOP is coming from a normal
			 * read/write.  The VM system handles the case for
			 * UIO_NOCOPY.
			 */
			if (uio->uio_segflg != UIO_NOCOPY)
				vm_wait_nominal();
		}
		bp->b_flags |= B_CLUSTEROK;
		bkvasync(bp);

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = TMPFS_BLKSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > node->tn_size - uio->uio_offset)
			len = (size_t)(node->tn_size - uio->uio_offset);

		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("tmpfs_read uiomove error %d\n", error);
			break;
		}
	}

finished:
	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return (error);
}

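/*
 * Write to a regular file via the buffer cache.  The file is extended
 * as needed, RLIMIT_FSIZE is enforced for user threads, and finished
 * buffers are disposed of according to memory pressure (see the
 * comments ahead of the buwrite()/bdwrite() selection below).
 */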
static int
tmpfs_write(struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int trivial = 0;
	int kflags = 0;
	int seqcount;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);
	seqcount = ap->a_ioflag >> 16;

	TMPFS_NODE_LOCK(node);

	/*
	 * restore any saved pages before proceeding
	 */
	if (node->tn_reg.tn_pages_in_aobj) {
		tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object);
		node->tn_reg.tn_pages_in_aobj = 0;
	}

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
		error = EFBIG;
		goto done;
	}

	/*
	 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
	 */
	if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error)
			goto done;
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			error = EFBIG;
			goto done;
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

	while (uio->uio_resid > 0) {
		/*
		 * Don't completely blow out running buffer I/O
		 * when being hit from the pageout daemon.
		 */
		if (uio->uio_segflg == UIO_NOCOPY &&
		    (ap->a_ioflag & IO_RECURSE) == 0) {
			bwillwrite(TMPFS_BLKSIZE);
		}

		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		len = TMPFS_BLKSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len,
						 trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread_kvabio(vp, base_offset, TMPFS_BLKSIZE, &bp);
		if (error) {
			brelse(bp);
			kprintf("tmpfs_write bread error %d\n", error);
			break;
		}
		bkvasync(bp);
		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size) {
			node->tn_size = uio->uio_offset;
			kflags |= NOTE_EXTEND;
		}
		kflags |= NOTE_WRITE;

		/*
		 * Always try to flush the page in the UIO_NOCOPY case.  This
		 * can come from the pageout daemon or during vnode eviction.
		 * It is not necessarily going to be marked IO_ASYNC/IO_SYNC.
		 *
		 * For the normal case we buwrite(), dirtying the underlying
		 * VM pages instead of dirtying the buffer and releasing the
		 * buffer as a clean buffer.  This allows tmpfs to use
		 * essentially all available memory to cache file data.
		 * If we used bdwrite() the buffer cache would wind up
		 * flushing the data to swap too quickly.
		 *
		 * But because tmpfs can seriously load the VM system we
		 * fall-back to using bdwrite() when free memory starts
		 * to get low.  This shifts the load away from the VM system
		 * and makes tmpfs act more like a normal filesystem with
		 * regards to disk activity.
		 *
		 * tmpfs pretty much fiddles directly with the VM
		 * system, don't let it exhaust it or we won't play
		 * nice with other processes.  Only do this if the
		 * VOP is coming from a normal read/write.  The VM system
		 * handles the case for UIO_NOCOPY.
		 */
		bp->b_flags |= B_CLUSTEROK;
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Flush from the pageout daemon, deal with
			 * potentially very heavy tmpfs write activity
			 * causing long stalls in the pageout daemon
			 * before pages get to free/cache.
			 *
			 * (a) Under severe pressure setting B_DIRECT will
			 *     cause a buffer release to try to free the
			 *     underlying pages.
			 *
			 * (b) Under modest memory pressure the B_RELBUF
			 *     alone is sufficient to get the pages moved
			 *     to the cache.  We could also force this by
			 *     setting B_NOTMETA but that might have other
			 *     unintended side-effects (e.g. setting
			 *     PG_NOTMETA on the VM page).
			 *
			 * Hopefully this will unblock the VM system more
			 * quickly under extreme tmpfs write load.
			 */
			if (vm_page_count_min(vm_page_free_hysteresis))
				bp->b_flags |= B_DIRECT;
			bp->b_flags |= B_AGE | B_RELBUF;
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			cluster_awrite(bp);
		} else if (vm_page_count_target()) {
			/*
			 * Normal (userland) write but we are low on memory,
			 * run the buffer through the buffer cache.
			 */
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			bdwrite(bp);
		} else {
			/*
			 * Otherwise run the buffer directly through to the
			 * backing VM store.
			 */
			buwrite(bp);
			/*vm_wait_nominal();*/
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (error) {
		if (extended) {
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	/*
	 * Currently we don't set the mtime on files modified via mmap()
	 * because we can't tell the difference between those modifications
	 * and an attempt by the pageout daemon to flush tmpfs pages to
	 * swap.
	 *
	 * This is because in order to defer flushes as long as possible
	 * buwrite() works by marking the underlying VM pages dirty in
	 * order to be able to dispose of the buffer cache buffer without
	 * flushing it.
	 */
	if (vp->v_writecount) {
		node->tn_status |= TMPFS_NODE_MODIFIED;
	} else {
		node->tn_mtime = vp->v_lastwrite_ts.tv_sec;
		node->tn_mtimensec = vp->v_lastwrite_ts.tv_nsec;
	}

	if (extended)
		node->tn_status |= TMPFS_NODE_CHANGED;

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
done:
	TMPFS_NODE_UNLOCK(node);
	if (kflags)
		tmpfs_knote(vp, kflags);

	return(error);
}

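/*
 * Advisory record locking, delegated to the generic lockf code using
 * the per-node lock list.
 */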
static int
tmpfs_advlock(struct vop_advlock_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;
	int error;

	node = VP_TO_TMPFS_NODE(vp);
	error = (lf_advlock(ap, &node->tn_advlock, node->tn_size));

	return (error);
}

/*
 * The strategy function is typically only called when memory pressure
 * forces the system to attempt to pageout pages.  It can also be called
 * by [n]vtruncbuf() when a truncation cuts a page in half.  Normal write
 * operations do not usually reach it, because tmpfs_write() dirties the
 * underlying VM pages directly via buwrite().
 *
 * We set VKVABIO for VREG files so bp->b_data may not be synchronized to
 * our cpu.  swap_pager_strategy() is all we really use, and it directly
 * supports this.
 */
static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	int i;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Don't bother flushing to swap if there is no swap, just
	 * ensure that the pages are marked as needing a commit (still).
	 */
	if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		nbio = push_bio(bio);
		nbio->bio_done = tmpfs_strategy_done;
		nbio->bio_offset = bio->bio_offset;
		swap_pager_strategy(uobj, nbio);
	}
	return 0;
}

/*
 * If we were unable to commit the pages to swap make sure they are marked
 * as needing a commit (again).  If we were, clear the flag to allow the
 * pages to be freed.
 *
 * Do not error-out the buffer.  In particular, vinvalbuf() needs to
 * always work.
 */
static void
tmpfs_strategy_done(struct bio *bio)
{
	struct buf *bp;
	vm_page_t m;
	int i;

	bp = bio->bio_buf;

	if (bp->b_flags & B_ERROR) {
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
		bp->b_resid = 0;
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
	} else {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_clear_commit(m);
		}
	}
	bio = pop_bio(bio);
	biodone(bio);
}

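/*
 * tmpfs buffers map 1:1 onto the backing object, so the translated
 * offset equals the logical offset and no run lengths are reported.
 */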
static int
tmpfs_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

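/*
 * Remove (unlink) a file.  The directory entry is detached and freed;
 * the node itself is not destroyed until its vnode is reclaimed.
 */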
static int
tmpfs_nremove(struct vop_nremove_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vnode *vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/*
	 * We have to acquire the vp from ap->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
	KKASSERT(error == 0);
	KKASSERT(vp->v_mount == dvp->v_mount);
	vn_unlock(vp);

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out2;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	TMPFS_NODE_LOCK(dnode);
	de = tmpfs_dir_lookup(dnode, node, ncp);
	if (de == NULL) {
		error = ENOENT;
		goto out;
	}

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	if (node->tn_links > 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(node);
	}

	cache_unlink(ap->a_nch);
	tmpfs_knote(vp, NOTE_DELETE);
	error = 0;

out:
	TMPFS_NODE_UNLOCK(dnode);
	if (error == 0)
		tmpfs_knote(dvp, NOTE_WRITE);
out2:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nlink(struct vop_nlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;
	struct tmpfs_node *dnode;
	int error;

	KKASSERT(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);
	dnode = VP_TO_TMPFS_NODE(dvp);
	TMPFS_NODE_LOCK(dnode);

	/* XXX: Why aren't the following two tests done by the caller? */

	/* Hard links of directories are forbidden. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	/* Cannot create cross-device links. */
	if (dvp->v_mount != vp->v_mount) {
		error = EXDEV;
		goto out;
	}

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	KKASSERT(node->tn_links <= LINK_MAX);
	if (node->tn_links >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
				   ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	tmpfs_dir_attach(dnode, de);

	/* vp link count has changed, so update node times. */

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);
	tmpfs_update(vp);

	tmpfs_knote(vp, NOTE_LINK);
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, vp);
	error = 0;

out:
	TMPFS_NODE_UNLOCK(dnode);
	if (error == 0)
		tmpfs_knote(dvp, NOTE_WRITE);
	return error;
}

/* --------------------------------------------------------------------- */

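/*
 * Rename fdvp/fncp to tdvp/tncp, possibly replacing an existing
 * target.  A replaced directory must be empty, and flag and
 * cross-mount checks are made before any directory state is changed.
 */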
static int
tmpfs_nrename(struct vop_nrename_args *ap)
{
	struct vnode *fdvp = ap->a_fdvp;
	struct namecache *fncp = ap->a_fnch->ncp;
	struct vnode *fvp = fncp->nc_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct namecache *tncp = ap->a_tnch->ncp;
	struct vnode *tvp;
	struct tmpfs_dirent *de, *tde;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;
	char *newname;
	char *oldname;
	int error;

	KKASSERT(fdvp->v_mount == fvp->v_mount);

	/*
	 * Because tvp can get overwritten we have to vget it instead of
	 * just vref or use it, otherwise its VINACTIVE flag may not get
	 * cleared and the node won't get destroyed.
	 */
	error = cache_vget(ap->a_tnch, ap->a_cred, LK_SHARED, &tvp);
	if (error == 0) {
		tnode = VP_TO_TMPFS_NODE(tvp);
		vn_unlock(tvp);
	} else {
		tnode = NULL;
	}

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	TMPFS_NODE_LOCK(fdnode);
	de = tmpfs_dir_lookup(fdnode, fnode, fncp);
	TMPFS_NODE_UNLOCK(fdnode);	/* XXX depend on namecache lock */

	/* Avoid manipulating '.' and '..' entries. */
	if (de == NULL) {
		error = ENOENT;
		goto out_locked;
	}
	KKASSERT(de->td_node == fnode);

	/*
	 * If replacing an entry in the target directory and that entry
	 * is a directory, it must be empty.
	 *
	 * kern_rename guarantees the destination to be a directory
	 * if the source is one (it does?).
	 */
	if (tvp != NULL) {
		KKASSERT(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			KKASSERT(fnode->tn_type != VDIR &&
				 tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
	    (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * Ensure that we have enough memory to hold the new name, if it
	 * has to be changed.
	 */
	if (fncp->nc_nlen != tncp->nc_nlen ||
	    bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
		newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
				  M_WAITOK | M_NULLOK);
		if (newname == NULL) {
			error = ENOSPC;
			goto out_locked;
		}
		bcopy(tncp->nc_name, newname, tncp->nc_nlen);
		newname[tncp->nc_nlen] = '\0';
	} else {
		newname = NULL;
	}

	/*
	 * Unlink entry from source directory.  Note that the kernel has
	 * already checked for illegal recursion cases (renaming a directory
	 * into a subdirectory of itself).
	 */
	if (fdnode != tdnode) {
		tmpfs_dir_detach(fdnode, de);
	} else {
		/* XXX depend on namecache lock */
		TMPFS_NODE_LOCK(fdnode);
		KKASSERT(de == tmpfs_dir_lookup(fdnode, fnode, fncp));
		RB_REMOVE(tmpfs_dirtree, &fdnode->tn_dir.tn_dirtree, de);
		RB_REMOVE(tmpfs_dirtree_cookie,
			  &fdnode->tn_dir.tn_cookietree, de);
		TMPFS_NODE_UNLOCK(fdnode);
	}

	/*
	 * Handle any name change.  Swap with newname, we will
	 * deallocate it at the end.
	 */
	if (newname != NULL) {
#if 0
		TMPFS_NODE_LOCK(fnode);
		fnode->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(fnode);
#endif
		oldname = de->td_name;
		de->td_name = newname;
		de->td_namelen = (uint16_t)tncp->nc_nlen;
		newname = oldname;
	}

	/*
	 * If we are overwriting an entry, we have to remove the old one
	 * from the target directory.
	 */
	if (tvp != NULL) {
		/* Remove the old entry from the target directory. */
		TMPFS_NODE_LOCK(tdnode);
		tde = tmpfs_dir_lookup(tdnode, tnode, tncp);
		tmpfs_dir_detach(tdnode, tde);
		TMPFS_NODE_UNLOCK(tdnode);
		tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);

		/*
		 * Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed.
		 */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
		/*cache_inval_vp(tvp, CINV_DESTROY);*/
	}

	/*
	 * Link entry to target directory.  If the entry
	 * represents a directory move the parent linkage
	 * as well.
	 */
	if (fdnode != tdnode) {
		if (de->td_node->tn_type == VDIR) {
			TMPFS_VALIDATE_DIR(fnode);
		}
		tmpfs_dir_attach(tdnode, de);
	} else {
		TMPFS_NODE_LOCK(tdnode);
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
		RB_INSERT(tmpfs_dirtree, &tdnode->tn_dir.tn_dirtree, de);
		RB_INSERT(tmpfs_dirtree_cookie,
			  &tdnode->tn_dir.tn_cookietree, de);
		TMPFS_NODE_UNLOCK(tdnode);
	}

	/*
	 * Finish up
	 */
	if (newname) {
		kfree(newname, tmp->tm_name_zone);
		newname = NULL;
	}
	cache_rename(ap->a_fnch, ap->a_tnch);
	tmpfs_knote(ap->a_fdvp, NOTE_WRITE);
	tmpfs_knote(ap->a_tdvp, NOTE_WRITE);
	if (fnode->tn_vnode)
		tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
	error = 0;

out_locked:
	;
out:
	if (tvp)
		vrele(tvp);
	return error;
}

/* --------------------------------------------------------------------- */

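/*
 * Create a directory.  tmpfs_alloc_file() does the actual work; on
 * success the namecache entry is resolved to the new vnode.
 */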
static int
tmpfs_nmkdir(struct vop_nmkdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	KKASSERT(vap->va_type == VDIR);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

/* --------------------------------------------------------------------- */

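/*
 * Remove a directory.  The victim must be empty and neither it nor
 * its parent may be flagged immutable, append-only, or no-unlink.
 */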
static int
tmpfs_nrmdir(struct vop_nrmdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vnode *vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	int error;

	/*
	 * We have to acquire the vp from ap->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
	KKASSERT(error == 0);
	vn_unlock(vp);

	/*
	 * Prevalidate so we don't hit an assertion later
	 */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/*
	 * Directories with more than two entries ('.' and '..') cannot
	 * be removed.
	 */
	if (node->tn_size > 0) {
		error = ENOTEMPTY;
		goto out;
	}

	if ((dnode->tn_flags & APPEND)
	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/*
	 * This invariant holds only if we are not trying to
	 * remove "..".  We checked for that above so this is safe now.
	 */
	KKASSERT(node->tn_dir.tn_parent == dnode);

	/*
	 * Get the directory entry associated with node (vp).  This
	 * was filled by tmpfs_lookup while looking up the entry.
	 */
	TMPFS_NODE_LOCK(dnode);
	de = tmpfs_dir_lookup(dnode, node, ncp);
	KKASSERT(TMPFS_DIRENT_MATCHES(de, ncp->nc_name, ncp->nc_nlen));

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND) ||
	    node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
		error = EPERM;
		TMPFS_NODE_UNLOCK(dnode);
		goto out;
	}

	/* Detach the directory entry from the directory (dnode). */
	tmpfs_dir_detach(dnode, de);
	TMPFS_NODE_UNLOCK(dnode);

	/* No vnode should be allocated for this entry from this point */
	TMPFS_NODE_LOCK(dnode);
	TMPFS_ASSERT_ELOCKED(dnode);
	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);

	/*
	 * Must set parent linkage to NULL (tested by ncreate to disallow
	 * the creation of new files/dirs in a deleted directory)
	 */
	node->tn_status |= TMPFS_NODE_CHANGED;

	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			    TMPFS_NODE_MODIFIED;

	TMPFS_NODE_UNLOCK(node);
	TMPFS_NODE_UNLOCK(dnode);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache). */

	TMPFS_NODE_LOCK(dnode);
	dnode->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(dnode);
	tmpfs_update(dvp);

	cache_unlink(ap->a_nch);
	tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	error = 0;

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	char *target = ap->a_target;
	int error;

	vap->va_type = VLNK;
	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
	if (error == 0) {
		tmpfs_knote(*vpp, NOTE_WRITE);
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
	}
	return error;
}

/* --------------------------------------------------------------------- */

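/*
 * Read directory entries.  '.' and '..' are synthesized from fixed
 * cookies; the remaining entries come from the per-directory cookie
 * tree, which also supplies the NFS cookie array when requested.
 */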
static int
tmpfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		/* may lock parent, cannot hold node lock */
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	TMPFS_NODE_LOCK_SH(node);
	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables. */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = RB_MIN(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree);
				} else if (de != NULL) {
					de = RB_NEXT(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree, de);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = RB_NEXT(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree, de);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}
			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}
	TMPFS_NODE_UNLOCK(node);

	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}

/* --------------------------------------------------------------------- */

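/*
 * Read a symlink; the target string is stored directly in tn_link.
 */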
static int
tmpfs_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int error;
	struct tmpfs_node *node;

	KKASSERT(uio->uio_offset == 0);
	KKASSERT(vp->v_type == VLNK);

	node = VP_TO_TMPFS_NODE(vp);
	TMPFS_NODE_LOCK_SH(node);
	error = uiomove(node->tn_link,
			MIN(node->tn_size, uio->uio_resid), uio);
	TMPFS_NODE_UNLOCK(node);
	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}

/* --------------------------------------------------------------------- */

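/*
 * Called when the last reference to the vnode is dropped.  Deleted
 * files are truncated and recycled immediately; otherwise the node's
 * VM pages are saved into the swap-backed aobj so the vnode itself
 * can be reclaimed without losing file data.
 */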
static int
tmpfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Degenerate case
	 */
	if (node == NULL) {
		vrecycle(vp);
		lwkt_reltoken(&mp->mnt_token);
		return(0);
	}

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0)
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		/*
		 * We must retain any VM pages belonging to the vnode's
		 * object as the vnode will destroy the object during a
		 * later reclaim.  We call vinvalbuf(V_SAVE) to clean
		 * out the buffer cache.
		 *
		 * On DragonFlyBSD, vnodes are not immediately deactivated
		 * on the 1->0 ref transition, so this is a relatively
		 * optimal operation.  We have to do this in tmpfs_inactive()
		 * because the pages will have already been thrown away
		 * at the time tmpfs_reclaim() is called.
		 */
		if (node->tn_type == VREG &&
		    node->tn_reg.tn_pages_in_aobj == 0) {
			vinvalbuf(vp, V_SAVE, 0, 0);
			KKASSERT(RB_EMPTY(&vp->v_rbdirty_tree));
			KKASSERT(RB_EMPTY(&vp->v_rbclean_tree));
			tmpfs_move_pages(vp->v_object, node->tn_reg.tn_aobj);
			node->tn_reg.tn_pages_in_aobj = 1;
		}

		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	KKASSERT(mp == tmp->tm_mount);

	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the
	 * user, we must free its associated data structures now that
	 * the vnode is being reclaimed.
	 *
	 * Directories have an extra link ref.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		tmpfs_free_node(tmp, node);
		/* eats the lock */
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	KKASSERT(vp->v_data == NULL);
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_mountctl(struct vop_mountctl_args *ap)
{
	struct tmpfs_mount *tmp;
	struct mount *mp;
	int rc;

	mp = ap->a_head.a_ops->head.vv_mount;
	lwkt_gettoken(&mp->mnt_token);

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		tmp = (struct tmpfs_mount *) mp->mnt_data;

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &tmp->tm_export,
					(const struct export_args *) ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}

	lwkt_reltoken(&mp->mnt_token);
	return (rc);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
	    node, node->tn_flags, node->tn_links);
	kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
	    node->tn_mode, node->tn_uid, node->tn_gid,
	    (uintmax_t)node->tn_size, node->tn_status);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	kprintf("\n");

	return 0;
}

/* --------------------------------------------------------------------- */

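/*
 * Report configuration limits.  _PC_FILESIZEBITS is derived from the
 * mount's maximum page count; the other values are system constants.
 */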
static int
tmpfs_pathconf(struct vop_pathconf_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int name = ap->a_name;
	register_t *retval = ap->a_retval;
	struct tmpfs_mount *tmp;
	int error;

	error = 0;

	switch (name) {
	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;

	case _PC_FILESIZEBITS:
		tmp = VFS_TO_TMPFS(vp->v_mount);
		*retval = max(32, flsll(tmp->tm_pages_max * PAGE_SIZE) + 1);
		break;

	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		break;

	case _PC_NAME_MAX:
		*retval = NAME_MAX;
		break;

	case _PC_NO_TRUNC:
		*retval = 1;
		break;

	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		break;

	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		break;

	case _PC_SYNC_IO:
		*retval = 1;
		break;

	case _PC_2_SYMLINKS:
		*retval = 1;
		break;

	default:
		error = EINVAL;
	}

	return error;
}

/************************************************************************
 *				KQFILTER OPS				*
 ************************************************************************/

static void filt_tmpfsdetach(struct knote *kn);
static int filt_tmpfsread(struct knote *kn, long hint);
static int filt_tmpfswrite(struct knote *kn, long hint);
static int filt_tmpfsvnode(struct knote *kn, long hint);

static struct filterops tmpfsread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfsread };
static struct filterops tmpfswrite_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfswrite };
static struct filterops tmpfsvnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfsvnode };

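/*
 * Attach a knote to the vnode.  The filter implementation is chosen
 * by kn_filter and the knote is linked onto the vnode's kqinfo list.
 */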
static int
tmpfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tmpfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tmpfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &tmpfsvnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_tmpfsdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_tmpfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}

	/*
	 * Interlock against MP races when performing this function.
	 */
	TMPFS_NODE_LOCK_SH(node);
	off = node->tn_size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI) {
		TMPFS_NODE_UNLOCK(node);
		return(1);
	}
	if (kn->kn_data == 0) {
		kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	}
	TMPFS_NODE_UNLOCK(node);
	return (kn->kn_data != 0);
}

static int
filt_tmpfswrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_tmpfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * Helper to move VM pages between objects
 *
 * NOTE: The vm_page_rename() dirties the page, so we can clear the
 *	 PG_NEED_COMMIT flag.  If the pages are being moved into tn_aobj,
 *	 the pageout daemon will be able to page them out.
 */
static int
tmpfs_move_pages_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_pindex_t pindex;

	pindex = p->pindex;
	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "tpgmov");
		info->error = -1;
		return -1;
	}
	if (p->object != info->object || p->pindex != pindex) {
		vm_page_wakeup(p);
		info->error = -1;
		return -1;
	}
	vm_page_rename(p, info->backing_object, pindex);
	vm_page_clear_commit(p);
	vm_page_wakeup(p);
	/* page automatically made dirty */

	return 0;
}

static
void
tmpfs_move_pages(vm_object_t src, vm_object_t dst)
{
	struct rb_vm_page_scan_info info;

	vm_object_hold(src);
	vm_object_hold(dst);
	info.object = src;
	info.backing_object = dst;
	do {
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&src->rb_memq, NULL,
					tmpfs_move_pages_callback, &info);
	} while (info.error < 0);
	vm_object_drop(dst);
	vm_object_drop(src);
}

/* --------------------------------------------------------------------- */

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_ops tmpfs_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_ncreate =		tmpfs_ncreate,
	.vop_nresolve =		tmpfs_nresolve,
	.vop_nlookupdotdot =	tmpfs_nlookupdotdot,
	.vop_nmknod =		tmpfs_nmknod,
	.vop_open =		tmpfs_open,
	.vop_close =		tmpfs_close,
	.vop_access =		tmpfs_access,
	.vop_getattr =		tmpfs_getattr,
	.vop_setattr =		tmpfs_setattr,
	.vop_read =		tmpfs_read,
	.vop_write =		tmpfs_write,
	.vop_fsync =		tmpfs_fsync,
	.vop_mountctl =		tmpfs_mountctl,
	.vop_nremove =		tmpfs_nremove,
	.vop_nlink =		tmpfs_nlink,
	.vop_nrename =		tmpfs_nrename,
	.vop_nmkdir =		tmpfs_nmkdir,
	.vop_nrmdir =		tmpfs_nrmdir,
	.vop_nsymlink =		tmpfs_nsymlink,
	.vop_readdir =		tmpfs_readdir,
	.vop_readlink =		tmpfs_readlink,
	.vop_inactive =		tmpfs_inactive,
	.vop_reclaim =		tmpfs_reclaim,
	.vop_print =		tmpfs_print,
	.vop_pathconf =		tmpfs_pathconf,
	.vop_bmap =		tmpfs_bmap,
	.vop_strategy =		tmpfs_strategy,
	.vop_advlock =		tmpfs_advlock,
	.vop_kqfilter =		tmpfs_kqfilter
};