1 /*- 2 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 7 * 2005 program. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 * 30 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ 31 */ 32 33 /* 34 * tmpfs vnode interface. 35 */ 36 37 #include <sys/kernel.h> 38 #include <sys/kern_syscall.h> 39 #include <sys/param.h> 40 #include <sys/uio.h> 41 #include <sys/fcntl.h> 42 #include <sys/lockf.h> 43 #include <sys/priv.h> 44 #include <sys/proc.h> 45 #include <sys/resourcevar.h> 46 #include <sys/sched.h> 47 #include <sys/stat.h> 48 #include <sys/systm.h> 49 #include <sys/sysctl.h> 50 #include <sys/unistd.h> 51 #include <sys/vfsops.h> 52 #include <sys/vnode.h> 53 #include <sys/mountctl.h> 54 55 #include <vm/vm.h> 56 #include <vm/vm_extern.h> 57 #include <vm/vm_object.h> 58 #include <vm/vm_page.h> 59 #include <vm/vm_pageout.h> 60 #include <vm/vm_pager.h> 61 #include <vm/swap_pager.h> 62 63 #include <sys/buf2.h> 64 #include <vm/vm_page2.h> 65 66 #include <vfs/fifofs/fifo.h> 67 #include <vfs/tmpfs/tmpfs_vnops.h> 68 #include "tmpfs.h" 69 70 static void tmpfs_strategy_done(struct bio *bio); 71 static void tmpfs_move_pages(vm_object_t src, vm_object_t dst); 72 73 static int tmpfs_cluster_enable = 1; 74 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "TMPFS filesystem"); 75 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, cluster_enable, CTLFLAG_RW, 76 &tmpfs_cluster_enable, 0, ""); 77 78 static __inline 79 void 80 tmpfs_knote(struct vnode *vp, int flags) 81 { 82 if (flags) 83 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags); 84 } 85 86 87 /* --------------------------------------------------------------------- */ 88 89 static int 90 tmpfs_nresolve(struct vop_nresolve_args *ap) 91 { 92 struct vnode *dvp = ap->a_dvp; 93 struct vnode *vp = NULL; 94 struct namecache *ncp = ap->a_nch->ncp; 95 struct tmpfs_node *tnode; 96 struct tmpfs_dirent *de; 97 struct tmpfs_node *dnode; 98 int error; 99 100 dnode = VP_TO_TMPFS_DIR(dvp); 101 102 TMPFS_NODE_LOCK_SH(dnode); 103 loop: 104 de = tmpfs_dir_lookup(dnode, NULL, ncp); 105 if (de == NULL) { 106 error = ENOENT; 107 } else { 108 /* 109 * Allocate a vnode for the node we found. Use 110 * tmpfs_alloc_vp()'s deadlock handling mode. 111 */ 112 tnode = de->td_node; 113 error = tmpfs_alloc_vp(dvp->v_mount, dnode, tnode, 114 LK_EXCLUSIVE | LK_RETRY, &vp); 115 if (error == EAGAIN) 116 goto loop; 117 if (error) 118 goto out; 119 KKASSERT(vp); 120 } 121 122 out: 123 TMPFS_NODE_UNLOCK(dnode); 124 125 if ((dnode->tn_status & TMPFS_NODE_ACCESSED) == 0) { 126 TMPFS_NODE_LOCK(dnode); 127 dnode->tn_status |= TMPFS_NODE_ACCESSED; 128 TMPFS_NODE_UNLOCK(dnode); 129 } 130 131 /* 132 * Store the result of this lookup in the cache. Avoid this if the 133 * request was for creation, as it does not improve timings on 134 * emprical tests. 135 */ 136 if (vp) { 137 vn_unlock(vp); 138 cache_setvp(ap->a_nch, vp); 139 vrele(vp); 140 } else if (error == ENOENT) { 141 cache_setvp(ap->a_nch, NULL); 142 } 143 return (error); 144 } 145 146 static int 147 tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 148 { 149 struct vnode *dvp = ap->a_dvp; 150 struct vnode **vpp = ap->a_vpp; 151 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp); 152 struct ucred *cred = ap->a_cred; 153 int error; 154 155 *vpp = NULL; 156 157 /* Check accessibility of requested node as a first step. */ 158 error = VOP_ACCESS(dvp, VEXEC, cred); 159 if (error != 0) 160 return error; 161 162 if (dnode->tn_dir.tn_parent != NULL) { 163 /* Allocate a new vnode on the matching entry. */ 164 error = tmpfs_alloc_vp(dvp->v_mount, 165 NULL, dnode->tn_dir.tn_parent, 166 LK_EXCLUSIVE | LK_RETRY, vpp); 167 168 if (*vpp) 169 vn_unlock(*vpp); 170 } 171 return (*vpp == NULL) ? ENOENT : 0; 172 } 173 174 /* --------------------------------------------------------------------- */ 175 176 static int 177 tmpfs_ncreate(struct vop_ncreate_args *ap) 178 { 179 struct vnode *dvp = ap->a_dvp; 180 struct vnode **vpp = ap->a_vpp; 181 struct namecache *ncp = ap->a_nch->ncp; 182 struct vattr *vap = ap->a_vap; 183 struct ucred *cred = ap->a_cred; 184 int error; 185 186 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK); 187 188 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL); 189 if (error == 0) { 190 cache_setunresolved(ap->a_nch); 191 cache_setvp(ap->a_nch, *vpp); 192 tmpfs_knote(dvp, NOTE_WRITE); 193 } 194 return (error); 195 } 196 /* --------------------------------------------------------------------- */ 197 198 static int 199 tmpfs_nmknod(struct vop_nmknod_args *ap) 200 { 201 struct vnode *dvp = ap->a_dvp; 202 struct vnode **vpp = ap->a_vpp; 203 struct namecache *ncp = ap->a_nch->ncp; 204 struct vattr *vap = ap->a_vap; 205 struct ucred *cred = ap->a_cred; 206 int error; 207 208 if (vap->va_type != VBLK && vap->va_type != VCHR && 209 vap->va_type != VFIFO) { 210 return (EINVAL); 211 } 212 213 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL); 214 if (error == 0) { 215 cache_setunresolved(ap->a_nch); 216 cache_setvp(ap->a_nch, *vpp); 217 tmpfs_knote(dvp, NOTE_WRITE); 218 } 219 return error; 220 } 221 222 /* --------------------------------------------------------------------- */ 223 224 static int 225 tmpfs_open(struct vop_open_args *ap) 226 { 227 struct vnode *vp = ap->a_vp; 228 int mode = ap->a_mode; 229 struct tmpfs_node *node; 230 int error; 231 232 node = VP_TO_TMPFS_NODE(vp); 233 234 #if 0 235 /* The file is still active but all its names have been removed 236 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as 237 * it is about to die. */ 238 if (node->tn_links < 1) 239 return (ENOENT); 240 #endif 241 242 /* If the file is marked append-only, deny write requests. */ 243 if ((node->tn_flags & APPEND) && 244 (mode & (FWRITE | O_APPEND)) == FWRITE) { 245 error = EPERM; 246 } else { 247 if (node->tn_reg.tn_pages_in_aobj) { 248 TMPFS_NODE_LOCK(node); 249 if (node->tn_reg.tn_pages_in_aobj) { 250 tmpfs_move_pages(node->tn_reg.tn_aobj, 251 vp->v_object); 252 node->tn_reg.tn_pages_in_aobj = 0; 253 } 254 TMPFS_NODE_UNLOCK(node); 255 } 256 error = vop_stdopen(ap); 257 } 258 259 return (error); 260 } 261 262 /* --------------------------------------------------------------------- */ 263 264 static int 265 tmpfs_close(struct vop_close_args *ap) 266 { 267 struct vnode *vp = ap->a_vp; 268 struct tmpfs_node *node; 269 int error; 270 271 node = VP_TO_TMPFS_NODE(vp); 272 273 if (node->tn_links > 0) { 274 /* 275 * Update node times. No need to do it if the node has 276 * been deleted, because it will vanish after we return. 277 */ 278 tmpfs_update(vp); 279 } 280 281 error = vop_stdclose(ap); 282 283 return (error); 284 } 285 286 /* --------------------------------------------------------------------- */ 287 288 int 289 tmpfs_access(struct vop_access_args *ap) 290 { 291 struct vnode *vp = ap->a_vp; 292 int error; 293 struct tmpfs_node *node; 294 295 node = VP_TO_TMPFS_NODE(vp); 296 297 switch (vp->v_type) { 298 case VDIR: 299 /* FALLTHROUGH */ 300 case VLNK: 301 /* FALLTHROUGH */ 302 case VREG: 303 if ((ap->a_mode & VWRITE) && 304 (vp->v_mount->mnt_flag & MNT_RDONLY)) { 305 error = EROFS; 306 goto out; 307 } 308 break; 309 310 case VBLK: 311 /* FALLTHROUGH */ 312 case VCHR: 313 /* FALLTHROUGH */ 314 case VSOCK: 315 /* FALLTHROUGH */ 316 case VFIFO: 317 break; 318 319 default: 320 error = EINVAL; 321 goto out; 322 } 323 324 if ((ap->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) { 325 error = EPERM; 326 goto out; 327 } 328 329 error = vop_helper_access(ap, node->tn_uid, node->tn_gid, 330 node->tn_mode, 0); 331 out: 332 return error; 333 } 334 335 /* --------------------------------------------------------------------- */ 336 337 int 338 tmpfs_getattr(struct vop_getattr_args *ap) 339 { 340 struct vnode *vp = ap->a_vp; 341 struct vattr *vap = ap->a_vap; 342 struct tmpfs_node *node; 343 344 node = VP_TO_TMPFS_NODE(vp); 345 346 tmpfs_update(vp); 347 348 TMPFS_NODE_LOCK_SH(node); 349 vap->va_type = vp->v_type; 350 vap->va_mode = node->tn_mode; 351 vap->va_nlink = node->tn_links; 352 vap->va_uid = node->tn_uid; 353 vap->va_gid = node->tn_gid; 354 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 355 vap->va_fileid = node->tn_id; 356 vap->va_size = node->tn_size; 357 vap->va_blocksize = PAGE_SIZE; 358 vap->va_atime.tv_sec = node->tn_atime; 359 vap->va_atime.tv_nsec = node->tn_atimensec; 360 vap->va_mtime.tv_sec = node->tn_mtime; 361 vap->va_mtime.tv_nsec = node->tn_mtimensec; 362 vap->va_ctime.tv_sec = node->tn_ctime; 363 vap->va_ctime.tv_nsec = node->tn_ctimensec; 364 vap->va_gen = node->tn_gen; 365 vap->va_flags = node->tn_flags; 366 if (vp->v_type == VBLK || vp->v_type == VCHR) { 367 vap->va_rmajor = umajor(node->tn_rdev); 368 vap->va_rminor = uminor(node->tn_rdev); 369 } 370 vap->va_bytes = round_page(node->tn_size); 371 vap->va_filerev = 0; 372 TMPFS_NODE_UNLOCK(node); 373 374 return 0; 375 } 376 377 /* --------------------------------------------------------------------- */ 378 379 int 380 tmpfs_setattr(struct vop_setattr_args *ap) 381 { 382 struct vnode *vp = ap->a_vp; 383 struct vattr *vap = ap->a_vap; 384 struct ucred *cred = ap->a_cred; 385 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp); 386 int error = 0; 387 int kflags = 0; 388 389 TMPFS_NODE_LOCK(node); 390 if (error == 0 && (vap->va_flags != VNOVAL)) { 391 error = tmpfs_chflags(vp, vap->va_flags, cred); 392 kflags |= NOTE_ATTRIB; 393 } 394 395 if (error == 0 && (vap->va_size != VNOVAL)) { 396 /* restore any saved pages before proceeding */ 397 if (node->tn_reg.tn_pages_in_aobj) { 398 tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object); 399 node->tn_reg.tn_pages_in_aobj = 0; 400 } 401 if (vap->va_size > node->tn_size) 402 kflags |= NOTE_WRITE | NOTE_EXTEND; 403 else 404 kflags |= NOTE_WRITE; 405 error = tmpfs_chsize(vp, vap->va_size, cred); 406 } 407 408 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL || 409 vap->va_gid != (gid_t)VNOVAL)) { 410 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred); 411 kflags |= NOTE_ATTRIB; 412 } 413 414 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) { 415 error = tmpfs_chmod(vp, vap->va_mode, cred); 416 kflags |= NOTE_ATTRIB; 417 } 418 419 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL && 420 vap->va_atime.tv_nsec != VNOVAL) || 421 (vap->va_mtime.tv_sec != VNOVAL && 422 vap->va_mtime.tv_nsec != VNOVAL) )) { 423 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime, 424 vap->va_vaflags, cred); 425 kflags |= NOTE_ATTRIB; 426 } 427 428 /* 429 * Update the node times. We give preference to the error codes 430 * generated by this function rather than the ones that may arise 431 * from tmpfs_update. 432 */ 433 tmpfs_update(vp); 434 TMPFS_NODE_UNLOCK(node); 435 tmpfs_knote(vp, kflags); 436 437 return (error); 438 } 439 440 /* --------------------------------------------------------------------- */ 441 442 /* 443 * fsync is usually a NOP, but we must take action when unmounting or 444 * when recycling. 445 */ 446 static int 447 tmpfs_fsync(struct vop_fsync_args *ap) 448 { 449 struct tmpfs_node *node; 450 struct vnode *vp = ap->a_vp; 451 452 node = VP_TO_TMPFS_NODE(vp); 453 454 /* 455 * tmpfs vnodes typically remain dirty, avoid long syncer scans 456 * by forcing removal from the syncer list. 457 */ 458 vn_syncer_remove(vp, 1); 459 460 tmpfs_update(vp); 461 if (vp->v_type == VREG) { 462 if (vp->v_flag & VRECLAIMED) { 463 if (node->tn_links == 0) 464 tmpfs_truncate(vp, 0); 465 else 466 vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL); 467 } 468 } 469 470 return 0; 471 } 472 473 /* --------------------------------------------------------------------- */ 474 475 static int 476 tmpfs_read(struct vop_read_args *ap) 477 { 478 struct buf *bp; 479 struct vnode *vp = ap->a_vp; 480 struct uio *uio = ap->a_uio; 481 struct tmpfs_node *node; 482 off_t base_offset; 483 size_t offset; 484 size_t len; 485 size_t resid; 486 int error; 487 int seqcount; 488 489 /* 490 * Check the basics 491 */ 492 if (uio->uio_offset < 0) 493 return (EINVAL); 494 if (vp->v_type != VREG) 495 return (EINVAL); 496 497 /* 498 * Extract node, try to shortcut the operation through 499 * the VM page cache, allowing us to avoid buffer cache 500 * overheads. 501 */ 502 node = VP_TO_TMPFS_NODE(vp); 503 resid = uio->uio_resid; 504 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 505 error = vop_helper_read_shortcut(ap); 506 if (error) 507 return error; 508 if (uio->uio_resid == 0) { 509 if (resid) 510 goto finished; 511 return error; 512 } 513 514 /* 515 * restore any saved pages before proceeding 516 */ 517 if (node->tn_reg.tn_pages_in_aobj) { 518 TMPFS_NODE_LOCK(node); 519 if (node->tn_reg.tn_pages_in_aobj) { 520 tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object); 521 node->tn_reg.tn_pages_in_aobj = 0; 522 } 523 TMPFS_NODE_UNLOCK(node); 524 } 525 526 /* 527 * Fall-through to our normal read code. 528 */ 529 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) { 530 /* 531 * Use buffer cache I/O (via tmpfs_strategy) 532 */ 533 offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64; 534 base_offset = (off_t)uio->uio_offset - offset; 535 bp = getcacheblk(vp, base_offset, TMPFS_BLKSIZE, GETBLK_KVABIO); 536 if (bp == NULL) { 537 if (tmpfs_cluster_enable) { 538 error = cluster_readx(vp, node->tn_size, 539 base_offset, 540 TMPFS_BLKSIZE, 541 B_NOTMETA | B_KVABIO, 542 uio->uio_resid, 543 seqcount * MAXBSIZE, 544 &bp); 545 } else { 546 error = bread_kvabio(vp, base_offset, 547 TMPFS_BLKSIZE, &bp); 548 } 549 if (error) { 550 brelse(bp); 551 kprintf("tmpfs_read bread error %d\n", error); 552 break; 553 } 554 555 /* 556 * tmpfs pretty much fiddles directly with the VM 557 * system, don't let it exhaust it or we won't play 558 * nice with other processes. 559 * 560 * Only do this if the VOP is coming from a normal 561 * read/write. The VM system handles the case for 562 * UIO_NOCOPY. 563 */ 564 if (uio->uio_segflg != UIO_NOCOPY) 565 vm_wait_nominal(); 566 } 567 bp->b_flags |= B_CLUSTEROK; 568 bkvasync(bp); 569 570 /* 571 * Figure out how many bytes we can actually copy this loop. 572 */ 573 len = TMPFS_BLKSIZE - offset; 574 if (len > uio->uio_resid) 575 len = uio->uio_resid; 576 if (len > node->tn_size - uio->uio_offset) 577 len = (size_t)(node->tn_size - uio->uio_offset); 578 579 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio); 580 bqrelse(bp); 581 if (error) { 582 kprintf("tmpfs_read uiomove error %d\n", error); 583 break; 584 } 585 } 586 587 finished: 588 if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) { 589 TMPFS_NODE_LOCK(node); 590 node->tn_status |= TMPFS_NODE_ACCESSED; 591 TMPFS_NODE_UNLOCK(node); 592 } 593 return (error); 594 } 595 596 static int 597 tmpfs_write(struct vop_write_args *ap) 598 { 599 struct buf *bp; 600 struct vnode *vp = ap->a_vp; 601 struct uio *uio = ap->a_uio; 602 struct thread *td = uio->uio_td; 603 struct tmpfs_node *node; 604 boolean_t extended; 605 off_t oldsize; 606 int error; 607 off_t base_offset; 608 size_t offset; 609 size_t len; 610 struct rlimit limit; 611 int trivial = 0; 612 int kflags = 0; 613 int seqcount; 614 615 error = 0; 616 if (uio->uio_resid == 0) { 617 return error; 618 } 619 620 node = VP_TO_TMPFS_NODE(vp); 621 622 if (vp->v_type != VREG) 623 return (EINVAL); 624 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 625 626 TMPFS_NODE_LOCK(node); 627 628 /* 629 * restore any saved pages before proceeding 630 */ 631 if (node->tn_reg.tn_pages_in_aobj) { 632 tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object); 633 node->tn_reg.tn_pages_in_aobj = 0; 634 } 635 636 oldsize = node->tn_size; 637 if (ap->a_ioflag & IO_APPEND) 638 uio->uio_offset = node->tn_size; 639 640 /* 641 * Check for illegal write offsets. 642 */ 643 if (uio->uio_offset + uio->uio_resid > 644 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) { 645 error = EFBIG; 646 goto done; 647 } 648 649 /* 650 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN). 651 */ 652 if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) { 653 error = kern_getrlimit(RLIMIT_FSIZE, &limit); 654 if (error) 655 goto done; 656 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) { 657 ksignal(td->td_proc, SIGXFSZ); 658 error = EFBIG; 659 goto done; 660 } 661 } 662 663 /* 664 * Extend the file's size if necessary 665 */ 666 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size); 667 668 while (uio->uio_resid > 0) { 669 /* 670 * Don't completely blow out running buffer I/O 671 * when being hit from the pageout daemon. 672 */ 673 if (uio->uio_segflg == UIO_NOCOPY && 674 (ap->a_ioflag & IO_RECURSE) == 0) { 675 bwillwrite(TMPFS_BLKSIZE); 676 } 677 678 /* 679 * Use buffer cache I/O (via tmpfs_strategy) 680 */ 681 offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64; 682 base_offset = (off_t)uio->uio_offset - offset; 683 len = TMPFS_BLKSIZE - offset; 684 if (len > uio->uio_resid) 685 len = uio->uio_resid; 686 687 if ((uio->uio_offset + len) > node->tn_size) { 688 trivial = (uio->uio_offset <= node->tn_size); 689 error = tmpfs_reg_resize(vp, uio->uio_offset + len, 690 trivial); 691 if (error) 692 break; 693 } 694 695 /* 696 * Read to fill in any gaps. Theoretically we could 697 * optimize this if the write covers the entire buffer 698 * and is not a UIO_NOCOPY write, however this can lead 699 * to a security violation exposing random kernel memory 700 * (whatever junk was in the backing VM pages before). 701 * 702 * So just use bread() to do the right thing. 703 */ 704 error = bread_kvabio(vp, base_offset, TMPFS_BLKSIZE, &bp); 705 bkvasync(bp); 706 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio); 707 if (error) { 708 kprintf("tmpfs_write uiomove error %d\n", error); 709 brelse(bp); 710 break; 711 } 712 713 if (uio->uio_offset > node->tn_size) { 714 node->tn_size = uio->uio_offset; 715 kflags |= NOTE_EXTEND; 716 } 717 kflags |= NOTE_WRITE; 718 719 /* 720 * Always try to flush the page in the UIO_NOCOPY case. This 721 * can come from the pageout daemon or during vnode eviction. 722 * It is not necessarily going to be marked IO_ASYNC/IO_SYNC. 723 * 724 * For the normal case we buwrite(), dirtying the underlying 725 * VM pages instead of dirtying the buffer and releasing the 726 * buffer as a clean buffer. This allows tmpfs to use 727 * essentially all available memory to cache file data. 728 * If we used bdwrite() the buffer cache would wind up 729 * flushing the data to swap too quickly. 730 * 731 * But because tmpfs can seriously load the VM system we 732 * fall-back to using bdwrite() when free memory starts 733 * to get low. This shifts the load away from the VM system 734 * and makes tmpfs act more like a normal filesystem with 735 * regards to disk activity. 736 * 737 * tmpfs pretty much fiddles directly with the VM 738 * system, don't let it exhaust it or we won't play 739 * nice with other processes. Only do this if the 740 * VOP is coming from a normal read/write. The VM system 741 * handles the case for UIO_NOCOPY. 742 */ 743 bp->b_flags |= B_CLUSTEROK; 744 if (uio->uio_segflg == UIO_NOCOPY) { 745 /* 746 * Flush from the pageout daemon, deal with 747 * potentially very heavy tmpfs write activity 748 * causing long stalls in the pageout daemon 749 * before pages get to free/cache. 750 * 751 * (a) Under severe pressure setting B_DIRECT will 752 * cause a buffer release to try to free the 753 * underlying pages. 754 * 755 * (b) Under modest memory pressure the B_RELBUF 756 * alone is sufficient to get the pages moved 757 * to the cache. We could also force this by 758 * setting B_NOTMETA but that might have other 759 * unintended side-effects (e.g. setting 760 * PG_NOTMETA on the VM page). 761 * 762 * Hopefully this will unblock the VM system more 763 * quickly under extreme tmpfs write load. 764 */ 765 if (vm_page_count_min(vm_page_free_hysteresis)) 766 bp->b_flags |= B_DIRECT; 767 bp->b_flags |= B_AGE | B_RELBUF; 768 bp->b_act_count = 0; /* buffer->deactivate pgs */ 769 cluster_awrite(bp); 770 } else if (vm_pages_needed) { 771 /* 772 * If the pageout daemon is running we cycle the 773 * write through the buffer cache normally to 774 * pipeline the flush, thus avoiding adding any 775 * more memory pressure to the pageout daemon. 776 */ 777 bp->b_act_count = 0; /* buffer->deactivate pgs */ 778 bdwrite(bp); 779 } else { 780 /* 781 * Otherwise run the buffer directly through to the 782 * backing VM store. 783 */ 784 buwrite(bp); 785 /*vm_wait_nominal();*/ 786 } 787 788 if (bp->b_error) { 789 kprintf("tmpfs_write bwrite error %d\n", bp->b_error); 790 break; 791 } 792 } 793 794 if (error) { 795 if (extended) { 796 (void)tmpfs_reg_resize(vp, oldsize, trivial); 797 kflags &= ~NOTE_EXTEND; 798 } 799 goto done; 800 } 801 802 /* 803 * Currently we don't set the mtime on files modified via mmap() 804 * because we can't tell the difference between those modifications 805 * and an attempt by the pageout daemon to flush tmpfs pages to 806 * swap. 807 * 808 * This is because in order to defer flushes as long as possible 809 * buwrite() works by marking the underlying VM pages dirty in 810 * order to be able to dispose of the buffer cache buffer without 811 * flushing it. 812 */ 813 if (uio->uio_segflg == UIO_NOCOPY) { 814 if (vp->v_flag & VLASTWRITETS) { 815 node->tn_mtime = vp->v_lastwrite_ts.tv_sec; 816 node->tn_mtimensec = vp->v_lastwrite_ts.tv_nsec; 817 } 818 } else { 819 node->tn_status |= TMPFS_NODE_MODIFIED; 820 vclrflags(vp, VLASTWRITETS); 821 } 822 823 if (extended) 824 node->tn_status |= TMPFS_NODE_CHANGED; 825 826 if (node->tn_mode & (S_ISUID | S_ISGID)) { 827 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) 828 node->tn_mode &= ~(S_ISUID | S_ISGID); 829 } 830 done: 831 TMPFS_NODE_UNLOCK(node); 832 if (kflags) 833 tmpfs_knote(vp, kflags); 834 835 return(error); 836 } 837 838 static int 839 tmpfs_advlock(struct vop_advlock_args *ap) 840 { 841 struct tmpfs_node *node; 842 struct vnode *vp = ap->a_vp; 843 int error; 844 845 node = VP_TO_TMPFS_NODE(vp); 846 error = (lf_advlock(ap, &node->tn_advlock, node->tn_size)); 847 848 return (error); 849 } 850 851 /* 852 * The strategy function is typically only called when memory pressure 853 * forces the system to attempt to pageout pages. It can also be called 854 * by [n]vtruncbuf() when a truncation cuts a page in half. Normal write 855 * operations 856 * 857 * We set VKVABIO for VREG files so bp->b_data may not be synchronized to 858 * our cpu. swap_pager_strategy() is all we really use, and it directly 859 * supports this. 860 */ 861 static int 862 tmpfs_strategy(struct vop_strategy_args *ap) 863 { 864 struct bio *bio = ap->a_bio; 865 struct bio *nbio; 866 struct buf *bp = bio->bio_buf; 867 struct vnode *vp = ap->a_vp; 868 struct tmpfs_node *node; 869 vm_object_t uobj; 870 vm_page_t m; 871 int i; 872 873 if (vp->v_type != VREG) { 874 bp->b_resid = bp->b_bcount; 875 bp->b_flags |= B_ERROR | B_INVAL; 876 bp->b_error = EINVAL; 877 biodone(bio); 878 return(0); 879 } 880 881 node = VP_TO_TMPFS_NODE(vp); 882 883 uobj = node->tn_reg.tn_aobj; 884 885 /* 886 * Don't bother flushing to swap if there is no swap, just 887 * ensure that the pages are marked as needing a commit (still). 888 */ 889 if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) { 890 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 891 m = bp->b_xio.xio_pages[i]; 892 vm_page_need_commit(m); 893 } 894 bp->b_resid = 0; 895 bp->b_error = 0; 896 biodone(bio); 897 } else { 898 nbio = push_bio(bio); 899 nbio->bio_done = tmpfs_strategy_done; 900 nbio->bio_offset = bio->bio_offset; 901 swap_pager_strategy(uobj, nbio); 902 } 903 return 0; 904 } 905 906 /* 907 * If we were unable to commit the pages to swap make sure they are marked 908 * as needing a commit (again). If we were, clear the flag to allow the 909 * pages to be freed. 910 * 911 * Do not error-out the buffer. In particular, vinvalbuf() needs to 912 * always work. 913 */ 914 static void 915 tmpfs_strategy_done(struct bio *bio) 916 { 917 struct buf *bp; 918 vm_page_t m; 919 int i; 920 921 bp = bio->bio_buf; 922 923 if (bp->b_flags & B_ERROR) { 924 bp->b_flags &= ~B_ERROR; 925 bp->b_error = 0; 926 bp->b_resid = 0; 927 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 928 m = bp->b_xio.xio_pages[i]; 929 vm_page_need_commit(m); 930 } 931 } else { 932 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 933 m = bp->b_xio.xio_pages[i]; 934 vm_page_clear_commit(m); 935 } 936 } 937 bio = pop_bio(bio); 938 biodone(bio); 939 } 940 941 static int 942 tmpfs_bmap(struct vop_bmap_args *ap) 943 { 944 if (ap->a_doffsetp != NULL) 945 *ap->a_doffsetp = ap->a_loffset; 946 if (ap->a_runp != NULL) 947 *ap->a_runp = 0; 948 if (ap->a_runb != NULL) 949 *ap->a_runb = 0; 950 951 return 0; 952 } 953 954 /* --------------------------------------------------------------------- */ 955 956 static int 957 tmpfs_nremove(struct vop_nremove_args *ap) 958 { 959 struct vnode *dvp = ap->a_dvp; 960 struct namecache *ncp = ap->a_nch->ncp; 961 struct vnode *vp; 962 int error; 963 struct tmpfs_dirent *de; 964 struct tmpfs_mount *tmp; 965 struct tmpfs_node *dnode; 966 struct tmpfs_node *node; 967 968 /* 969 * We have to acquire the vp from ap->a_nch because we will likely 970 * unresolve the namecache entry, and a vrele/vput is needed to 971 * trigger the tmpfs_inactive/tmpfs_reclaim sequence. 972 * 973 * We have to use vget to clear any inactive state on the vnode, 974 * otherwise the vnode may remain inactive and thus tmpfs_inactive 975 * will not get called when we release it. 976 */ 977 error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp); 978 KKASSERT(vp->v_mount == dvp->v_mount); 979 KKASSERT(error == 0); 980 vn_unlock(vp); 981 982 if (vp->v_type == VDIR) { 983 error = EISDIR; 984 goto out2; 985 } 986 987 dnode = VP_TO_TMPFS_DIR(dvp); 988 node = VP_TO_TMPFS_NODE(vp); 989 tmp = VFS_TO_TMPFS(vp->v_mount); 990 991 TMPFS_NODE_LOCK(dnode); 992 de = tmpfs_dir_lookup(dnode, node, ncp); 993 if (de == NULL) { 994 error = ENOENT; 995 TMPFS_NODE_UNLOCK(dnode); 996 goto out; 997 } 998 999 /* Files marked as immutable or append-only cannot be deleted. */ 1000 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) || 1001 (dnode->tn_flags & APPEND)) { 1002 error = EPERM; 1003 TMPFS_NODE_UNLOCK(dnode); 1004 goto out; 1005 } 1006 1007 /* Remove the entry from the directory; as it is a file, we do not 1008 * have to change the number of hard links of the directory. */ 1009 tmpfs_dir_detach(dnode, de); 1010 TMPFS_NODE_UNLOCK(dnode); 1011 1012 /* Free the directory entry we just deleted. Note that the node 1013 * referred by it will not be removed until the vnode is really 1014 * reclaimed. */ 1015 tmpfs_free_dirent(tmp, de); 1016 1017 if (node->tn_links > 0) { 1018 TMPFS_NODE_LOCK(node); 1019 node->tn_status |= TMPFS_NODE_CHANGED; 1020 TMPFS_NODE_UNLOCK(node); 1021 } 1022 1023 cache_unlink(ap->a_nch); 1024 tmpfs_knote(vp, NOTE_DELETE); 1025 error = 0; 1026 1027 out: 1028 if (error == 0) 1029 tmpfs_knote(dvp, NOTE_WRITE); 1030 out2: 1031 vrele(vp); 1032 1033 return error; 1034 } 1035 1036 /* --------------------------------------------------------------------- */ 1037 1038 static int 1039 tmpfs_nlink(struct vop_nlink_args *ap) 1040 { 1041 struct vnode *dvp = ap->a_dvp; 1042 struct vnode *vp = ap->a_vp; 1043 struct namecache *ncp = ap->a_nch->ncp; 1044 struct tmpfs_dirent *de; 1045 struct tmpfs_node *node; 1046 struct tmpfs_node *dnode; 1047 int error; 1048 1049 KKASSERT(dvp != vp); /* XXX When can this be false? */ 1050 1051 node = VP_TO_TMPFS_NODE(vp); 1052 dnode = VP_TO_TMPFS_NODE(dvp); 1053 TMPFS_NODE_LOCK(dnode); 1054 1055 /* XXX: Why aren't the following two tests done by the caller? */ 1056 1057 /* Hard links of directories are forbidden. */ 1058 if (vp->v_type == VDIR) { 1059 error = EPERM; 1060 goto out; 1061 } 1062 1063 /* Cannot create cross-device links. */ 1064 if (dvp->v_mount != vp->v_mount) { 1065 error = EXDEV; 1066 goto out; 1067 } 1068 1069 /* Ensure that we do not overflow the maximum number of links imposed 1070 * by the system. */ 1071 KKASSERT(node->tn_links <= LINK_MAX); 1072 if (node->tn_links >= LINK_MAX) { 1073 error = EMLINK; 1074 goto out; 1075 } 1076 1077 /* We cannot create links of files marked immutable or append-only. */ 1078 if (node->tn_flags & (IMMUTABLE | APPEND)) { 1079 error = EPERM; 1080 goto out; 1081 } 1082 1083 /* Allocate a new directory entry to represent the node. */ 1084 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node, 1085 ncp->nc_name, ncp->nc_nlen, &de); 1086 if (error != 0) 1087 goto out; 1088 1089 /* Insert the new directory entry into the appropriate directory. */ 1090 tmpfs_dir_attach(dnode, de); 1091 1092 /* vp link count has changed, so update node times. */ 1093 1094 TMPFS_NODE_LOCK(node); 1095 node->tn_status |= TMPFS_NODE_CHANGED; 1096 TMPFS_NODE_UNLOCK(node); 1097 tmpfs_update(vp); 1098 1099 tmpfs_knote(vp, NOTE_LINK); 1100 cache_setunresolved(ap->a_nch); 1101 cache_setvp(ap->a_nch, vp); 1102 error = 0; 1103 1104 out: 1105 TMPFS_NODE_UNLOCK(dnode); 1106 if (error == 0) 1107 tmpfs_knote(dvp, NOTE_WRITE); 1108 return error; 1109 } 1110 1111 /* --------------------------------------------------------------------- */ 1112 1113 static int 1114 tmpfs_nrename(struct vop_nrename_args *ap) 1115 { 1116 struct vnode *fdvp = ap->a_fdvp; 1117 struct namecache *fncp = ap->a_fnch->ncp; 1118 struct vnode *fvp = fncp->nc_vp; 1119 struct vnode *tdvp = ap->a_tdvp; 1120 struct namecache *tncp = ap->a_tnch->ncp; 1121 struct vnode *tvp; 1122 struct tmpfs_dirent *de, *tde; 1123 struct tmpfs_mount *tmp; 1124 struct tmpfs_node *fdnode; 1125 struct tmpfs_node *fnode; 1126 struct tmpfs_node *tnode; 1127 struct tmpfs_node *tdnode; 1128 char *newname; 1129 char *oldname; 1130 int error; 1131 1132 KKASSERT(fdvp->v_mount == fvp->v_mount); 1133 1134 /* 1135 * Because tvp can get overwritten we have to vget it instead of 1136 * just vref or use it, otherwise it's VINACTIVE flag may not get 1137 * cleared and the node won't get destroyed. 1138 */ 1139 error = cache_vget(ap->a_tnch, ap->a_cred, LK_SHARED, &tvp); 1140 if (error == 0) { 1141 tnode = VP_TO_TMPFS_NODE(tvp); 1142 vn_unlock(tvp); 1143 } else { 1144 tnode = NULL; 1145 } 1146 1147 /* Disallow cross-device renames. 1148 * XXX Why isn't this done by the caller? */ 1149 if (fvp->v_mount != tdvp->v_mount || 1150 (tvp != NULL && fvp->v_mount != tvp->v_mount)) { 1151 error = EXDEV; 1152 goto out; 1153 } 1154 1155 tmp = VFS_TO_TMPFS(tdvp->v_mount); 1156 tdnode = VP_TO_TMPFS_DIR(tdvp); 1157 1158 /* If source and target are the same file, there is nothing to do. */ 1159 if (fvp == tvp) { 1160 error = 0; 1161 goto out; 1162 } 1163 1164 fdnode = VP_TO_TMPFS_DIR(fdvp); 1165 fnode = VP_TO_TMPFS_NODE(fvp); 1166 TMPFS_NODE_LOCK(fdnode); 1167 de = tmpfs_dir_lookup(fdnode, fnode, fncp); 1168 TMPFS_NODE_UNLOCK(fdnode); /* XXX depend on namecache lock */ 1169 1170 /* Avoid manipulating '.' and '..' entries. */ 1171 if (de == NULL) { 1172 error = ENOENT; 1173 goto out_locked; 1174 } 1175 KKASSERT(de->td_node == fnode); 1176 1177 /* 1178 * If replacing an entry in the target directory and that entry 1179 * is a directory, it must be empty. 1180 * 1181 * Kern_rename gurantees the destination to be a directory 1182 * if the source is one (it does?). 1183 */ 1184 if (tvp != NULL) { 1185 KKASSERT(tnode != NULL); 1186 1187 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) || 1188 (tdnode->tn_flags & (APPEND | IMMUTABLE))) { 1189 error = EPERM; 1190 goto out_locked; 1191 } 1192 1193 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) { 1194 if (tnode->tn_size > 0) { 1195 error = ENOTEMPTY; 1196 goto out_locked; 1197 } 1198 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) { 1199 error = ENOTDIR; 1200 goto out_locked; 1201 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) { 1202 error = EISDIR; 1203 goto out_locked; 1204 } else { 1205 KKASSERT(fnode->tn_type != VDIR && 1206 tnode->tn_type != VDIR); 1207 } 1208 } 1209 1210 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) || 1211 (fdnode->tn_flags & (APPEND | IMMUTABLE))) { 1212 error = EPERM; 1213 goto out_locked; 1214 } 1215 1216 /* 1217 * Ensure that we have enough memory to hold the new name, if it 1218 * has to be changed. 1219 */ 1220 if (fncp->nc_nlen != tncp->nc_nlen || 1221 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) { 1222 newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone, 1223 M_WAITOK | M_NULLOK); 1224 if (newname == NULL) { 1225 error = ENOSPC; 1226 goto out_locked; 1227 } 1228 bcopy(tncp->nc_name, newname, tncp->nc_nlen); 1229 newname[tncp->nc_nlen] = '\0'; 1230 } else { 1231 newname = NULL; 1232 } 1233 1234 /* 1235 * Unlink entry from source directory. Note that the kernel has 1236 * already checked for illegal recursion cases (renaming a directory 1237 * into a subdirectory of itself). 1238 */ 1239 if (fdnode != tdnode) { 1240 tmpfs_dir_detach(fdnode, de); 1241 } else { 1242 /* XXX depend on namecache lock */ 1243 TMPFS_NODE_LOCK(fdnode); 1244 KKASSERT(de == tmpfs_dir_lookup(fdnode, fnode, fncp)); 1245 RB_REMOVE(tmpfs_dirtree, &fdnode->tn_dir.tn_dirtree, de); 1246 RB_REMOVE(tmpfs_dirtree_cookie, 1247 &fdnode->tn_dir.tn_cookietree, de); 1248 TMPFS_NODE_UNLOCK(fdnode); 1249 } 1250 1251 /* 1252 * Handle any name change. Swap with newname, we will 1253 * deallocate it at the end. 1254 */ 1255 if (newname != NULL) { 1256 #if 0 1257 TMPFS_NODE_LOCK(fnode); 1258 fnode->tn_status |= TMPFS_NODE_CHANGED; 1259 TMPFS_NODE_UNLOCK(fnode); 1260 #endif 1261 oldname = de->td_name; 1262 de->td_name = newname; 1263 de->td_namelen = (uint16_t)tncp->nc_nlen; 1264 newname = oldname; 1265 } 1266 1267 /* 1268 * If we are overwriting an entry, we have to remove the old one 1269 * from the target directory. 1270 */ 1271 if (tvp != NULL) { 1272 /* Remove the old entry from the target directory. */ 1273 TMPFS_NODE_LOCK(tdnode); 1274 tde = tmpfs_dir_lookup(tdnode, tnode, tncp); 1275 tmpfs_dir_detach(tdnode, tde); 1276 TMPFS_NODE_UNLOCK(tdnode); 1277 tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE); 1278 1279 /* 1280 * Free the directory entry we just deleted. Note that the 1281 * node referred by it will not be removed until the vnode is 1282 * really reclaimed. 1283 */ 1284 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde); 1285 /*cache_inval_vp(tvp, CINV_DESTROY);*/ 1286 } 1287 1288 /* 1289 * Link entry to target directory. If the entry 1290 * represents a directory move the parent linkage 1291 * as well. 1292 */ 1293 if (fdnode != tdnode) { 1294 if (de->td_node->tn_type == VDIR) { 1295 TMPFS_VALIDATE_DIR(fnode); 1296 } 1297 tmpfs_dir_attach(tdnode, de); 1298 } else { 1299 TMPFS_NODE_LOCK(tdnode); 1300 tdnode->tn_status |= TMPFS_NODE_MODIFIED; 1301 RB_INSERT(tmpfs_dirtree, &tdnode->tn_dir.tn_dirtree, de); 1302 RB_INSERT(tmpfs_dirtree_cookie, 1303 &tdnode->tn_dir.tn_cookietree, de); 1304 TMPFS_NODE_UNLOCK(tdnode); 1305 } 1306 1307 /* 1308 * Finish up 1309 */ 1310 if (newname) { 1311 kfree(newname, tmp->tm_name_zone); 1312 newname = NULL; 1313 } 1314 cache_rename(ap->a_fnch, ap->a_tnch); 1315 tmpfs_knote(ap->a_fdvp, NOTE_WRITE); 1316 tmpfs_knote(ap->a_tdvp, NOTE_WRITE); 1317 if (fnode->tn_vnode) 1318 tmpfs_knote(fnode->tn_vnode, NOTE_RENAME); 1319 error = 0; 1320 1321 out_locked: 1322 ; 1323 out: 1324 if (tvp) 1325 vrele(tvp); 1326 return error; 1327 } 1328 1329 /* --------------------------------------------------------------------- */ 1330 1331 static int 1332 tmpfs_nmkdir(struct vop_nmkdir_args *ap) 1333 { 1334 struct vnode *dvp = ap->a_dvp; 1335 struct vnode **vpp = ap->a_vpp; 1336 struct namecache *ncp = ap->a_nch->ncp; 1337 struct vattr *vap = ap->a_vap; 1338 struct ucred *cred = ap->a_cred; 1339 int error; 1340 1341 KKASSERT(vap->va_type == VDIR); 1342 1343 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL); 1344 if (error == 0) { 1345 cache_setunresolved(ap->a_nch); 1346 cache_setvp(ap->a_nch, *vpp); 1347 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK); 1348 } 1349 return error; 1350 } 1351 1352 /* --------------------------------------------------------------------- */ 1353 1354 static int 1355 tmpfs_nrmdir(struct vop_nrmdir_args *ap) 1356 { 1357 struct vnode *dvp = ap->a_dvp; 1358 struct namecache *ncp = ap->a_nch->ncp; 1359 struct vnode *vp; 1360 struct tmpfs_dirent *de; 1361 struct tmpfs_mount *tmp; 1362 struct tmpfs_node *dnode; 1363 struct tmpfs_node *node; 1364 int error; 1365 1366 /* 1367 * We have to acquire the vp from ap->a_nch because we will likely 1368 * unresolve the namecache entry, and a vrele/vput is needed to 1369 * trigger the tmpfs_inactive/tmpfs_reclaim sequence. 1370 * 1371 * We have to use vget to clear any inactive state on the vnode, 1372 * otherwise the vnode may remain inactive and thus tmpfs_inactive 1373 * will not get called when we release it. 1374 */ 1375 error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp); 1376 KKASSERT(error == 0); 1377 vn_unlock(vp); 1378 1379 /* 1380 * Prevalidate so we don't hit an assertion later 1381 */ 1382 if (vp->v_type != VDIR) { 1383 error = ENOTDIR; 1384 goto out; 1385 } 1386 1387 tmp = VFS_TO_TMPFS(dvp->v_mount); 1388 dnode = VP_TO_TMPFS_DIR(dvp); 1389 node = VP_TO_TMPFS_DIR(vp); 1390 1391 /* 1392 * Directories with more than two entries ('.' and '..') cannot 1393 * be removed. 1394 */ 1395 if (node->tn_size > 0) { 1396 error = ENOTEMPTY; 1397 goto out; 1398 } 1399 1400 if ((dnode->tn_flags & APPEND) 1401 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) { 1402 error = EPERM; 1403 goto out; 1404 } 1405 1406 /* 1407 * This invariant holds only if we are not trying to 1408 * remove "..". We checked for that above so this is safe now. 1409 */ 1410 KKASSERT(node->tn_dir.tn_parent == dnode); 1411 1412 /* 1413 * Get the directory entry associated with node (vp). This 1414 * was filled by tmpfs_lookup while looking up the entry. 1415 */ 1416 TMPFS_NODE_LOCK(dnode); 1417 de = tmpfs_dir_lookup(dnode, node, ncp); 1418 KKASSERT(TMPFS_DIRENT_MATCHES(de, ncp->nc_name, ncp->nc_nlen)); 1419 1420 /* Check flags to see if we are allowed to remove the directory. */ 1421 if ((dnode->tn_flags & APPEND) || 1422 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) { 1423 error = EPERM; 1424 TMPFS_NODE_UNLOCK(dnode); 1425 goto out; 1426 } 1427 1428 /* Detach the directory entry from the directory (dnode). */ 1429 tmpfs_dir_detach(dnode, de); 1430 TMPFS_NODE_UNLOCK(dnode); 1431 1432 /* No vnode should be allocated for this entry from this point */ 1433 TMPFS_NODE_LOCK(dnode); 1434 TMPFS_ASSERT_ELOCKED(dnode); 1435 TMPFS_NODE_LOCK(node); 1436 TMPFS_ASSERT_ELOCKED(node); 1437 1438 /* 1439 * Must set parent linkage to NULL (tested by ncreate to disallow 1440 * the creation of new files/dirs in a deleted directory) 1441 */ 1442 node->tn_status |= TMPFS_NODE_CHANGED; 1443 1444 dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | 1445 TMPFS_NODE_MODIFIED; 1446 1447 TMPFS_NODE_UNLOCK(node); 1448 TMPFS_NODE_UNLOCK(dnode); 1449 1450 /* Free the directory entry we just deleted. Note that the node 1451 * referred by it will not be removed until the vnode is really 1452 * reclaimed. */ 1453 tmpfs_free_dirent(tmp, de); 1454 1455 /* Release the deleted vnode (will destroy the node, notify 1456 * interested parties and clean it from the cache). */ 1457 1458 TMPFS_NODE_LOCK(dnode); 1459 dnode->tn_status |= TMPFS_NODE_CHANGED; 1460 TMPFS_NODE_UNLOCK(dnode); 1461 tmpfs_update(dvp); 1462 1463 cache_unlink(ap->a_nch); 1464 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK); 1465 error = 0; 1466 1467 out: 1468 vrele(vp); 1469 1470 return error; 1471 } 1472 1473 /* --------------------------------------------------------------------- */ 1474 1475 static int 1476 tmpfs_nsymlink(struct vop_nsymlink_args *ap) 1477 { 1478 struct vnode *dvp = ap->a_dvp; 1479 struct vnode **vpp = ap->a_vpp; 1480 struct namecache *ncp = ap->a_nch->ncp; 1481 struct vattr *vap = ap->a_vap; 1482 struct ucred *cred = ap->a_cred; 1483 char *target = ap->a_target; 1484 int error; 1485 1486 vap->va_type = VLNK; 1487 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target); 1488 if (error == 0) { 1489 tmpfs_knote(*vpp, NOTE_WRITE); 1490 cache_setunresolved(ap->a_nch); 1491 cache_setvp(ap->a_nch, *vpp); 1492 } 1493 return error; 1494 } 1495 1496 /* --------------------------------------------------------------------- */ 1497 1498 static int 1499 tmpfs_readdir(struct vop_readdir_args *ap) 1500 { 1501 struct vnode *vp = ap->a_vp; 1502 struct uio *uio = ap->a_uio; 1503 int *eofflag = ap->a_eofflag; 1504 off_t **cookies = ap->a_cookies; 1505 int *ncookies = ap->a_ncookies; 1506 struct tmpfs_mount *tmp; 1507 int error; 1508 off_t startoff; 1509 off_t cnt = 0; 1510 struct tmpfs_node *node; 1511 1512 /* This operation only makes sense on directory nodes. */ 1513 if (vp->v_type != VDIR) { 1514 return ENOTDIR; 1515 } 1516 1517 tmp = VFS_TO_TMPFS(vp->v_mount); 1518 node = VP_TO_TMPFS_DIR(vp); 1519 startoff = uio->uio_offset; 1520 1521 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) { 1522 error = tmpfs_dir_getdotdent(node, uio); 1523 if (error != 0) { 1524 TMPFS_NODE_LOCK_SH(node); 1525 goto outok; 1526 } 1527 cnt++; 1528 } 1529 1530 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) { 1531 /* may lock parent, cannot hold node lock */ 1532 error = tmpfs_dir_getdotdotdent(tmp, node, uio); 1533 if (error != 0) { 1534 TMPFS_NODE_LOCK_SH(node); 1535 goto outok; 1536 } 1537 cnt++; 1538 } 1539 1540 TMPFS_NODE_LOCK_SH(node); 1541 error = tmpfs_dir_getdents(node, uio, &cnt); 1542 1543 outok: 1544 KKASSERT(error >= -1); 1545 1546 if (error == -1) 1547 error = 0; 1548 1549 if (eofflag != NULL) 1550 *eofflag = 1551 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF); 1552 1553 /* Update NFS-related variables. */ 1554 if (error == 0 && cookies != NULL && ncookies != NULL) { 1555 off_t i; 1556 off_t off = startoff; 1557 struct tmpfs_dirent *de = NULL; 1558 1559 *ncookies = cnt; 1560 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK); 1561 1562 for (i = 0; i < cnt; i++) { 1563 KKASSERT(off != TMPFS_DIRCOOKIE_EOF); 1564 if (off == TMPFS_DIRCOOKIE_DOT) { 1565 off = TMPFS_DIRCOOKIE_DOTDOT; 1566 } else { 1567 if (off == TMPFS_DIRCOOKIE_DOTDOT) { 1568 de = RB_MIN(tmpfs_dirtree_cookie, 1569 &node->tn_dir.tn_cookietree); 1570 } else if (de != NULL) { 1571 de = RB_NEXT(tmpfs_dirtree_cookie, 1572 &node->tn_dir.tn_cookietree, de); 1573 } else { 1574 de = tmpfs_dir_lookupbycookie(node, 1575 off); 1576 KKASSERT(de != NULL); 1577 de = RB_NEXT(tmpfs_dirtree_cookie, 1578 &node->tn_dir.tn_cookietree, de); 1579 } 1580 if (de == NULL) 1581 off = TMPFS_DIRCOOKIE_EOF; 1582 else 1583 off = tmpfs_dircookie(de); 1584 } 1585 (*cookies)[i] = off; 1586 } 1587 KKASSERT(uio->uio_offset == off); 1588 } 1589 TMPFS_NODE_UNLOCK(node); 1590 1591 if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) { 1592 TMPFS_NODE_LOCK(node); 1593 node->tn_status |= TMPFS_NODE_ACCESSED; 1594 TMPFS_NODE_UNLOCK(node); 1595 } 1596 return error; 1597 } 1598 1599 /* --------------------------------------------------------------------- */ 1600 1601 static int 1602 tmpfs_readlink(struct vop_readlink_args *ap) 1603 { 1604 struct vnode *vp = ap->a_vp; 1605 struct uio *uio = ap->a_uio; 1606 int error; 1607 struct tmpfs_node *node; 1608 1609 KKASSERT(uio->uio_offset == 0); 1610 KKASSERT(vp->v_type == VLNK); 1611 1612 node = VP_TO_TMPFS_NODE(vp); 1613 TMPFS_NODE_LOCK_SH(node); 1614 error = uiomove(node->tn_link, 1615 MIN(node->tn_size, uio->uio_resid), uio); 1616 TMPFS_NODE_UNLOCK(node); 1617 if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) { 1618 TMPFS_NODE_LOCK(node); 1619 node->tn_status |= TMPFS_NODE_ACCESSED; 1620 TMPFS_NODE_UNLOCK(node); 1621 } 1622 return error; 1623 } 1624 1625 /* --------------------------------------------------------------------- */ 1626 1627 static int 1628 tmpfs_inactive(struct vop_inactive_args *ap) 1629 { 1630 struct vnode *vp = ap->a_vp; 1631 struct tmpfs_node *node; 1632 struct mount *mp; 1633 1634 mp = vp->v_mount; 1635 lwkt_gettoken(&mp->mnt_token); 1636 node = VP_TO_TMPFS_NODE(vp); 1637 1638 /* 1639 * Degenerate case 1640 */ 1641 if (node == NULL) { 1642 vrecycle(vp); 1643 lwkt_reltoken(&mp->mnt_token); 1644 return(0); 1645 } 1646 1647 /* 1648 * Get rid of unreferenced deleted vnodes sooner rather than 1649 * later so the data memory can be recovered immediately. 1650 * 1651 * We must truncate the vnode to prevent the normal reclamation 1652 * path from flushing the data for the removed file to disk. 1653 */ 1654 TMPFS_NODE_LOCK(node); 1655 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 && 1656 node->tn_links == 0) 1657 { 1658 node->tn_vpstate = TMPFS_VNODE_DOOMED; 1659 TMPFS_NODE_UNLOCK(node); 1660 if (node->tn_type == VREG) 1661 tmpfs_truncate(vp, 0); 1662 vrecycle(vp); 1663 } else { 1664 /* 1665 * We must retain any VM pages belonging to the vnode's 1666 * object as the vnode will destroy the object during a 1667 * later reclaim. We call vinvalbuf(V_SAVE) to clean 1668 * out the buffer cache. 1669 * 1670 * On DragonFlyBSD, vnodes are not immediately deactivated 1671 * on the 1->0 refs, so this is a relatively optimal 1672 * operation. We have to do this in tmpfs_inactive() 1673 * because the pages will have already been thrown away 1674 * at the time tmpfs_reclaim() is called. 1675 */ 1676 if (node->tn_type == VREG && 1677 node->tn_reg.tn_pages_in_aobj == 0) { 1678 vinvalbuf(vp, V_SAVE, 0, 0); 1679 KKASSERT(RB_EMPTY(&vp->v_rbdirty_tree)); 1680 KKASSERT(RB_EMPTY(&vp->v_rbclean_tree)); 1681 tmpfs_move_pages(vp->v_object, node->tn_reg.tn_aobj); 1682 node->tn_reg.tn_pages_in_aobj = 1; 1683 } 1684 1685 TMPFS_NODE_UNLOCK(node); 1686 } 1687 lwkt_reltoken(&mp->mnt_token); 1688 1689 return 0; 1690 } 1691 1692 /* --------------------------------------------------------------------- */ 1693 1694 int 1695 tmpfs_reclaim(struct vop_reclaim_args *ap) 1696 { 1697 struct vnode *vp = ap->a_vp; 1698 struct tmpfs_mount *tmp; 1699 struct tmpfs_node *node; 1700 struct mount *mp; 1701 1702 mp = vp->v_mount; 1703 lwkt_gettoken(&mp->mnt_token); 1704 1705 node = VP_TO_TMPFS_NODE(vp); 1706 tmp = VFS_TO_TMPFS(vp->v_mount); 1707 KKASSERT(mp == tmp->tm_mount); 1708 1709 TMPFS_NODE_LOCK(node); 1710 KKASSERT(node->tn_vnode == vp); 1711 node->tn_vnode = NULL; 1712 vp->v_data = NULL; 1713 1714 /* 1715 * If the node referenced by this vnode was deleted by the 1716 * user, we must free its associated data structures now that 1717 * the vnode is being reclaimed. 1718 * 1719 * Directories have an extra link ref. 1720 */ 1721 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 && 1722 node->tn_links == 0) { 1723 node->tn_vpstate = TMPFS_VNODE_DOOMED; 1724 tmpfs_free_node(tmp, node); 1725 /* eats the lock */ 1726 } else { 1727 TMPFS_NODE_UNLOCK(node); 1728 } 1729 lwkt_reltoken(&mp->mnt_token); 1730 1731 KKASSERT(vp->v_data == NULL); 1732 return 0; 1733 } 1734 1735 /* --------------------------------------------------------------------- */ 1736 1737 static int 1738 tmpfs_mountctl(struct vop_mountctl_args *ap) 1739 { 1740 struct tmpfs_mount *tmp; 1741 struct mount *mp; 1742 int rc; 1743 1744 mp = ap->a_head.a_ops->head.vv_mount; 1745 lwkt_gettoken(&mp->mnt_token); 1746 1747 switch (ap->a_op) { 1748 case (MOUNTCTL_SET_EXPORT): 1749 tmp = (struct tmpfs_mount *) mp->mnt_data; 1750 1751 if (ap->a_ctllen != sizeof(struct export_args)) 1752 rc = (EINVAL); 1753 else 1754 rc = vfs_export(mp, &tmp->tm_export, 1755 (const struct export_args *) ap->a_ctl); 1756 break; 1757 default: 1758 rc = vop_stdmountctl(ap); 1759 break; 1760 } 1761 1762 lwkt_reltoken(&mp->mnt_token); 1763 return (rc); 1764 } 1765 1766 /* --------------------------------------------------------------------- */ 1767 1768 static int 1769 tmpfs_print(struct vop_print_args *ap) 1770 { 1771 struct vnode *vp = ap->a_vp; 1772 1773 struct tmpfs_node *node; 1774 1775 node = VP_TO_TMPFS_NODE(vp); 1776 1777 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n", 1778 node, node->tn_flags, node->tn_links); 1779 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n", 1780 node->tn_mode, node->tn_uid, node->tn_gid, 1781 (uintmax_t)node->tn_size, node->tn_status); 1782 1783 if (vp->v_type == VFIFO) 1784 fifo_printinfo(vp); 1785 1786 kprintf("\n"); 1787 1788 return 0; 1789 } 1790 1791 /* --------------------------------------------------------------------- */ 1792 1793 static int 1794 tmpfs_pathconf(struct vop_pathconf_args *ap) 1795 { 1796 struct vnode *vp = ap->a_vp; 1797 int name = ap->a_name; 1798 register_t *retval = ap->a_retval; 1799 struct tmpfs_mount *tmp; 1800 int error; 1801 1802 error = 0; 1803 1804 switch (name) { 1805 case _PC_CHOWN_RESTRICTED: 1806 *retval = 1; 1807 break; 1808 1809 case _PC_FILESIZEBITS: 1810 tmp = VFS_TO_TMPFS(vp->v_mount); 1811 *retval = max(32, flsll(tmp->tm_pages_max * PAGE_SIZE) + 1); 1812 break; 1813 1814 case _PC_LINK_MAX: 1815 *retval = LINK_MAX; 1816 break; 1817 1818 case _PC_NAME_MAX: 1819 *retval = NAME_MAX; 1820 break; 1821 1822 case _PC_NO_TRUNC: 1823 *retval = 1; 1824 break; 1825 1826 case _PC_PATH_MAX: 1827 *retval = PATH_MAX; 1828 break; 1829 1830 case _PC_PIPE_BUF: 1831 *retval = PIPE_BUF; 1832 break; 1833 1834 case _PC_SYNC_IO: 1835 *retval = 1; 1836 break; 1837 1838 case _PC_2_SYMLINKS: 1839 *retval = 1; 1840 break; 1841 1842 default: 1843 error = EINVAL; 1844 } 1845 1846 return error; 1847 } 1848 1849 /************************************************************************ 1850 * KQFILTER OPS * 1851 ************************************************************************/ 1852 1853 static void filt_tmpfsdetach(struct knote *kn); 1854 static int filt_tmpfsread(struct knote *kn, long hint); 1855 static int filt_tmpfswrite(struct knote *kn, long hint); 1856 static int filt_tmpfsvnode(struct knote *kn, long hint); 1857 1858 static struct filterops tmpfsread_filtops = 1859 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1860 NULL, filt_tmpfsdetach, filt_tmpfsread }; 1861 static struct filterops tmpfswrite_filtops = 1862 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1863 NULL, filt_tmpfsdetach, filt_tmpfswrite }; 1864 static struct filterops tmpfsvnode_filtops = 1865 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1866 NULL, filt_tmpfsdetach, filt_tmpfsvnode }; 1867 1868 static int 1869 tmpfs_kqfilter (struct vop_kqfilter_args *ap) 1870 { 1871 struct vnode *vp = ap->a_vp; 1872 struct knote *kn = ap->a_kn; 1873 1874 switch (kn->kn_filter) { 1875 case EVFILT_READ: 1876 kn->kn_fop = &tmpfsread_filtops; 1877 break; 1878 case EVFILT_WRITE: 1879 kn->kn_fop = &tmpfswrite_filtops; 1880 break; 1881 case EVFILT_VNODE: 1882 kn->kn_fop = &tmpfsvnode_filtops; 1883 break; 1884 default: 1885 return (EOPNOTSUPP); 1886 } 1887 1888 kn->kn_hook = (caddr_t)vp; 1889 1890 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 1891 1892 return(0); 1893 } 1894 1895 static void 1896 filt_tmpfsdetach(struct knote *kn) 1897 { 1898 struct vnode *vp = (void *)kn->kn_hook; 1899 1900 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 1901 } 1902 1903 static int 1904 filt_tmpfsread(struct knote *kn, long hint) 1905 { 1906 struct vnode *vp = (void *)kn->kn_hook; 1907 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp); 1908 off_t off; 1909 1910 if (hint == NOTE_REVOKE) { 1911 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 1912 return(1); 1913 } 1914 1915 /* 1916 * Interlock against MP races when performing this function. 1917 */ 1918 TMPFS_NODE_LOCK_SH(node); 1919 off = node->tn_size - kn->kn_fp->f_offset; 1920 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX; 1921 if (kn->kn_sfflags & NOTE_OLDAPI) { 1922 TMPFS_NODE_UNLOCK(node); 1923 return(1); 1924 } 1925 if (kn->kn_data == 0) { 1926 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX; 1927 } 1928 TMPFS_NODE_UNLOCK(node); 1929 return (kn->kn_data != 0); 1930 } 1931 1932 static int 1933 filt_tmpfswrite(struct knote *kn, long hint) 1934 { 1935 if (hint == NOTE_REVOKE) 1936 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 1937 kn->kn_data = 0; 1938 return (1); 1939 } 1940 1941 static int 1942 filt_tmpfsvnode(struct knote *kn, long hint) 1943 { 1944 if (kn->kn_sfflags & hint) 1945 kn->kn_fflags |= hint; 1946 if (hint == NOTE_REVOKE) { 1947 kn->kn_flags |= (EV_EOF | EV_NODATA); 1948 return (1); 1949 } 1950 return (kn->kn_fflags != 0); 1951 } 1952 1953 /* 1954 * Helper to move VM pages between objects 1955 * 1956 * NOTE: The vm_page_rename() dirties the page, so we can clear the 1957 * PG_NEED_COMMIT flag. If the pages are being moved into tn_aobj, 1958 * the pageout daemon will be able to page them out. 1959 */ 1960 static int 1961 tmpfs_move_pages_callback(vm_page_t p, void *data) 1962 { 1963 struct rb_vm_page_scan_info *info = data; 1964 vm_pindex_t pindex; 1965 1966 pindex = p->pindex; 1967 if (vm_page_busy_try(p, TRUE)) { 1968 vm_page_sleep_busy(p, TRUE, "tpgmov"); 1969 info->error = -1; 1970 return -1; 1971 } 1972 if (p->object != info->object || p->pindex != pindex) { 1973 vm_page_wakeup(p); 1974 info->error = -1; 1975 return -1; 1976 } 1977 vm_page_rename(p, info->dest_object, pindex); 1978 vm_page_clear_commit(p); 1979 vm_page_wakeup(p); 1980 /* page automaticaly made dirty */ 1981 1982 return 0; 1983 } 1984 1985 static 1986 void 1987 tmpfs_move_pages(vm_object_t src, vm_object_t dst) 1988 { 1989 struct rb_vm_page_scan_info info; 1990 1991 vm_object_hold(src); 1992 vm_object_hold(dst); 1993 info.object = src; 1994 info.dest_object = dst; 1995 do { 1996 if (src->paging_in_progress) 1997 vm_object_pip_wait(src, "objtfs"); 1998 info.error = 1; 1999 vm_page_rb_tree_RB_SCAN(&src->rb_memq, NULL, 2000 tmpfs_move_pages_callback, &info); 2001 } while (info.error < 0 || !RB_EMPTY(&src->rb_memq) || 2002 src->paging_in_progress); 2003 vm_object_drop(dst); 2004 vm_object_drop(src); 2005 } 2006 2007 /* --------------------------------------------------------------------- */ 2008 2009 /* 2010 * vnode operations vector used for files stored in a tmpfs file system. 2011 */ 2012 struct vop_ops tmpfs_vnode_vops = { 2013 .vop_default = vop_defaultop, 2014 .vop_getpages = vop_stdgetpages, 2015 .vop_putpages = vop_stdputpages, 2016 .vop_ncreate = tmpfs_ncreate, 2017 .vop_nresolve = tmpfs_nresolve, 2018 .vop_nlookupdotdot = tmpfs_nlookupdotdot, 2019 .vop_nmknod = tmpfs_nmknod, 2020 .vop_open = tmpfs_open, 2021 .vop_close = tmpfs_close, 2022 .vop_access = tmpfs_access, 2023 .vop_getattr = tmpfs_getattr, 2024 .vop_setattr = tmpfs_setattr, 2025 .vop_read = tmpfs_read, 2026 .vop_write = tmpfs_write, 2027 .vop_fsync = tmpfs_fsync, 2028 .vop_mountctl = tmpfs_mountctl, 2029 .vop_nremove = tmpfs_nremove, 2030 .vop_nlink = tmpfs_nlink, 2031 .vop_nrename = tmpfs_nrename, 2032 .vop_nmkdir = tmpfs_nmkdir, 2033 .vop_nrmdir = tmpfs_nrmdir, 2034 .vop_nsymlink = tmpfs_nsymlink, 2035 .vop_readdir = tmpfs_readdir, 2036 .vop_readlink = tmpfs_readlink, 2037 .vop_inactive = tmpfs_inactive, 2038 .vop_reclaim = tmpfs_reclaim, 2039 .vop_print = tmpfs_print, 2040 .vop_pathconf = tmpfs_pathconf, 2041 .vop_bmap = tmpfs_bmap, 2042 .vop_strategy = tmpfs_strategy, 2043 .vop_advlock = tmpfs_advlock, 2044 .vop_kqfilter = tmpfs_kqfilter 2045 }; 2046