/*-
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
 */

/*
 * tmpfs vnode interface.
 */
#include <sys/cdefs.h>

#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vfsops.h>
#include <sys/vnode.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <vfs/fifofs/fifo.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include <vfs/tmpfs/tmpfs.h>

MALLOC_DECLARE(M_TMPFS);

static __inline
void
tmpfs_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nresolve(struct vop_nresolve_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_node *tnode;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;

	dnode = VP_TO_TMPFS_DIR(dvp);

	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(v->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(v->a_nch, NULL);
	}
	return error;
}

static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
	struct ucred *cred = v->a_cred;
	int error;

	*vpp = NULL;
	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cred);
	if (error != 0)
		return error;

	if (dnode->tn_dir.tn_parent != NULL) {
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
				       LK_EXCLUSIVE | LK_RETRY, vpp);

		if (*vpp)
			vn_unlock(*vpp);
	}

	return (*vpp == NULL) ? ENOENT : 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_ncreate(struct vop_ncreate_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmknod(struct vop_nmknod_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO)
		return EINVAL;

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_open(struct vop_open_args *v)
{
	struct vnode *vp = v->a_vp;
	int mode = v->a_mode;

	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/* The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die. */
	if (node->tn_links < 1)
		return (ENOENT);

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		error = EPERM;
	} else {
		return (vop_stdopen(v));
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_close(struct vop_close_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links > 0) {
		/* Update node times.  No need to do it if the node has
		 * been deleted, because it will vanish after we return. */
		tmpfs_update(vp);
	}

	return vop_stdclose(v);
}

/* --------------------------------------------------------------------- */

int
tmpfs_access(struct vop_access_args *v)
{
	struct vnode *vp = v->a_vp;
	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	switch (vp->v_type) {
	case VDIR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		if ((v->a_mode & VWRITE) &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto out;
		}
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		/* FALLTHROUGH */
	case VFIFO:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
		error = EPERM;
		goto out;
	}

	error = vop_helper_access(v, node->tn_uid, node->tn_gid,
				  node->tn_mode, 0);

out:
	return error;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr(struct vop_getattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime.tv_sec = node->tn_atime;
	vap->va_atime.tv_nsec = node->tn_atimensec;
	vap->va_mtime.tv_sec = node->tn_mtime;
	vap->va_mtime.tv_nsec = node->tn_mtimensec;
	vap->va_ctime.tv_sec = node->tn_ctime;
	vap->va_ctime.tv_nsec = node->tn_ctimensec;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_setattr(struct vop_setattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	int error = 0;
	int kflags = 0;

	if (error == 0 && (vap->va_flags != VNOVAL)) {
		error = tmpfs_chflags(vp, vap->va_flags, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_size != VNOVAL)) {
		if (vap->va_size > node->tn_size)
			kflags |= NOTE_WRITE | NOTE_EXTEND;
		else
			kflags |= NOTE_WRITE;
		error = tmpfs_chsize(vp, vap->va_size, cred);
	}

	if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
			   vap->va_gid != (gid_t)VNOVAL)) {
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
		error = tmpfs_chmod(vp, vap->va_mode, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
			    vap->va_atime.tv_nsec != VNOVAL) ||
			   (vap->va_mtime.tv_sec != VNOVAL &&
			    vap->va_mtime.tv_nsec != VNOVAL))) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
				      vap->va_vaflags, cred);
		kflags |= NOTE_ATTRIB;
	}

	/* Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update. */
	tmpfs_update(vp);
	tmpfs_knote(vp, kflags);

	return error;
}

/* --------------------------------------------------------------------- */

/*
 * fsync is usually a NOP, but we must take action when unmounting or
 * when recycling.
 */
static int
tmpfs_fsync(struct vop_fsync_args *v)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct vnode *vp = v->a_vp;

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);
	if (vp->v_type == VREG) {
		if (vp->v_flag & VRECLAIMED) {
			if (node->tn_links == 0)
				tmpfs_truncate(vp, 0);
			else
				vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
		}
	}
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_read (struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct tmpfs_node *node;
	off_t base_offset;
	size_t offset;
	size_t len;
	int got_mplock;
	int error;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (uio->uio_offset < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (EINVAL);

#ifdef SMP
	if (curthread->td_mpcount)
		got_mplock = -1;
	else
		got_mplock = 0;
#else
	got_mplock = -1;
#endif
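	/*
	 * got_mplock is used as a tri-state here: -1 means the MP lock is
	 * already held (or this is a UP kernel) and must not be released
	 * by us, 0 means it has not been acquired yet, and 1 means it was
	 * acquired in this function and is released before returning.
	 */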

	while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset);
		if (bp == NULL) {
			if (got_mplock == 0) {
				got_mplock = 1;
				get_mplock();
			}

			error = bread(vp, base_offset, BSIZE, &bp);
			if (error) {
				brelse(bp);
				kprintf("tmpfs_read bread error %d\n", error);
				break;
			}
		}

		if (got_mplock == 0) {
			got_mplock = 1;
			get_mplock();
		}

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > node->tn_size - uio->uio_offset)
			len = (size_t)(node->tn_size - uio->uio_offset);

		error = uiomove((char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("tmpfs_read uiomove error %d\n", error);
			break;
		}
	}

	if (got_mplock > 0)
		rel_mplock();

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return(error);
}

static int
tmpfs_write (struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int got_mplock;
	int trivial = 0;
	int kflags = 0;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
		return (EFBIG);

	if (vp->v_type == VREG && td != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error != 0)
			return error;
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			return (EFBIG);
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

#ifdef SMP
	if (curthread->td_mpcount) {
		got_mplock = -1;
	} else {
		got_mplock = 1;
		get_mplock();
	}
#else
	got_mplock = -1;
#endif
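	/*
	 * Unlike the read path, the write path takes the MP lock up front
	 * when it is not already held, presumably because every pass
	 * through the loop below ends up in bread() and one of the
	 * b*write() routines anyway.
	 */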
	while (uio->uio_resid > 0) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len,
						 trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread(vp, base_offset, BSIZE, &bp);
		if (error) {
			brelse(bp);
			kprintf("tmpfs_write bread error %d\n", error);
			break;
		}
		error = uiomove((char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size) {
			node->tn_size = uio->uio_offset;
			kflags |= NOTE_EXTEND;
		}
		kflags |= NOTE_WRITE;

		/*
		 * The data has been loaded into the buffer, write it out.
		 *
		 * We want tmpfs to be able to use all available ram, not
		 * just the buffer cache, so if not explicitly paging we
		 * use buwrite() to leave the buffer clean but mark all the
		 * VM pages valid+dirty.
		 *
		 * When the kernel is paging, either via normal pageout
		 * operation or when cleaning the object during a recycle,
		 * the underlying VM pages are going to get thrown away
		 * so we MUST write them to swap.
		 *
		 * XXX unfortunately this catches msync() system calls too
		 * for the moment.
		 */
		if (vm_swap_size == 0) {
			/*
			 * if swap isn't configured yet, force a buwrite() to
			 * avoid problems further down the line, due to
			 * flushing to swap.
			 */
			buwrite(bp);
		} else {
			if (ap->a_ioflag & IO_SYNC) {
				bwrite(bp);
			} else if ((ap->a_ioflag & IO_ASYNC) ||
				   (uio->uio_segflg == UIO_NOCOPY)) {
				bawrite(bp);
			} else {
				buwrite(bp);
			}
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (got_mplock > 0)
		rel_mplock();

	if (error) {
		if (extended) {
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
			   (extended ? TMPFS_NODE_CHANGED : 0);

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
	TMPFS_NODE_UNLOCK(node);
done:
	tmpfs_knote(vp, kflags);
	return(error);
}

static int
tmpfs_advlock (struct vop_advlock_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;

	node = VP_TO_TMPFS_NODE(vp);

	return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
}

static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Call swap_pager_strategy to read or write between the VM
	 * object and the buffer cache.
	 */
	swap_pager_strategy(uobj, bio);

	return 0;
}

static int
tmpfs_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nremove(struct vop_nremove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/*
	 * We have to acquire the vp from v->a_nch because
	 * we will likely unresolve the namecache entry, and
	 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
	 * sequence to recover space from the file.
	 */
	error = cache_vref(v->a_nch, v->a_cred, &vp);
	KKASSERT(error == 0);

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	de = tmpfs_dir_lookup(dnode, node, ncp);
	if (de == NULL) {
		error = ENOENT;
		goto out;
	}

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	if (node->tn_links > 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
				   TMPFS_NODE_MODIFIED;
		TMPFS_NODE_UNLOCK(node);
	}

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	tmpfs_knote(vp, NOTE_DELETE);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	tmpfs_knote(dvp, NOTE_WRITE);
	error = 0;

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nlink(struct vop_nlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;
	struct tmpfs_node *dnode;
	int error;

	KKASSERT(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);
	dnode = VP_TO_TMPFS_NODE(dvp);

	/* XXX: Why aren't the following two tests done by the caller? */

	/* Hard links of directories are forbidden. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	/* Cannot create cross-device links. */
	if (dvp->v_mount != vp->v_mount) {
		error = EXDEV;
		goto out;
	}

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	KKASSERT(node->tn_links <= LINK_MAX);
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
				   ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	tmpfs_dir_attach(dnode, de);

	/* vp link count has changed, so update node times. */
	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);
	tmpfs_update(vp);

	tmpfs_knote(vp, NOTE_LINK);
	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, vp);
	tmpfs_knote(dvp, NOTE_WRITE);
	error = 0;

out:
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrename(struct vop_nrename_args *v)
{
	struct vnode *fdvp = v->a_fdvp;
	struct namecache *fncp = v->a_fnch->ncp;
	struct vnode *fvp = fncp->nc_vp;
	struct vnode *tdvp = v->a_tdvp;
	struct namecache *tncp = v->a_tnch->ncp;
	struct vnode *tvp = tncp->nc_vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;
	char *newname;
	char *oldname;
	int error;

	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	de = tmpfs_dir_lookup(fdnode, fnode, fncp);

	/* Avoid manipulating '.' and '..' entries. */
	if (de == NULL) {
		error = ENOENT;
		goto out_locked;
	}
	KKASSERT(de->td_node == fnode);

	/*
	 * If replacing an entry in the target directory and that entry
	 * is a directory, it must be empty.
	 *
	 * Kern_rename guarantees the destination to be a directory
	 * if the source is one (it does?).
	 */
	if (tvp != NULL) {
		KKASSERT(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			KKASSERT(fnode->tn_type != VDIR &&
				 tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
	    (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * Ensure that we have enough memory to hold the new name, if it
	 * has to be changed.
	 */
	if (fncp->nc_nlen != tncp->nc_nlen ||
	    bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
		newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
				  M_WAITOK | M_NULLOK);
		if (newname == NULL) {
			error = ENOSPC;
			goto out_locked;
		}
		bcopy(tncp->nc_name, newname, tncp->nc_nlen);
		newname[tncp->nc_nlen] = '\0';
	} else {
		newname = NULL;
	}

	/*
	 * Unlink entry from source directory.  Note that the kernel has
	 * already checked for illegal recursion cases (renaming a directory
	 * into a subdirectory of itself).
	 */
	if (fdnode != tdnode)
		tmpfs_dir_detach(fdnode, de);

	/*
	 * Handle any name change.  Swap with newname, we will
	 * deallocate it at the end.
	 */
	if (newname != NULL) {
#if 0
		TMPFS_NODE_LOCK(fnode);
		fnode->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(fnode);
#endif
		oldname = de->td_name;
		de->td_name = newname;
		de->td_namelen = (uint16_t)tncp->nc_nlen;
		newname = oldname;
	}

	/*
	 * Link entry to target directory.  If the entry
	 * represents a directory move the parent linkage
	 * as well.
	 */
	if (fdnode != tdnode) {
		if (de->td_node->tn_type == VDIR) {
			TMPFS_VALIDATE_DIR(fnode);

			TMPFS_NODE_LOCK(tdnode);
			tdnode->tn_links++;
			tdnode->tn_status |= TMPFS_NODE_MODIFIED;
			TMPFS_NODE_UNLOCK(tdnode);

			TMPFS_NODE_LOCK(fnode);
			fnode->tn_dir.tn_parent = tdnode;
			fnode->tn_status |= TMPFS_NODE_CHANGED;
			TMPFS_NODE_UNLOCK(fnode);

			TMPFS_NODE_LOCK(fdnode);
			fdnode->tn_links--;
			fdnode->tn_status |= TMPFS_NODE_MODIFIED;
			TMPFS_NODE_UNLOCK(fdnode);
		}
		tmpfs_dir_attach(tdnode, de);
	} else {
		TMPFS_NODE_LOCK(tdnode);
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
		TMPFS_NODE_UNLOCK(tdnode);
	}

	/*
	 * If we are overwriting an entry, we have to remove the old one
	 * from the target directory.
	 */
	if (tvp != NULL) {
		/* Remove the old entry from the target directory. */
		de = tmpfs_dir_lookup(tdnode, tnode, tncp);
		tmpfs_dir_detach(tdnode, de);
		tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);

		/*
		 * Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed.
		 */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
		/*cache_inval_vp(tvp, CINV_DESTROY);*/
	}

	/*
	 * Finish up
	 */
	if (newname) {
		kfree(newname, tmp->tm_name_zone);
		newname = NULL;
	}
	cache_rename(v->a_fnch, v->a_tnch);
	tmpfs_knote(v->a_fdvp, NOTE_WRITE);
	tmpfs_knote(v->a_tdvp, NOTE_WRITE);
	if (fnode->tn_vnode)
		tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
	error = 0;

out_locked:
	;

out:
	/* Release target nodes. */
	/* XXX: I don't understand when tdvp can be the same as tvp, but
	 * other code takes care of this... */
	if (tdvp == tvp)
		vrele(tdvp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmkdir(struct vop_nmkdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	KKASSERT(vap->va_type == VDIR);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrmdir(struct vop_nrmdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	int error;

	/*
	 * We have to acquire the vp from v->a_nch because
	 * we will likely unresolve the namecache entry, and
	 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
	 * sequence.
	 */
	error = cache_vref(v->a_nch, v->a_cred, &vp);
	KKASSERT(error == 0);

	/*
	 * Prevalidate so we don't hit an assertion later
	 */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/* Directories with more than two entries ('.' and '..') cannot be
	 * removed. */
	if (node->tn_size > 0) {
		error = ENOTEMPTY;
		goto out;
	}

	if ((dnode->tn_flags & APPEND) ||
	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* This invariant holds only if we are not trying to remove "..".
	 * We checked for that above so this is safe now. */
	KKASSERT(node->tn_dir.tn_parent == dnode);

	/* Get the directory entry associated with node (vp).  This was
	 * filled by tmpfs_lookup while looking up the entry. */
	de = tmpfs_dir_lookup(dnode, node, ncp);
	KKASSERT(TMPFS_DIRENT_MATCHES(de, ncp->nc_name, ncp->nc_nlen));

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND) ||
	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* Detach the directory entry from the directory (dnode). */
	tmpfs_dir_detach(dnode, de);

	/* No vnode should be allocated for this entry from this point */
	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);
	TMPFS_NODE_LOCK(dnode);
	TMPFS_ASSERT_ELOCKED(dnode);

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(node->tn_links > 0);
	node->tn_links--;
	node->tn_dir.tn_parent = NULL;
#endif
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			   TMPFS_NODE_MODIFIED;

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(dnode->tn_links > 0);
	dnode->tn_links--;
#endif
	dnode->tn_status |= TMPFS_NODE_ACCESSED |
			    TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

	TMPFS_NODE_UNLOCK(dnode);
	TMPFS_NODE_UNLOCK(node);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache). */

	TMPFS_NODE_LOCK(dnode);
	dnode->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(dnode);
	tmpfs_update(dvp);

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	error = 0;

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nsymlink(struct vop_nsymlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	char *target = v->a_target;
	int error;

	vap->va_type = VLNK;
	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
	if (error == 0) {
		tmpfs_knote(*vpp, NOTE_WRITE);
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readdir(struct vop_readdir_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	int *eofflag = v->a_eofflag;
	off_t **cookies = v->a_cookies;
	int *ncookies = v->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR)
		return ENOTDIR;

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0)
			goto outok;
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0)
			goto outok;
		cnt++;
	}

	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables. */
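	/*
	 * If the caller asked for cookies (e.g. the NFS server), rebuild
	 * one cookie per entry returned by re-walking the directory from
	 * the starting offset; each cookie is the offset at which to
	 * resume after that entry (the next entry's cookie, or
	 * TMPFS_DIRCOOKIE_EOF).
	 */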
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
				} else if (de != NULL) {
					de = TAILQ_NEXT(de, td_entries);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = TAILQ_NEXT(de, td_entries);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}

			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readlink(struct vop_readlink_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;

	int error;
	struct tmpfs_node *node;

	KKASSERT(uio->uio_offset == 0);
	KKASSERT(vp->v_type == VLNK);

	node = VP_TO_TMPFS_NODE(vp);

	error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
			uio);
	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    (node->tn_links == 0 ||
	     (node->tn_links == 1 && node->tn_type == VDIR &&
	      node->tn_dir.tn_parent)))
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the
	 * user, we must free its associated data structures now that
	 * the vnode is being reclaimed.
	 *
	 * Directories have an extra link ref.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    (node->tn_links == 0 ||
	     (node->tn_links == 1 && node->tn_type == VDIR &&
	      node->tn_dir.tn_parent)))
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		tmpfs_free_node(tmp, node);
		/* eats the lock */
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

	KKASSERT(vp->v_data == NULL);
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_print(struct vop_print_args *v)
{
	struct vnode *vp = v->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
		node, node->tn_flags, node->tn_links);
	kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
		node->tn_mode, node->tn_uid, node->tn_gid,
		(uintmax_t)node->tn_size, node->tn_status);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	kprintf("\n");

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_pathconf(struct vop_pathconf_args *v)
{
	int name = v->a_name;
	register_t *retval = v->a_retval;

	int error;

	error = 0;

	switch (name) {
	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		break;

	case _PC_NAME_MAX:
		*retval = NAME_MAX;
		break;

	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		break;

	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		break;

	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;

	case _PC_NO_TRUNC:
		*retval = 1;
		break;

	case _PC_SYNC_IO:
		*retval = 1;
		break;

	case _PC_FILESIZEBITS:
		*retval = 0; /* XXX Don't know which value I should return. */
		break;

	default:
		error = EINVAL;
	}

	return error;
}

/************************************************************************
 *			      KQFILTER OPS				*
 ************************************************************************/

static void filt_tmpfsdetach(struct knote *kn);
static int filt_tmpfsread(struct knote *kn, long hint);
static int filt_tmpfswrite(struct knote *kn, long hint);
static int filt_tmpfsvnode(struct knote *kn, long hint);

static struct filterops tmpfsread_filtops =
	{ FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
static struct filterops tmpfswrite_filtops =
	{ FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
static struct filterops tmpfsvnode_filtops =
	{ FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };

static int
tmpfs_kqfilter (struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tmpfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tmpfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &tmpfsvnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_tmpfsdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_tmpfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return(1);
	}
	off = node->tn_size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);

	/*
	 * Handle possible MP race interlock on filter check/write
	 */
	if (kn->kn_data == 0) {
		get_mplock();
		kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
		rel_mplock();
	}
	return (kn->kn_data != 0);
}

static int
filt_tmpfswrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_tmpfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/* --------------------------------------------------------------------- */

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_ops tmpfs_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_ncreate =		tmpfs_ncreate,
	.vop_nresolve =		tmpfs_nresolve,
	.vop_nlookupdotdot =	tmpfs_nlookupdotdot,
	.vop_nmknod =		tmpfs_nmknod,
	.vop_open =		tmpfs_open,
	.vop_close =		tmpfs_close,
	.vop_access =		tmpfs_access,
	.vop_getattr =		tmpfs_getattr,
	.vop_setattr =		tmpfs_setattr,
	.vop_read =		tmpfs_read,
	.vop_write =		tmpfs_write,
	.vop_fsync =		tmpfs_fsync,
	.vop_nremove =		tmpfs_nremove,
	.vop_nlink =		tmpfs_nlink,
	.vop_nrename =		tmpfs_nrename,
	.vop_nmkdir =		tmpfs_nmkdir,
	.vop_nrmdir =		tmpfs_nrmdir,
	.vop_nsymlink =		tmpfs_nsymlink,
	.vop_readdir =		tmpfs_readdir,
	.vop_readlink =		tmpfs_readlink,
	.vop_inactive =		tmpfs_inactive,
	.vop_reclaim =		tmpfs_reclaim,
	.vop_print =		tmpfs_print,
	.vop_pathconf =		tmpfs_pathconf,
	.vop_bmap =		tmpfs_bmap,
	.vop_strategy =		tmpfs_strategy,
	.vop_advlock =		tmpfs_advlock,
	.vop_kqfilter =		tmpfs_kqfilter
};