/*-
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
 */

/*
 * tmpfs vnode interface.
 */

#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vfsops.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/buf2.h>

#include <vfs/fifofs/fifo.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include <vfs/tmpfs/tmpfs.h>

MALLOC_DECLARE(M_TMPFS);

static __inline
void
tmpfs_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}


/* --------------------------------------------------------------------- */

static int
tmpfs_nresolve(struct vop_nresolve_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode *vp = NULL;
        struct namecache *ncp = v->a_nch->ncp;
        struct tmpfs_node *tnode;

        int error;
        struct tmpfs_dirent *de;
        struct tmpfs_node *dnode;

        dnode = VP_TO_TMPFS_DIR(dvp);

        de = tmpfs_dir_lookup(dnode, NULL, ncp);
        if (de == NULL) {
                error = ENOENT;
        } else {
                /*
                 * Allocate a vnode for the node we found.
                 */
                tnode = de->td_node;
                error = tmpfs_alloc_vp(dvp->v_mount, tnode,
                                       LK_EXCLUSIVE | LK_RETRY, &vp);
                if (error)
                        goto out;
                KKASSERT(vp);
        }

out:
        /*
         * Store the result of this lookup in the cache.  Avoid this if the
         * request was for creation, as it does not improve timings on
         * empirical tests.
         */
        if (vp) {
                vn_unlock(vp);
                cache_setvp(v->a_nch, vp);
                vrele(vp);
        } else if (error == ENOENT) {
                cache_setvp(v->a_nch, NULL);
        }
        return error;
}

static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode **vpp = v->a_vpp;
        struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
        struct ucred *cred = v->a_cred;
        int error;

        *vpp = NULL;
        /* Check accessibility of requested node as a first step. */
        error = VOP_ACCESS(dvp, VEXEC, cred);
        if (error != 0)
                return error;

        if (dnode->tn_dir.tn_parent != NULL) {
                /* Allocate a new vnode on the matching entry. */
                error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
                                       LK_EXCLUSIVE | LK_RETRY, vpp);

                if (*vpp)
                        vn_unlock(*vpp);
        }

        return (*vpp == NULL) ? ENOENT : 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_ncreate(struct vop_ncreate_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode **vpp = v->a_vpp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vattr *vap = v->a_vap;
        struct ucred *cred = v->a_cred;
        int error;

        KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

        error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
        if (error == 0) {
                cache_setunresolved(v->a_nch);
                cache_setvp(v->a_nch, *vpp);
                tmpfs_knote(dvp, NOTE_WRITE);
        }

        return error;
}
/* --------------------------------------------------------------------- */

static int
tmpfs_nmknod(struct vop_nmknod_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode **vpp = v->a_vpp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vattr *vap = v->a_vap;
        struct ucred *cred = v->a_cred;
        int error;

        if (vap->va_type != VBLK && vap->va_type != VCHR &&
            vap->va_type != VFIFO)
                return EINVAL;

        error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
        if (error == 0) {
                cache_setunresolved(v->a_nch);
                cache_setvp(v->a_nch, *vpp);
                tmpfs_knote(dvp, NOTE_WRITE);
        }

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_open(struct vop_open_args *v)
{
        struct vnode *vp = v->a_vp;
        int mode = v->a_mode;

        int error;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        /* The file is still active but all its names have been removed
         * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
         * it is about to die. */
        if (node->tn_links < 1)
                return (ENOENT);

        /* If the file is marked append-only, deny write requests. */
        if ((node->tn_flags & APPEND) &&
            (mode & (FWRITE | O_APPEND)) == FWRITE) {
                error = EPERM;
        } else {
                return (vop_stdopen(v));
        }
        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_close(struct vop_close_args *v)
{
        struct vnode *vp = v->a_vp;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        if (node->tn_links > 0) {
                /* Update node times.  No need to do it if the node has
                 * been deleted, because it will vanish after we return. */
                tmpfs_update(vp);
        }

        return vop_stdclose(v);
}

/* --------------------------------------------------------------------- */

int
tmpfs_access(struct vop_access_args *v)
{
        struct vnode *vp = v->a_vp;
        int error;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        switch (vp->v_type) {
        case VDIR:
                /* FALLTHROUGH */
        case VLNK:
                /* FALLTHROUGH */
        case VREG:
                if ((v->a_mode & VWRITE) &&
                    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
                        error = EROFS;
                        goto out;
                }
                break;

        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VSOCK:
                /* FALLTHROUGH */
        case VFIFO:
                break;

        default:
                error = EINVAL;
                goto out;
        }

        if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
                error = EPERM;
                goto out;
        }

        error = vop_helper_access(v, node->tn_uid, node->tn_gid,
                                  node->tn_mode, 0);

out:

        return error;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr(struct vop_getattr_args *v)
{
        struct vnode *vp = v->a_vp;
        struct vattr *vap = v->a_vap;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        lwkt_gettoken(&vp->v_mount->mnt_token);
        tmpfs_update(vp);

        vap->va_type = vp->v_type;
        vap->va_mode = node->tn_mode;
        vap->va_nlink = node->tn_links;
        vap->va_uid = node->tn_uid;
        vap->va_gid = node->tn_gid;
        vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
        vap->va_fileid = node->tn_id;
        vap->va_size = node->tn_size;
        vap->va_blocksize = PAGE_SIZE;
        vap->va_atime.tv_sec = node->tn_atime;
        vap->va_atime.tv_nsec = node->tn_atimensec;
        vap->va_mtime.tv_sec = node->tn_mtime;
        vap->va_mtime.tv_nsec = node->tn_mtimensec;
        vap->va_ctime.tv_sec = node->tn_ctime;
        vap->va_ctime.tv_nsec = node->tn_ctimensec;
        vap->va_gen = node->tn_gen;
        vap->va_flags = node->tn_flags;
        if (vp->v_type == VBLK || vp->v_type == VCHR) {
                vap->va_rmajor = umajor(node->tn_rdev);
                vap->va_rminor = uminor(node->tn_rdev);
        }
        vap->va_bytes = round_page(node->tn_size);
        vap->va_filerev = 0;

        lwkt_reltoken(&vp->v_mount->mnt_token);

        return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_setattr(struct vop_setattr_args *v)
{
        struct vnode *vp = v->a_vp;
        struct vattr *vap = v->a_vap;
        struct ucred *cred = v->a_cred;
        struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
        int error = 0;
        int kflags = 0;

        if (error == 0 && (vap->va_flags != VNOVAL)) {
                error = tmpfs_chflags(vp, vap->va_flags, cred);
                kflags |= NOTE_ATTRIB;
        }

        if (error == 0 && (vap->va_size != VNOVAL)) {
                if (vap->va_size > node->tn_size)
                        kflags |= NOTE_WRITE | NOTE_EXTEND;
                else
                        kflags |= NOTE_WRITE;
                error = tmpfs_chsize(vp, vap->va_size, cred);
        }

        if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
                           vap->va_gid != (gid_t)VNOVAL)) {
                error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
                kflags |= NOTE_ATTRIB;
        }

        if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
                error = tmpfs_chmod(vp, vap->va_mode, cred);
                kflags |= NOTE_ATTRIB;
        }

        if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
                            vap->va_atime.tv_nsec != VNOVAL) ||
                           (vap->va_mtime.tv_sec != VNOVAL &&
                            vap->va_mtime.tv_nsec != VNOVAL))) {
                error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
                                      vap->va_vaflags, cred);
                kflags |= NOTE_ATTRIB;
        }

        /* Update the node times.  We give preference to the error codes
         * generated by this function rather than the ones that may arise
         * from tmpfs_update. */
        tmpfs_update(vp);
        tmpfs_knote(vp, kflags);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * fsync is usually a NOP, but we must take action when unmounting or
 * when recycling.
 */
static int
tmpfs_fsync(struct vop_fsync_args *v)
{
        struct tmpfs_mount *tmp;
        struct tmpfs_node *node;
        struct vnode *vp = v->a_vp;

        tmp = VFS_TO_TMPFS(vp->v_mount);
        node = VP_TO_TMPFS_NODE(vp);

        tmpfs_update(vp);
        if (vp->v_type == VREG) {
                if (vp->v_flag & VRECLAIMED) {
                        if (node->tn_links == 0)
                                tmpfs_truncate(vp, 0);
                        else
                                vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
                }
        }
        return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_read (struct vop_read_args *ap)
{
        struct buf *bp;
        struct vnode *vp = ap->a_vp;
        struct uio *uio = ap->a_uio;
        struct tmpfs_node *node;
        off_t base_offset;
        size_t offset;
        size_t len;
        int error;

        error = 0;
        if (uio->uio_resid == 0) {
                return error;
        }

        node = VP_TO_TMPFS_NODE(vp);

        if (uio->uio_offset < 0)
                return (EINVAL);
        if (vp->v_type != VREG)
                return (EINVAL);

        while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
                /*
                 * Use buffer cache I/O (via tmpfs_strategy)
                 */
                offset = (size_t)uio->uio_offset & BMASK;
                base_offset = (off_t)uio->uio_offset - offset;
                bp = getcacheblk(vp, base_offset, BSIZE);
                if (bp == NULL) {
                        lwkt_gettoken(&vp->v_mount->mnt_token);
                        error = bread(vp, base_offset, BSIZE, &bp);
                        if (error) {
                                brelse(bp);
                                lwkt_reltoken(&vp->v_mount->mnt_token);
                                kprintf("tmpfs_read bread error %d\n", error);
                                break;
                        }
                        lwkt_reltoken(&vp->v_mount->mnt_token);
                }

                /*
                 * Figure out how many bytes we can actually copy this loop.
                 */
                len = BSIZE - offset;
                if (len > uio->uio_resid)
                        len = uio->uio_resid;
                if (len > node->tn_size - uio->uio_offset)
                        len = (size_t)(node->tn_size - uio->uio_offset);

                error = uiomove((char *)bp->b_data + offset, len, uio);
                bqrelse(bp);
                if (error) {
                        kprintf("tmpfs_read uiomove error %d\n", error);
                        break;
                }
        }

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED;
        TMPFS_NODE_UNLOCK(node);

        return(error);
}

static int
tmpfs_write (struct vop_write_args *ap)
{
        struct buf *bp;
        struct vnode *vp = ap->a_vp;
        struct uio *uio = ap->a_uio;
        struct thread *td = uio->uio_td;
        struct tmpfs_node *node;
        boolean_t extended;
        off_t oldsize;
        int error;
        off_t base_offset;
        size_t offset;
        size_t len;
        struct rlimit limit;
        int trivial = 0;
        int kflags = 0;

        error = 0;
        if (uio->uio_resid == 0) {
                return error;
        }

        node = VP_TO_TMPFS_NODE(vp);

        if (vp->v_type != VREG)
                return (EINVAL);

        lwkt_gettoken(&vp->v_mount->mnt_token);

        oldsize = node->tn_size;
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = node->tn_size;

        /*
         * Check for illegal write offsets.
         */
        if (uio->uio_offset + uio->uio_resid >
            VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
                lwkt_reltoken(&vp->v_mount->mnt_token);
                return (EFBIG);
        }

        if (vp->v_type == VREG && td != NULL) {
                error = kern_getrlimit(RLIMIT_FSIZE, &limit);
                if (error != 0) {
                        lwkt_reltoken(&vp->v_mount->mnt_token);
                        return error;
                }
                if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
                        ksignal(td->td_proc, SIGXFSZ);
                        lwkt_reltoken(&vp->v_mount->mnt_token);
                        return (EFBIG);
                }
        }


        /*
         * Extend the file's size if necessary
         */
        extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

        while (uio->uio_resid > 0) {
                /*
                 * Use buffer cache I/O (via tmpfs_strategy)
                 */
                offset = (size_t)uio->uio_offset & BMASK;
                base_offset = (off_t)uio->uio_offset - offset;
                len = BSIZE - offset;
                if (len > uio->uio_resid)
                        len = uio->uio_resid;

                if ((uio->uio_offset + len) > node->tn_size) {
                        trivial = (uio->uio_offset <= node->tn_size);
                        error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
                        if (error)
                                break;
                }

                /*
                 * Read to fill in any gaps.  Theoretically we could
                 * optimize this if the write covers the entire buffer
                 * and is not a UIO_NOCOPY write, however this can lead
                 * to a security violation exposing random kernel memory
                 * (whatever junk was in the backing VM pages before).
                 *
                 * So just use bread() to do the right thing.
                 */
                error = bread(vp, base_offset, BSIZE, &bp);
                if (error) {
                        kprintf("tmpfs_write bread error %d\n", error);
                        brelse(bp);
                        break;
                }
                error = uiomove((char *)bp->b_data + offset, len, uio);
                if (error) {
                        kprintf("tmpfs_write uiomove error %d\n", error);
                        brelse(bp);
                        break;
                }

                if (uio->uio_offset > node->tn_size) {
                        node->tn_size = uio->uio_offset;
                        kflags |= NOTE_EXTEND;
                }
                kflags |= NOTE_WRITE;

                /*
                 * The data has been loaded into the buffer, write it out.
                 *
                 * We want tmpfs to be able to use all available ram, not
                 * just the buffer cache, so if not explicitly paging we
                 * use buwrite() to leave the buffer clean but mark all the
                 * VM pages valid+dirty.
                 *
                 * When the kernel is paging, either via normal pageout
                 * operation or when cleaning the object during a recycle,
                 * the underlying VM pages are going to get thrown away
                 * so we MUST write them to swap.
                 *
                 * XXX unfortunately this catches msync() system calls too
                 * for the moment.
                 */
                if (vm_swap_size == 0) {
                        /*
                         * If swap isn't configured yet, force a buwrite() to
                         * avoid problems further down the line when trying
                         * to flush to swap.
                         */
                        buwrite(bp);
                } else {
                        if (ap->a_ioflag & IO_SYNC) {
                                bwrite(bp);
                        } else if ((ap->a_ioflag & IO_ASYNC) ||
                                   (uio->uio_segflg == UIO_NOCOPY)) {
                                bawrite(bp);
                        } else {
                                buwrite(bp);
                        }
                }

                if (bp->b_error) {
                        kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
                        break;
                }
        }

        if (error) {
                if (extended) {
                        (void)tmpfs_reg_resize(vp, oldsize, trivial);
                        kflags &= ~NOTE_EXTEND;
                }
                goto done;
        }

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
                           (extended? TMPFS_NODE_CHANGED : 0);

        if (node->tn_mode & (S_ISUID | S_ISGID)) {
                if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
                        node->tn_mode &= ~(S_ISUID | S_ISGID);
        }
        TMPFS_NODE_UNLOCK(node);
done:

        tmpfs_knote(vp, kflags);


        lwkt_reltoken(&vp->v_mount->mnt_token);
        return(error);
}

static int
tmpfs_advlock (struct vop_advlock_args *ap)
{
        struct tmpfs_node *node;
        struct vnode *vp = ap->a_vp;

        node = VP_TO_TMPFS_NODE(vp);

        return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
}

static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
        struct bio *bio = ap->a_bio;
        struct buf *bp = bio->bio_buf;
        struct vnode *vp = ap->a_vp;
        struct tmpfs_node *node;
        vm_object_t uobj;

        if (vp->v_type != VREG) {
                bp->b_resid = bp->b_bcount;
                bp->b_flags |= B_ERROR | B_INVAL;
                bp->b_error = EINVAL;
                biodone(bio);
                return(0);
        }

        lwkt_gettoken(&vp->v_mount->mnt_token);
        node = VP_TO_TMPFS_NODE(vp);

        uobj = node->tn_reg.tn_aobj;

        /*
         * Call swap_pager_strategy to read or write between the VM
         * object and the buffer cache.
         */
        swap_pager_strategy(uobj, bio);

        lwkt_reltoken(&vp->v_mount->mnt_token);
        return 0;
}

static int
tmpfs_bmap(struct vop_bmap_args *ap)
{
        if (ap->a_doffsetp != NULL)
                *ap->a_doffsetp = ap->a_loffset;
        if (ap->a_runp != NULL)
                *ap->a_runp = 0;
        if (ap->a_runb != NULL)
                *ap->a_runb = 0;

        return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nremove(struct vop_nremove_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vnode *vp;
        int error;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *dnode;
        struct tmpfs_node *node;

        /*
         * We have to acquire the vp from v->a_nch because we will likely
         * unresolve the namecache entry, and a vrele/vput is needed to
         * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
         *
         * We have to use vget to clear any inactive state on the vnode,
         * otherwise the vnode may remain inactive and thus tmpfs_inactive
         * will not get called when we release it.
         */
        error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
        KKASSERT(error == 0);
        vn_unlock(vp);

        if (vp->v_type == VDIR) {
                error = EISDIR;
                goto out;
        }

        dnode = VP_TO_TMPFS_DIR(dvp);
        node = VP_TO_TMPFS_NODE(vp);
        tmp = VFS_TO_TMPFS(vp->v_mount);
        de = tmpfs_dir_lookup(dnode, node, ncp);
        if (de == NULL) {
                error = ENOENT;
                goto out;
        }

        /* Files marked as immutable or append-only cannot be deleted. */
        if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
            (dnode->tn_flags & APPEND)) {
                error = EPERM;
                goto out;
        }

        /* Remove the entry from the directory; as it is a file, we do not
         * have to change the number of hard links of the directory. */
        tmpfs_dir_detach(dnode, de);

        /* Free the directory entry we just deleted.  Note that the node
         * referred by it will not be removed until the vnode is really
         * reclaimed. */
        tmpfs_free_dirent(tmp, de);

        if (node->tn_links > 0) {
                TMPFS_NODE_LOCK(node);
                node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                                   TMPFS_NODE_MODIFIED;
                TMPFS_NODE_UNLOCK(node);
        }

        cache_setunresolved(v->a_nch);
        cache_setvp(v->a_nch, NULL);
        tmpfs_knote(vp, NOTE_DELETE);
        /*cache_inval_vp(vp, CINV_DESTROY);*/
        tmpfs_knote(dvp, NOTE_WRITE);
        error = 0;

out:
        vrele(vp);

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nlink(struct vop_nlink_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode *vp = v->a_vp;
        struct namecache *ncp = v->a_nch->ncp;
        struct tmpfs_dirent *de;
        struct tmpfs_node *node;
        struct tmpfs_node *dnode;
        int error;

        KKASSERT(dvp != vp); /* XXX When can this be false? */

        node = VP_TO_TMPFS_NODE(vp);
        dnode = VP_TO_TMPFS_NODE(dvp);

        /* XXX: Why aren't the following two tests done by the caller? */

        /* Hard links of directories are forbidden. */
        if (vp->v_type == VDIR) {
                error = EPERM;
                goto out;
        }

        /* Cannot create cross-device links. */
        if (dvp->v_mount != vp->v_mount) {
                error = EXDEV;
                goto out;
        }

        /* Ensure that we do not overflow the maximum number of links imposed
         * by the system. */
        KKASSERT(node->tn_links <= LINK_MAX);
        if (node->tn_links == LINK_MAX) {
                error = EMLINK;
                goto out;
        }

        /* We cannot create links of files marked immutable or append-only. */
        if (node->tn_flags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto out;
        }

        /* Allocate a new directory entry to represent the node. */
        error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
                                   ncp->nc_name, ncp->nc_nlen, &de);
        if (error != 0)
                goto out;

        /* Insert the new directory entry into the appropriate directory. */
        tmpfs_dir_attach(dnode, de);

        /* vp link count has changed, so update node times. */

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_CHANGED;
        TMPFS_NODE_UNLOCK(node);
        tmpfs_update(vp);

        tmpfs_knote(vp, NOTE_LINK);
        cache_setunresolved(v->a_nch);
        cache_setvp(v->a_nch, vp);
        tmpfs_knote(dvp, NOTE_WRITE);
        error = 0;

out:
        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrename(struct vop_nrename_args *v)
{
        struct vnode *fdvp = v->a_fdvp;
        struct namecache *fncp = v->a_fnch->ncp;
        struct vnode *fvp = fncp->nc_vp;
        struct vnode *tdvp = v->a_tdvp;
        struct namecache *tncp = v->a_tnch->ncp;
        struct vnode *tvp;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *fdnode;
        struct tmpfs_node *fnode;
        struct tmpfs_node *tnode;
        struct tmpfs_node *tdnode;
        char *newname;
        char *oldname;
        int error;

        /*
         * Because tvp can get overwritten we have to vget it instead of
         * just vref or use it, otherwise its VINACTIVE flag may not get
         * cleared and the node won't get destroyed.
         */
        error = cache_vget(v->a_tnch, v->a_cred, LK_SHARED, &tvp);
        if (error == 0) {
                tnode = VP_TO_TMPFS_NODE(tvp);
                vn_unlock(tvp);
        } else {
                tnode = NULL;
        }

        /* Disallow cross-device renames.
         * XXX Why isn't this done by the caller? */
        if (fvp->v_mount != tdvp->v_mount ||
            (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
                error = EXDEV;
                goto out;
        }

        tmp = VFS_TO_TMPFS(tdvp->v_mount);
        tdnode = VP_TO_TMPFS_DIR(tdvp);

        /* If source and target are the same file, there is nothing to do. */
        if (fvp == tvp) {
                error = 0;
                goto out;
        }

        fdnode = VP_TO_TMPFS_DIR(fdvp);
        fnode = VP_TO_TMPFS_NODE(fvp);
        de = tmpfs_dir_lookup(fdnode, fnode, fncp);

        /* Avoid manipulating '.' and '..' entries. */
        if (de == NULL) {
                error = ENOENT;
                goto out_locked;
        }
        KKASSERT(de->td_node == fnode);

        /*
         * If replacing an entry in the target directory and that entry
         * is a directory, it must be empty.
         *
         * Kern_rename guarantees the destination to be a directory
         * if the source is one (it does?).
         */
        if (tvp != NULL) {
                KKASSERT(tnode != NULL);

                if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
                    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
                        error = EPERM;
                        goto out_locked;
                }

                if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
                        if (tnode->tn_size > 0) {
                                error = ENOTEMPTY;
                                goto out_locked;
                        }
                } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
                        error = ENOTDIR;
                        goto out_locked;
                } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
                        error = EISDIR;
                        goto out_locked;
                } else {
                        KKASSERT(fnode->tn_type != VDIR &&
                                 tnode->tn_type != VDIR);
                }
        }

        if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
            (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
                error = EPERM;
                goto out_locked;
        }

        /*
         * Ensure that we have enough memory to hold the new name, if it
         * has to be changed.
         */
        if (fncp->nc_nlen != tncp->nc_nlen ||
            bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
                newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
                                  M_WAITOK | M_NULLOK);
                if (newname == NULL) {
                        error = ENOSPC;
                        goto out_locked;
                }
                bcopy(tncp->nc_name, newname, tncp->nc_nlen);
                newname[tncp->nc_nlen] = '\0';
        } else {
                newname = NULL;
        }

        /*
         * Unlink entry from source directory.  Note that the kernel has
         * already checked for illegal recursion cases (renaming a directory
         * into a subdirectory of itself).
         */
        if (fdnode != tdnode)
                tmpfs_dir_detach(fdnode, de);

        /*
         * Handle any name change.  Swap with newname, we will
         * deallocate it at the end.
         */
        if (newname != NULL) {
#if 0
                TMPFS_NODE_LOCK(fnode);
                fnode->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(fnode);
#endif
                oldname = de->td_name;
                de->td_name = newname;
                de->td_namelen = (uint16_t)tncp->nc_nlen;
                newname = oldname;
        }

        /*
         * Link entry to target directory.  If the entry
         * represents a directory move the parent linkage
         * as well.
         */
        if (fdnode != tdnode) {
                if (de->td_node->tn_type == VDIR) {
                        TMPFS_VALIDATE_DIR(fnode);

                        TMPFS_NODE_LOCK(tdnode);
                        tdnode->tn_links++;
                        tdnode->tn_status |= TMPFS_NODE_MODIFIED;
                        TMPFS_NODE_UNLOCK(tdnode);

                        TMPFS_NODE_LOCK(fnode);
                        fnode->tn_dir.tn_parent = tdnode;
                        fnode->tn_status |= TMPFS_NODE_CHANGED;
                        TMPFS_NODE_UNLOCK(fnode);

                        TMPFS_NODE_LOCK(fdnode);
                        fdnode->tn_links--;
                        fdnode->tn_status |= TMPFS_NODE_MODIFIED;
                        TMPFS_NODE_UNLOCK(fdnode);
                }
                tmpfs_dir_attach(tdnode, de);
        } else {
                TMPFS_NODE_LOCK(tdnode);
                tdnode->tn_status |= TMPFS_NODE_MODIFIED;
                TMPFS_NODE_UNLOCK(tdnode);
        }

        /*
         * If we are overwriting an entry, we have to remove the old one
         * from the target directory.
         */
        if (tvp != NULL) {
                /* Remove the old entry from the target directory. */
                de = tmpfs_dir_lookup(tdnode, tnode, tncp);
                tmpfs_dir_detach(tdnode, de);
                tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);

                /*
                 * Free the directory entry we just deleted.  Note that the
                 * node referred by it will not be removed until the vnode is
                 * really reclaimed.
                 */
                tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
                /*cache_inval_vp(tvp, CINV_DESTROY);*/
        }

        /*
         * Finish up
         */
        if (newname) {
                kfree(newname, tmp->tm_name_zone);
                newname = NULL;
        }
        cache_rename(v->a_fnch, v->a_tnch);
        tmpfs_knote(v->a_fdvp, NOTE_WRITE);
        tmpfs_knote(v->a_tdvp, NOTE_WRITE);
        if (fnode->tn_vnode)
                tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
        error = 0;

out_locked:
        ;

out:
        if (tvp)
                vrele(tvp);

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmkdir(struct vop_nmkdir_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode **vpp = v->a_vpp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vattr *vap = v->a_vap;
        struct ucred *cred = v->a_cred;
        int error;

        KKASSERT(vap->va_type == VDIR);

        error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
        if (error == 0) {
                cache_setunresolved(v->a_nch);
                cache_setvp(v->a_nch, *vpp);
                tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
        }

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrmdir(struct vop_nrmdir_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vnode *vp;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *dnode;
        struct tmpfs_node *node;
        int error;

        /*
         * We have to acquire the vp from v->a_nch because we will likely
         * unresolve the namecache entry, and a vrele/vput is needed to
         * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
         *
         * We have to use vget to clear any inactive state on the vnode,
         * otherwise the vnode may remain inactive and thus tmpfs_inactive
         * will not get called when we release it.
         */
        error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
        KKASSERT(error == 0);
        vn_unlock(vp);

        /*
         * Prevalidate so we don't hit an assertion later
         */
        if (vp->v_type != VDIR) {
                error = ENOTDIR;
                goto out;
        }

        tmp = VFS_TO_TMPFS(dvp->v_mount);
        dnode = VP_TO_TMPFS_DIR(dvp);
        node = VP_TO_TMPFS_DIR(vp);

        /* Directories with more than two entries ('.' and '..') cannot be
         * removed. */
        if (node->tn_size > 0) {
                error = ENOTEMPTY;
                goto out;
        }

        if ((dnode->tn_flags & APPEND)
            || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
                error = EPERM;
                goto out;
        }

        /* This invariant holds only if we are not trying to remove "..".
         * We checked for that above so this is safe now. */
        KKASSERT(node->tn_dir.tn_parent == dnode);

        /* Get the directory entry associated with node (vp).  This was
         * filled by tmpfs_lookup while looking up the entry. */
        de = tmpfs_dir_lookup(dnode, node, ncp);
        KKASSERT(TMPFS_DIRENT_MATCHES(de,
                                      ncp->nc_name,
                                      ncp->nc_nlen));

        /* Check flags to see if we are allowed to remove the directory. */
        if ((dnode->tn_flags & APPEND) ||
            node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
                error = EPERM;
                goto out;
        }


        /* Detach the directory entry from the directory (dnode). */
        tmpfs_dir_detach(dnode, de);

        /* No vnode should be allocated for this entry from this point */
        TMPFS_NODE_LOCK(node);
        TMPFS_ASSERT_ELOCKED(node);
        TMPFS_NODE_LOCK(dnode);
        TMPFS_ASSERT_ELOCKED(dnode);

#if 0
        /* handled by tmpfs_free_node */
        KKASSERT(node->tn_links > 0);
        node->tn_links--;
        node->tn_dir.tn_parent = NULL;
#endif
        node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                           TMPFS_NODE_MODIFIED;

#if 0
        /* handled by tmpfs_free_node */
        KKASSERT(dnode->tn_links > 0);
        dnode->tn_links--;
#endif
        dnode->tn_status |= TMPFS_NODE_ACCESSED |
                            TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

        TMPFS_NODE_UNLOCK(dnode);
        TMPFS_NODE_UNLOCK(node);

        /* Free the directory entry we just deleted.  Note that the node
         * referred by it will not be removed until the vnode is really
         * reclaimed. */
        tmpfs_free_dirent(tmp, de);

        /* Release the deleted vnode (will destroy the node, notify
         * interested parties and clean it from the cache). */

        TMPFS_NODE_LOCK(dnode);
        dnode->tn_status |= TMPFS_NODE_CHANGED;
        TMPFS_NODE_UNLOCK(dnode);
        tmpfs_update(dvp);

        cache_setunresolved(v->a_nch);
        cache_setvp(v->a_nch, NULL);
        /*cache_inval_vp(vp, CINV_DESTROY);*/
        tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
        error = 0;

out:
        vrele(vp);

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nsymlink(struct vop_nsymlink_args *v)
{
        struct vnode *dvp = v->a_dvp;
        struct vnode **vpp = v->a_vpp;
        struct namecache *ncp = v->a_nch->ncp;
        struct vattr *vap = v->a_vap;
        struct ucred *cred = v->a_cred;
        char *target = v->a_target;
        int error;

        vap->va_type = VLNK;
        error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
        if (error == 0) {
                tmpfs_knote(*vpp, NOTE_WRITE);
                cache_setunresolved(v->a_nch);
                cache_setvp(v->a_nch, *vpp);
        }

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readdir(struct vop_readdir_args *v)
{
        struct vnode *vp = v->a_vp;
        struct uio *uio = v->a_uio;
        int *eofflag = v->a_eofflag;
        off_t **cookies = v->a_cookies;
        int *ncookies = v->a_ncookies;
        struct tmpfs_mount *tmp;
        int error;
        off_t startoff;
        off_t cnt = 0;
        struct tmpfs_node *node;

        /* This operation only makes sense on directory nodes. */
        if (vp->v_type != VDIR)
                return ENOTDIR;

        tmp = VFS_TO_TMPFS(vp->v_mount);
        node = VP_TO_TMPFS_DIR(vp);
        startoff = uio->uio_offset;

        if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
                error = tmpfs_dir_getdotdent(node, uio);
                if (error != 0)
                        goto outok;
                cnt++;
        }

        if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
                error = tmpfs_dir_getdotdotdent(tmp, node, uio);
                if (error != 0)
                        goto outok;
                cnt++;
        }

        error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
        KKASSERT(error >= -1);

        if (error == -1)
                error = 0;

        if (eofflag != NULL)
                *eofflag =
                    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

        /* Update NFS-related variables. */
        if (error == 0 && cookies != NULL && ncookies != NULL) {
                off_t i;
                off_t off = startoff;
                struct tmpfs_dirent *de = NULL;

                *ncookies = cnt;
                *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

                for (i = 0; i < cnt; i++) {
                        KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
                        if (off == TMPFS_DIRCOOKIE_DOT) {
                                off = TMPFS_DIRCOOKIE_DOTDOT;
                        } else {
                                if (off == TMPFS_DIRCOOKIE_DOTDOT) {
                                        de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
                                } else if (de != NULL) {
                                        de = TAILQ_NEXT(de, td_entries);
                                } else {
                                        de = tmpfs_dir_lookupbycookie(node,
                                                                      off);
                                        KKASSERT(de != NULL);
                                        de = TAILQ_NEXT(de, td_entries);
                                }
                                if (de == NULL)
                                        off = TMPFS_DIRCOOKIE_EOF;
                                else
                                        off = tmpfs_dircookie(de);
                        }

                        (*cookies)[i] = off;
                }
                KKASSERT(uio->uio_offset == off);
        }

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readlink(struct vop_readlink_args *v)
{
        struct vnode *vp = v->a_vp;
        struct uio *uio = v->a_uio;

        int error;
        struct tmpfs_node *node;

        KKASSERT(uio->uio_offset == 0);
        KKASSERT(vp->v_type == VLNK);

        node = VP_TO_TMPFS_NODE(vp);

        error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
                        uio);
        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED;
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_inactive(struct vop_inactive_args *v)
{
        struct vnode *vp = v->a_vp;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        /*
         * Degenerate case
         */
        if (node == NULL) {
                vrecycle(vp);
                return(0);
        }

        /*
         * Get rid of unreferenced deleted vnodes sooner rather than
         * later so the data memory can be recovered immediately.
         *
         * We must truncate the vnode to prevent the normal reclamation
         * path from flushing the data for the removed file to disk.
         */
        TMPFS_NODE_LOCK(node);
        if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
            (node->tn_links == 0 ||
             (node->tn_links == 1 && node->tn_type == VDIR &&
              node->tn_dir.tn_parent))) {
                node->tn_vpstate = TMPFS_VNODE_DOOMED;
                TMPFS_NODE_UNLOCK(node);
                if (node->tn_type == VREG)
                        tmpfs_truncate(vp, 0);
                vrecycle(vp);
        } else {
                TMPFS_NODE_UNLOCK(node);
        }

        return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
        struct vnode *vp = v->a_vp;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);
        tmp = VFS_TO_TMPFS(vp->v_mount);

        tmpfs_free_vp(vp);

        /*
         * If the node referenced by this vnode was deleted by the
         * user, we must free its associated data structures now that
         * the vnode is being reclaimed.
         *
         * Directories have an extra link ref.
         */
        TMPFS_NODE_LOCK(node);
        if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
            (node->tn_links == 0 ||
             (node->tn_links == 1 && node->tn_type == VDIR &&
              node->tn_dir.tn_parent))) {
                node->tn_vpstate = TMPFS_VNODE_DOOMED;
                tmpfs_free_node(tmp, node);
                /* eats the lock */
        } else {
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vp->v_data == NULL);
        return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_print(struct vop_print_args *v)
{
        struct vnode *vp = v->a_vp;

        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
            node, node->tn_flags, node->tn_links);
        kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
            node->tn_mode, node->tn_uid, node->tn_gid,
            (uintmax_t)node->tn_size, node->tn_status);

        if (vp->v_type == VFIFO)
                fifo_printinfo(vp);

        kprintf("\n");

        return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_pathconf(struct vop_pathconf_args *v)
{
        int name = v->a_name;
        register_t *retval = v->a_retval;

        int error;

        error = 0;

        switch (name) {
        case _PC_LINK_MAX:
                *retval = LINK_MAX;
                break;

        case _PC_NAME_MAX:
                *retval = NAME_MAX;
                break;

        case _PC_PATH_MAX:
                *retval = PATH_MAX;
                break;

        case _PC_PIPE_BUF:
                *retval = PIPE_BUF;
                break;

        case _PC_CHOWN_RESTRICTED:
                *retval = 1;
                break;

        case _PC_NO_TRUNC:
                *retval = 1;
                break;

        case _PC_SYNC_IO:
                *retval = 1;
                break;

        case _PC_FILESIZEBITS:
                *retval = 0; /* XXX Don't know which value I should return. */
                break;

        default:
                error = EINVAL;
        }

        return error;
}

/************************************************************************
 *                            KQFILTER OPS                              *
 ************************************************************************/

static void filt_tmpfsdetach(struct knote *kn);
static int filt_tmpfsread(struct knote *kn, long hint);
static int filt_tmpfswrite(struct knote *kn, long hint);
static int filt_tmpfsvnode(struct knote *kn, long hint);

static struct filterops tmpfsread_filtops =
        { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
static struct filterops tmpfswrite_filtops =
        { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
static struct filterops tmpfsvnode_filtops =
        { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };

static int
tmpfs_kqfilter (struct vop_kqfilter_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct knote *kn = ap->a_kn;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &tmpfsread_filtops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &tmpfswrite_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &tmpfsvnode_filtops;
                break;
        default:
                return (EOPNOTSUPP);
        }

        kn->kn_hook = (caddr_t)vp;

        knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

        return(0);
}

static void
filt_tmpfsdetach(struct knote *kn)
{
        struct vnode *vp = (void *)kn->kn_hook;

        knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_tmpfsread(struct knote *kn, long hint)
{
        struct vnode *vp = (void *)kn->kn_hook;
        struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
        off_t off;

        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return(1);
        }

        /*
         * Interlock against MP races when performing this function.
         */
        lwkt_gettoken(&vp->v_mount->mnt_token);
        off = node->tn_size - kn->kn_fp->f_offset;
        kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
        if (kn->kn_sfflags & NOTE_OLDAPI) {
                lwkt_reltoken(&vp->v_mount->mnt_token);
                return(1);
        }

        if (kn->kn_data == 0) {
                kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
        }
        lwkt_reltoken(&vp->v_mount->mnt_token);
        return (kn->kn_data != 0);
}

static int
filt_tmpfswrite(struct knote *kn, long hint)
{
        if (hint == NOTE_REVOKE)
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
        kn->kn_data = 0;
        return (1);
}

static int
filt_tmpfsvnode(struct knote *kn, long hint)
{
        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA);
                return (1);
        }
        return (kn->kn_fflags != 0);
}


/* --------------------------------------------------------------------- */

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_ops tmpfs_vnode_vops = {
        .vop_default = vop_defaultop,
        .vop_getpages = vop_stdgetpages,
        .vop_putpages = vop_stdputpages,
        .vop_ncreate = tmpfs_ncreate,
        .vop_nresolve = tmpfs_nresolve,
        .vop_nlookupdotdot = tmpfs_nlookupdotdot,
        .vop_nmknod = tmpfs_nmknod,
        .vop_open = tmpfs_open,
        .vop_close = tmpfs_close,
        .vop_access = tmpfs_access,
        .vop_getattr = tmpfs_getattr,
        .vop_setattr = tmpfs_setattr,
        .vop_read = tmpfs_read,
        .vop_write = tmpfs_write,
        .vop_fsync = tmpfs_fsync,
        .vop_nremove = tmpfs_nremove,
        .vop_nlink = tmpfs_nlink,
        .vop_nrename = tmpfs_nrename,
        .vop_nmkdir = tmpfs_nmkdir,
        .vop_nrmdir = tmpfs_nrmdir,
        .vop_nsymlink = tmpfs_nsymlink,
        .vop_readdir = tmpfs_readdir,
        .vop_readlink = tmpfs_readlink,
        .vop_inactive = tmpfs_inactive,
        .vop_reclaim = tmpfs_reclaim,
        .vop_print = tmpfs_print,
        .vop_pathconf = tmpfs_pathconf,
        .vop_bmap = tmpfs_bmap,
        .vop_strategy = tmpfs_strategy,
        .vop_advlock = tmpfs_advlock,
        .vop_kqfilter = tmpfs_kqfilter
};