/*	$NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $	*/

/*-
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs vnode interface.
 */
#include <sys/cdefs.h>

#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sfbuf.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vfsops.h>
#include <sys/vnode.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <vfs/fifofs/fifo.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include <vfs/tmpfs/tmpfs.h>

MALLOC_DECLARE(M_TMPFS);

/* --------------------------------------------------------------------- */

static int
tmpfs_nresolve(struct vop_nresolve_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_node *tnode;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;

	dnode = VP_TO_TMPFS_DIR(dvp);

	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings in
	 * empirical tests.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(v->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(v->a_nch, NULL);
	}
	return error;
}

static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
	struct ucred *cred = v->a_cred;
	int error;

	*vpp = NULL;
	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cred);
	if (error != 0)
		return error;

	if (dnode->tn_dir.tn_parent != NULL) {
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
				       LK_EXCLUSIVE | LK_RETRY, vpp);

		if (*vpp)
			vn_unlock(*vpp);
	}

	return (*vpp == NULL) ? ENOENT : 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_ncreate(struct vop_ncreate_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmknod(struct vop_nmknod_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO)
		return EINVAL;

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_open(struct vop_open_args *v)
{
	struct vnode *vp = v->a_vp;
	int mode = v->a_mode;

	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/* The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die. */
	if (node->tn_links < 1)
		return (ENOENT);

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		error = EPERM;
	} else {
		return (vop_stdopen(v));
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_close(struct vop_close_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links > 0) {
		/* Update node times.  No need to do it if the node has
		 * been deleted, because it will vanish after we return.
		 */
		tmpfs_update(vp);
	}

	return vop_stdclose(v);
}

/* --------------------------------------------------------------------- */

int
tmpfs_access(struct vop_access_args *v)
{
	struct vnode *vp = v->a_vp;
	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	switch (vp->v_type) {
	case VDIR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		/* Deny write attempts on read-only mounts. */
		if ((v->a_mode & VWRITE) &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto out;
		}
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		/* FALLTHROUGH */
	case VFIFO:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	/* Deny write attempts on immutable nodes. */
	if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
		error = EPERM;
		goto out;
	}

	error = vop_helper_access(v, node->tn_uid, node->tn_gid,
				  node->tn_mode, 0);

out:

	return error;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr(struct vop_getattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime.tv_sec = node->tn_atime;
	vap->va_atime.tv_nsec = node->tn_atimensec;
	vap->va_mtime.tv_sec = node->tn_mtime;
	vap->va_mtime.tv_nsec = node->tn_mtimensec;
	vap->va_ctime.tv_sec = node->tn_ctime;
	vap->va_ctime.tv_nsec = node->tn_ctimensec;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_setattr(struct vop_setattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error = 0;

	if (error == 0 && (vap->va_flags != VNOVAL))
		error = tmpfs_chflags(vp, vap->va_flags, cred);

	if (error == 0 && (vap->va_size != VNOVAL))
		error = tmpfs_chsize(vp, vap->va_size, cred);

	if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
			   vap->va_gid != (gid_t)VNOVAL)) {
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
	}

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
		error = tmpfs_chmod(vp, vap->va_mode, cred);

	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
			    vap->va_atime.tv_nsec != VNOVAL) ||
			   (vap->va_mtime.tv_sec != VNOVAL &&
			    vap->va_mtime.tv_nsec != VNOVAL))) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
				      vap->va_vaflags, cred);
	}

	/* Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update.
	 */
	tmpfs_update(vp);

	return error;
}

/* --------------------------------------------------------------------- */

/*
 * fsync is usually a NOP, but we must take action when unmounting or
 * when recycling.
 */
static int
tmpfs_fsync(struct vop_fsync_args *v)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct vnode *vp = v->a_vp;

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);
	if (vp->v_type == VREG) {
		if (vp->v_flag & VRECLAIMED) {
			if (node->tn_links == 0)
				tmpfs_truncate(vp, 0);
			else
				vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
		}
	}
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_read(struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct tmpfs_node *node;
	off_t base_offset;
	size_t offset;
	size_t len;
	int got_mplock;
	int error;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (uio->uio_offset < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (EINVAL);

#ifdef SMP
	if (curthread->td_mpcount)
		got_mplock = -1;
	else
		got_mplock = 0;
#else
	got_mplock = -1;
#endif

	while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset);
		if (bp == NULL) {
			if (got_mplock == 0) {
				got_mplock = 1;
				get_mplock();
			}

			error = bread(vp, base_offset, BSIZE, &bp);
			if (error) {
				brelse(bp);
				kprintf("tmpfs_read bread error %d\n", error);
				break;
			}
		}

		if (got_mplock == 0) {
			got_mplock = 1;
			get_mplock();
		}

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > node->tn_size - uio->uio_offset)
			len = (size_t)(node->tn_size - uio->uio_offset);

		error = uiomove((char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("tmpfs_read uiomove error %d\n", error);
			break;
		}
	}

	if (got_mplock > 0)
		rel_mplock();

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return (error);
}

static int
tmpfs_write(struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int got_mplock;
	int trivial = 0;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
		return (EFBIG);

	if (vp->v_type == VREG && td != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error != 0)
			return error;
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			return (EFBIG);
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

#ifdef SMP
	if (curthread->td_mpcount) {
		got_mplock = -1;
	} else {
		got_mplock = 1;
		get_mplock();
	}
#else
	got_mplock = -1;
#endif
	while (uio->uio_resid > 0) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len,
						 trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread(vp, base_offset, BSIZE, &bp);
		if (error) {
			brelse(bp);
			kprintf("tmpfs_write bread error %d\n", error);
			break;
		}
		error = uiomove((char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size)
			node->tn_size = uio->uio_offset;

		/*
		 * The data has been loaded into the buffer, write it out.
		 *
		 * We want tmpfs to be able to use all available ram, not
		 * just the buffer cache, so if not explicitly paging we
		 * use buwrite() to leave the buffer clean but mark all the
		 * VM pages valid+dirty.
		 *
		 * When the kernel is paging, either via normal pageout
		 * operation or when cleaning the object during a recycle,
		 * the underlying VM pages are going to get thrown away
		 * so we MUST write them to swap.
		 *
		 * XXX unfortunately this catches msync() system calls too
		 * for the moment.
		 */
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ap->a_ioflag & IO_ASYNC) ||
			   (uio->uio_segflg == UIO_NOCOPY)) {
			bawrite(bp);
		} else {
			buwrite(bp);
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (got_mplock > 0)
		rel_mplock();

	if (error) {
		if (extended)
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
		return error;
	}

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
			   (extended ? TMPFS_NODE_CHANGED : 0);

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
	TMPFS_NODE_UNLOCK(node);

	return (error);
}

static int
tmpfs_advlock(struct vop_advlock_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;

	node = VP_TO_TMPFS_NODE(vp);

	return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
}

static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return (0);
	}

	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Call swap_pager_strategy to read or write between the VM
	 * object and the buffer cache.
	 */
	swap_pager_strategy(uobj, bio);

	return 0;
}

static int
tmpfs_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nremove(struct vop_nremove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/*
	 * We have to acquire the vp from v->a_nch because
	 * we will likely unresolve the namecache entry, and
	 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
	 * sequence to recover space from the file.
	 */
	error = cache_vref(v->a_nch, v->a_cred, &vp);
	KKASSERT(error == 0);

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	de = tmpfs_dir_lookup(dnode, node, ncp);
	if (de == NULL) {
		error = ENOENT;
		goto out;
	}

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed.
	 */
	tmpfs_free_dirent(tmp, de);

	if (node->tn_links > 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
				   TMPFS_NODE_MODIFIED;
		TMPFS_NODE_UNLOCK(node);
	}

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	error = 0;

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nlink(struct vop_nlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;
	struct tmpfs_node *dnode;
	int error;

	KKASSERT(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);
	dnode = VP_TO_TMPFS_NODE(dvp);

	/* XXX: Why aren't the following two tests done by the caller? */

	/* Hard links of directories are forbidden. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	/* Cannot create cross-device links. */
	if (dvp->v_mount != vp->v_mount) {
		error = EXDEV;
		goto out;
	}

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	KKASSERT(node->tn_links <= LINK_MAX);
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
				   ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	tmpfs_dir_attach(dnode, de);

	/* vp link count has changed, so update node times. */

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);
	tmpfs_update(vp);

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, vp);
	error = 0;

out:
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrename(struct vop_nrename_args *v)
{
	struct vnode *fdvp = v->a_fdvp;
	struct namecache *fncp = v->a_fnch->ncp;
	struct vnode *fvp = fncp->nc_vp;
	struct vnode *tdvp = v->a_tdvp;
	struct namecache *tncp = v->a_tnch->ncp;
	struct vnode *tvp = tncp->nc_vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;
	char *newname;
	char *oldname;
	int error;

	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	de = tmpfs_dir_lookup(fdnode, fnode, fncp);

	/* Avoid manipulating '.' and '..' entries.
	 */
	if (de == NULL) {
		error = ENOENT;
		goto out_locked;
	}
	KKASSERT(de->td_node == fnode);

	/*
	 * If replacing an entry in the target directory and that entry
	 * is a directory, it must be empty.
	 *
	 * Kern_rename guarantees the destination to be a directory
	 * if the source is one (it does?).
	 */
	if (tvp != NULL) {
		KKASSERT(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			KKASSERT(fnode->tn_type != VDIR &&
				 tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
	    (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * Ensure that we have enough memory to hold the new name, if it
	 * has to be changed.
	 */
	if (fncp->nc_nlen != tncp->nc_nlen ||
	    bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
		newname = kmalloc(tncp->nc_nlen + 1, M_TMPFSNAME, M_WAITOK);
		bcopy(tncp->nc_name, newname, tncp->nc_nlen);
		newname[tncp->nc_nlen] = '\0';
	} else {
		newname = NULL;
	}

	/*
	 * Unlink entry from source directory.  Note that the kernel has
	 * already checked for illegal recursion cases (renaming a directory
	 * into a subdirectory of itself).
	 */
	if (fdnode != tdnode)
		tmpfs_dir_detach(fdnode, de);

	/*
	 * Handle any name change.  Swap with newname, we will
	 * deallocate it at the end.
	 */
	if (newname != NULL) {
#if 0
		TMPFS_NODE_LOCK(fnode);
		fnode->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(fnode);
#endif
		oldname = de->td_name;
		de->td_name = newname;
		de->td_namelen = (uint16_t)tncp->nc_nlen;
		newname = oldname;
	}

	/*
	 * Link entry to target directory.  If the entry
	 * represents a directory move the parent linkage
	 * as well.
	 */
	if (fdnode != tdnode) {
		if (de->td_node->tn_type == VDIR) {
			TMPFS_VALIDATE_DIR(fnode);

			TMPFS_NODE_LOCK(tdnode);
			tdnode->tn_links++;
			tdnode->tn_status |= TMPFS_NODE_MODIFIED;
			TMPFS_NODE_UNLOCK(tdnode);

			TMPFS_NODE_LOCK(fnode);
			fnode->tn_dir.tn_parent = tdnode;
			fnode->tn_status |= TMPFS_NODE_CHANGED;
			TMPFS_NODE_UNLOCK(fnode);

			TMPFS_NODE_LOCK(fdnode);
			fdnode->tn_links--;
			fdnode->tn_status |= TMPFS_NODE_MODIFIED;
			TMPFS_NODE_UNLOCK(fdnode);
		}
		tmpfs_dir_attach(tdnode, de);
	} else {
		TMPFS_NODE_LOCK(tdnode);
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
		TMPFS_NODE_UNLOCK(tdnode);
	}

	/*
	 * If we are overwriting an entry, we have to remove the old one
	 * from the target directory.
	 */
	if (tvp != NULL) {
		/* Remove the old entry from the target directory. */
		de = tmpfs_dir_lookup(tdnode, tnode, tncp);
		tmpfs_dir_detach(tdnode, de);

		/*
		 * Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed.
		 */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
		/*cache_inval_vp(tvp, CINV_DESTROY);*/
	}

	/*
	 * Finish up
	 */
	if (newname) {
		kfree(newname, M_TMPFSNAME);
		newname = NULL;
	}
	cache_rename(v->a_fnch, v->a_tnch);
	error = 0;

out_locked:
	;

out:
	/* Release target nodes. */
	/* XXX: I don't understand when tdvp can be the same as tvp, but
	 * other code takes care of this... */
	if (tdvp == tvp)
		vrele(tdvp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmkdir(struct vop_nmkdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	KKASSERT(vap->va_type == VDIR);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrmdir(struct vop_nrmdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	int error;

	/*
	 * We have to acquire the vp from v->a_nch because
	 * we will likely unresolve the namecache entry, and
	 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
	 * sequence.
	 */
	error = cache_vref(v->a_nch, v->a_cred, &vp);
	KKASSERT(error == 0);

	/*
	 * Prevalidate so we don't hit an assertion later
	 */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/* Directories with more than two entries ('.' and '..') cannot be
	 * removed. */
	if (node->tn_size > 0) {
		error = ENOTEMPTY;
		goto out;
	}

	if ((dnode->tn_flags & APPEND) ||
	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* This invariant holds only if we are not trying to remove "..".
	 * We checked for that above so this is safe now. */
	KKASSERT(node->tn_dir.tn_parent == dnode);

	/* Get the directory entry associated with node (vp).  This was
	 * filled by tmpfs_lookup while looking up the entry. */
	de = tmpfs_dir_lookup(dnode, node, ncp);
	KKASSERT(TMPFS_DIRENT_MATCHES(de,
				      ncp->nc_name,
				      ncp->nc_nlen));

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND) ||
	    node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Detach the directory entry from the directory (dnode).
	 */
	tmpfs_dir_detach(dnode, de);

	/* No vnode should be allocated for this entry from this point */
	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);
	TMPFS_NODE_LOCK(dnode);
	TMPFS_ASSERT_ELOCKED(dnode);

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(node->tn_links > 0);
	node->tn_links--;
	node->tn_dir.tn_parent = NULL;
#endif
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			   TMPFS_NODE_MODIFIED;

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(dnode->tn_links > 0);
	dnode->tn_links--;
#endif
	dnode->tn_status |= TMPFS_NODE_ACCESSED |
			    TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

	TMPFS_NODE_UNLOCK(dnode);
	TMPFS_NODE_UNLOCK(node);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache). */

	TMPFS_NODE_LOCK(dnode);
	dnode->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(dnode);
	tmpfs_update(dvp);

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	error = 0;

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nsymlink(struct vop_nsymlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	char *target = v->a_target;
	int error;

	vap->va_type = VLNK;
	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readdir(struct vop_readdir_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	int *eofflag = v->a_eofflag;
	off_t **cookies = v->a_cookies;
	int *ncookies = v->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR)
		return ENOTDIR;

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0)
			goto outok;
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0)
			goto outok;
		cnt++;
	}

	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables.
	 */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
				} else if (de != NULL) {
					de = TAILQ_NEXT(de, td_entries);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = TAILQ_NEXT(de, td_entries);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}

			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readlink(struct vop_readlink_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;

	int error;
	struct tmpfs_node *node;

	KKASSERT(uio->uio_offset == 0);
	KKASSERT(vp->v_type == VLNK);

	node = VP_TO_TMPFS_NODE(vp);

	error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
			uio);
	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    (node->tn_links == 0 ||
	     (node->tn_links == 1 && node->tn_type == VDIR &&
	      node->tn_dir.tn_parent)))
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the
	 * user, we must free its associated data structures now that
	 * the vnode is being reclaimed.
	 *
	 * Directories have an extra link ref.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    (node->tn_links == 0 ||
	     (node->tn_links == 1 && node->tn_type == VDIR &&
	      node->tn_dir.tn_parent)))
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		tmpfs_free_node(tmp, node);
		/* eats the lock */
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

	KKASSERT(vp->v_data == NULL);
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_print(struct vop_print_args *v)
{
	struct vnode *vp = v->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
	    node, node->tn_flags, node->tn_links);
	kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
	    node->tn_mode, node->tn_uid, node->tn_gid,
	    (uintmax_t)node->tn_size, node->tn_status);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	kprintf("\n");

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_pathconf(struct vop_pathconf_args *v)
{
	int name = v->a_name;
	register_t *retval = v->a_retval;

	int error;

	error = 0;

	switch (name) {
	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		break;

	case _PC_NAME_MAX:
		*retval = NAME_MAX;
		break;

	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		break;

	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		break;

	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;

	case _PC_NO_TRUNC:
		*retval = 1;
		break;

	case _PC_SYNC_IO:
		*retval = 1;
		break;

	case _PC_FILESIZEBITS:
		*retval = 0; /* XXX Don't know which value I should return. */
		break;

	default:
		error = EINVAL;
	}

	return error;
}

/* --------------------------------------------------------------------- */

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_ops tmpfs_vnode_vops = {
	.vop_default =			vop_defaultop,
	.vop_getpages = 		vop_stdgetpages,
	.vop_putpages = 		vop_stdputpages,
	.vop_ncreate =			tmpfs_ncreate,
	.vop_nresolve =			tmpfs_nresolve,
	.vop_nlookupdotdot =		tmpfs_nlookupdotdot,
	.vop_nmknod =			tmpfs_nmknod,
	.vop_open =			tmpfs_open,
	.vop_close =			tmpfs_close,
	.vop_access =			tmpfs_access,
	.vop_getattr =			tmpfs_getattr,
	.vop_setattr =			tmpfs_setattr,
	.vop_read =			tmpfs_read,
	.vop_write =			tmpfs_write,
	.vop_fsync =			tmpfs_fsync,
	.vop_nremove =			tmpfs_nremove,
	.vop_nlink =			tmpfs_nlink,
	.vop_nrename =			tmpfs_nrename,
	.vop_nmkdir =			tmpfs_nmkdir,
	.vop_nrmdir =			tmpfs_nrmdir,
	.vop_nsymlink =			tmpfs_nsymlink,
	.vop_readdir =			tmpfs_readdir,
	.vop_readlink =			tmpfs_readlink,
	.vop_inactive =			tmpfs_inactive,
	.vop_reclaim =			tmpfs_reclaim,
	.vop_print =			tmpfs_print,
	.vop_pathconf =			tmpfs_pathconf,
	.vop_bmap =			tmpfs_bmap,
	.vop_strategy =			tmpfs_strategy,
	.vop_advlock =			tmpfs_advlock,
};