1 /* 2 * Copyright (c) 2013 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Antonio Huete Jimenez <tuxillo@quantumachine.net> 6 * by Matthew Dillon <dillon@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vfscache.h>
#include <sys/vnode.h>

#include "dirfs.h"

/*
 * Allocate and setup all that is needed for the dirfs node to hold the
 * filename.  Any previously held name is released first.
 * Note: dn_name is NUL terminated; dn_namelen does not count the
 * terminator.
 */
void
dirfs_node_setname(dirfs_node_t dnp, const char *name, int len)
{
	debug_called();

	if (dnp->dn_name)
		kfree(dnp->dn_name, M_DIRFS_MISC);
	/* M_ZERO plus the explicit terminator below guarantees a C string */
	dnp->dn_name = kmalloc(len + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
	bcopy(name, dnp->dn_name, len);
	dnp->dn_name[len] = 0;
	dnp->dn_namelen = len;
}

/*
 * Allocate enough space to hold a dirfs node structure and initialize
 * its lock and descriptor (no fd assigned yet).
 * Note: node name and length are not handled here, see
 * dirfs_node_setname().
 */
dirfs_node_t
dirfs_node_alloc(struct mount *mp)
{
	dirfs_node_t dnp;

	debug_called();

	dnp = kmalloc(sizeof(*dnp), M_DIRFS_NODE, M_WAITOK | M_ZERO);
	lockinit(&dnp->dn_lock, "dfsnode", 0, LK_CANRECURSE);

	/* No host file descriptor is associated yet */
	dnp->dn_fd = DIRFS_NOFD;

	return dnp;
}

/*
 * Drop a reference to the node.  The node is freed when the last
 * reference goes away.
 */
void
dirfs_node_drop(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	if (dirfs_node_unref(dnp))
		dirfs_node_free(dmp, dnp);
}

/*
 * Tear down a node whose refcount has reached zero: remove the
 * association with its parent, remove it from the per-mount passive fd
 * cache, release its name, and close its host fd (syncing first if a
 * vnode is still attached).
 */
int
dirfs_node_free(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp;

	debug_called();

	KKASSERT(dnp != NULL);
	debug_node2(dnp);

	/* Only a fully unreferenced node may be freed */
	KKASSERT(dirfs_node_refcnt(dnp) == 0);

	vp = NODE_TO_VP(dnp);
	/*
	 * Remove the inode from the passive fds list
	 * as we are tearing down the node.
	 * Root inode will be removed on VOP_UNMOUNT()
	 */
	dirfs_mount_gettoken(dmp);

	if (dnp->dn_parent) {	/* NULL when children reaped parents */
		dirfs_node_drop(dmp, dnp->dn_parent);
		dnp->dn_parent = NULL;
	}
	dirfs_node_setpassive(dmp, dnp, 0);
	if (dnp->dn_name) {
		kfree(dnp->dn_name, M_DIRFS_MISC);
		dnp->dn_name = NULL;
	}

	/*
	 * The file descriptor should have been closed already by the
	 * previous call to dirfs_node_setpassive().  If not, force a
	 * sync and close it.
	 */
	if (dnp->dn_fd != DIRFS_NOFD) {
		if (dnp->dn_vnode)
			VOP_FSYNC(vp, MNT_WAIT, 0);
		close(dnp->dn_fd);
		dnp->dn_fd = DIRFS_NOFD;
	}

	lockuninit(&dnp->dn_lock);
	kfree(dnp, M_DIRFS_NODE);
	dnp = NULL;

	dirfs_mount_reltoken(dmp);

	return 0;
}

/*
 * Do all the operations needed to get a resulting inode <--> host file
 * association.  This may or may not include opening the file, which
 * should only be needed when creating it.
 *
 * In the case vap is not NULL and openflags are specified, open the file.
160 */ 161 int 162 dirfs_alloc_file(dirfs_mount_t dmp, dirfs_node_t *dnpp, dirfs_node_t pdnp, 163 struct namecache *ncp, struct vnode **vpp, struct vattr *vap, 164 int openflags) 165 { 166 dirfs_node_t dnp; 167 dirfs_node_t pathnp; 168 struct vnode *vp; 169 struct mount *mp; 170 char *tmp; 171 char *pathfree; 172 int error; 173 174 debug_called(); 175 176 error = 0; 177 vp = NULL; 178 mp = DIRFS_TO_VFS(dmp); 179 180 /* Sanity check */ 181 if (pdnp == NULL) 182 return EINVAL; 183 184 dnp = dirfs_node_alloc(mp); 185 KKASSERT(dnp != NULL); 186 187 dirfs_node_lock(dnp); 188 dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen); 189 dnp->dn_parent = pdnp; 190 dirfs_node_ref(pdnp); /* Children ref */ 191 dirfs_node_unlock(dnp); 192 193 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 194 195 if (openflags && vap != NULL) { 196 dnp->dn_fd = openat(pathnp->dn_fd, tmp, 197 openflags, vap->va_mode); 198 if (dnp->dn_fd == -1) { 199 dirfs_dropfd(dmp, pathnp, pathfree); 200 return errno; 201 } 202 } 203 204 error = dirfs_node_stat(pathnp->dn_fd, tmp, dnp); 205 if (error) { /* XXX Handle errors */ 206 error = errno; 207 if (vp) 208 dirfs_free_vp(dmp, dnp); 209 dirfs_node_free(dmp, dnp); 210 dirfs_dropfd(dmp, pathnp, pathfree); 211 return error; 212 } 213 214 dirfs_alloc_vp(mp, &vp, LK_CANRECURSE, dnp); 215 *vpp = vp; 216 *dnpp = dnp; 217 218 dbg(5, "tmp=%s dnp=%p allocated\n", tmp, dnp); 219 dirfs_dropfd(dmp, pathnp, pathfree); 220 221 /* We want VOP_INACTIVE() to be called on last ref */ 222 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 223 224 return error; 225 } 226 227 /* 228 * Requires an already dirfs_node_t that has been already lstat(2) 229 * for the type comparison 230 */ 231 void 232 dirfs_alloc_vp(struct mount *mp, struct vnode **vpp, int lkflags, 233 dirfs_node_t dnp) 234 { 235 struct vnode *vp; 236 dirfs_mount_t dmp = VFS_TO_DIRFS(mp); 237 238 debug_called(); 239 240 /* 241 * Handle vnode reclaim/alloc races 242 */ 243 for (;;) { 244 vp = dnp->dn_vnode; 245 if (vp) { 
246 if (vget(vp, LK_EXCLUSIVE) == 0) 247 break; /* success */ 248 /* vget raced a reclaim, retry */ 249 } else { 250 getnewvnode(VT_UNUSED10, mp, &vp, 0, lkflags); 251 if (dnp->dn_vnode == NULL) { 252 dnp->dn_vnode = vp; 253 vp->v_data = dnp; 254 vp->v_type = dnp->dn_type; 255 if (dmp->dm_root == dnp) 256 vsetflags(vp, VROOT); 257 dirfs_node_ref(dnp); /* ref for dnp<->vp */ 258 259 /* Type-specific initialization. */ 260 switch (dnp->dn_type) { 261 case VBLK: 262 case VCHR: 263 case VSOCK: 264 break; 265 case VREG: 266 vinitvmio(vp, dnp->dn_size, BMASK, -1); 267 break; 268 case VLNK: 269 break; 270 case VFIFO: 271 // vp->v_ops = &mp->mnt_vn_fifo_ops; 272 break; 273 case VDIR: 274 break; 275 default: 276 panic("dirfs_alloc_vp: dnp=%p vp=%p " 277 "type=%d", 278 dnp, vp, dnp->dn_type); 279 /* NOT REACHED */ 280 break; 281 } 282 break; /* success */ 283 } 284 vp->v_type = VBAD; 285 vx_put(vp); 286 /* multiple dirfs_alloc_vp calls raced, retry */ 287 } 288 } 289 KKASSERT(vp != NULL); 290 *vpp = vp; 291 dbg(5, "dnp=%p vp=%p type=%d\n", dnp, vp, vp->v_type); 292 } 293 294 /* 295 * Do not call locked! 
 */
void
dirfs_free_vp(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp = NODE_TO_VP(dnp);

	/* Sever the dnp<->vp association and drop its ref */
	dnp->dn_vnode = NULL;
	vp->v_data = NULL;
	dirfs_node_drop(dmp, dnp);
}

/*
 * Map a host stat(2) file mode to the matching vnode type, VBAD if
 * the mode matches no known file type.
 */
int
dirfs_nodetype(struct stat *st)
{
	int ret;
	mode_t mode = st->st_mode;

	debug_called();

	if (S_ISDIR(mode))
		ret = VDIR;
	else if (S_ISBLK(mode))
		ret = VBLK;
	else if (S_ISCHR(mode))
		ret = VCHR;
	else if (S_ISFIFO(mode))
		ret = VFIFO;
	else if (S_ISSOCK(mode))
		ret = VSOCK;
	else if (S_ISLNK(mode))
		ret = VLNK;
	else if (S_ISREG(mode))
		ret = VREG;
	else
		ret = VBAD;

	return ret;
}

/*
 * Refresh a dirfs node's attributes from the host file.  Uses
 * fstatat(2) relative to fd, or lstat(2) when fd is DIRFS_NOFD (path
 * must then be absolute).  Symlinks are never followed.
 * Returns 0 on success or the host errno.
 */
int
dirfs_node_stat(int fd, const char *path, dirfs_node_t dnp)
{
	struct stat st;
	int error;

	debug_called();
	if (fd == DIRFS_NOFD)
		error = lstat(path, &st);
	else
		error = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW);

	if (error)
		return errno;

	/* Populate our dirfs node struct with stat data */
	dnp->dn_uid = st.st_uid;
	dnp->dn_gid = st.st_gid;
	dnp->dn_mode = st.st_mode;
	dnp->dn_flags = st.st_flags;
	dnp->dn_links = st.st_nlink;
	dnp->dn_atime = st.st_atime;
	/*
	 * NOTE(review): multiplying the whole seconds value by 1e9 can
	 * overflow and does not yield the sub-second component; the
	 * host's st_*timespec.tv_nsec fields look like the intended
	 * source -- confirm against the vkernel's struct stat.
	 */
	dnp->dn_atimensec = (st.st_atime * 1000000000L);
	dnp->dn_mtime = st.st_mtime;
	dnp->dn_mtimensec = (st.st_mtime * 1000000000L);
	dnp->dn_ctime = st.st_ctime;
	dnp->dn_ctimensec = (st.st_ctime * 1000000000L);
	dnp->dn_gen = st.st_gen;
	dnp->dn_ino = st.st_ino;
	dnp->dn_st_dev = st.st_dev;
	dnp->dn_size = st.st_size;
	dnp->dn_type = dirfs_nodetype(&st);

	return 0;
}

/*
 * Build the absolute host path for a node.  On success *pathfreep
 * receives the buffer to pass to dirfs_dropfd()/kfree later.
 */
char *
dirfs_node_absolute_path(dirfs_mount_t dmp, dirfs_node_t cur, char **pathfreep)
{
	return(dirfs_node_absolute_path_plus(dmp, cur, NULL, pathfreep));
}

/*
 * Build the absolute host path for a node with an optional extra
 * trailing component (last).  The path is assembled right-to-left in
 * a MAXPATHLEN+1 buffer and prefixed with the mount's host path.
 * Returns NULL (and *pathfreep = NULL) when the node was unlinked
 * (no root link) or the path would exceed MAXPATHLEN.
 */
char *
dirfs_node_absolute_path_plus(dirfs_mount_t dmp, dirfs_node_t cur,
			      char *last, char **pathfreep)
{
	size_t len;
	dirfs_node_t dnp1;
	char *buf;
	int count;

	debug_called();

	KKASSERT(dmp->dm_root);	/* Sanity check */
	*pathfreep = NULL;
	if (cur == NULL)
		return NULL;
	buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK);

	/*
	 * Passed-in trailing element.
	 */
	count = 0;
	buf[MAXPATHLEN] = 0;
	if (last) {
		len = strlen(last);
		count += len;
		if (count <= MAXPATHLEN)
			bcopy(last, &buf[MAXPATHLEN - count], len);
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
	}

	/*
	 * Iterate through the parents until we hit the root.
	 */
	dnp1 = cur;
	while (dirfs_node_isroot(dnp1) == 0) {
		count += dnp1->dn_namelen;
		if (count <= MAXPATHLEN) {
			bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count],
			      dnp1->dn_namelen);
		}
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
		dnp1 = dnp1->dn_parent;
		if (dnp1 == NULL)
			break;
	}

	/*
	 * Prefix with the root mount path.  If the element was unlinked
	 * dnp1 will be NULL and there is no path.
	 */
	len = strlen(dmp->dm_path);
	count += len;
	if (dnp1 && count <= MAXPATHLEN) {
		bcopy(dmp->dm_path, &buf[MAXPATHLEN - count], len);
		*pathfreep = buf;
		dbg(5, "absolute_path %s\n", &buf[MAXPATHLEN - count]);
		return (&buf[MAXPATHLEN - count]);
	} else {
		kfree(buf, M_DIRFS_MISC);
		*pathfreep = NULL;
		return (NULL);
	}
}

/*
 * Return a dirfs_node with a valid descriptor plus an allocated
 * relative path which can be used in openat(), fstatat(), etc calls
 * to locate the requested inode.
449 */ 450 dirfs_node_t 451 dirfs_findfd(dirfs_mount_t dmp, dirfs_node_t cur, 452 char **pathto, char **pathfreep) 453 { 454 dirfs_node_t dnp1; 455 int count; 456 char *buf; 457 458 debug_called(); 459 460 *pathfreep = NULL; 461 *pathto = NULL; 462 463 if (cur == NULL) 464 return NULL; 465 466 buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO); 467 count = 0; 468 469 dnp1 = cur; 470 while (dnp1 == cur || dnp1->dn_fd == DIRFS_NOFD) { 471 count += dnp1->dn_namelen; 472 if (count <= MAXPATHLEN) { 473 bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count], 474 dnp1->dn_namelen); 475 } 476 ++count; 477 if (count <= MAXPATHLEN) 478 buf[MAXPATHLEN - count] = '/'; 479 dnp1 = dnp1->dn_parent; 480 KKASSERT(dnp1 != NULL); 481 } 482 483 if (dnp1 && count <= MAXPATHLEN) { 484 *pathfreep = buf; 485 *pathto = &buf[MAXPATHLEN - count + 1]; /* skip '/' prefix */ 486 dirfs_node_ref(dnp1); 487 dbg(5, "fd=%d dnp1=%p dnp1->dn_name=%d &buf[off]=%s\n", 488 dnp1->dn_fd, dnp1, dnp1->dn_name, *pathto); 489 } else { 490 dbg(5, "failed too long\n"); 491 kfree(buf, M_DIRFS_MISC); 492 *pathfreep = NULL; 493 *pathto = NULL; 494 dnp1 = NULL; 495 } 496 return (dnp1); 497 } 498 499 void 500 dirfs_dropfd(dirfs_mount_t dmp, dirfs_node_t dnp1, char *pathfree) 501 { 502 if (pathfree) 503 kfree(pathfree, M_DIRFS_MISC); 504 if (dnp1) 505 dirfs_node_drop(dmp, dnp1); 506 } 507 508 int 509 dirfs_node_getperms(dirfs_node_t dnp, int *flags) 510 { 511 dirfs_mount_t dmp; 512 struct vnode *vp = dnp->dn_vnode; 513 int isowner; 514 int isgroup; 515 516 /* 517 * There must be an active vnode anyways since that 518 * would indicate the dirfs node has valid data for 519 * for dnp->dn_mode (via lstat syscall). 
520 */ 521 KKASSERT(vp); 522 dmp = VFS_TO_DIRFS(vp->v_mount); 523 524 isowner = (dmp->dm_uid == dnp->dn_uid); 525 isgroup = (dmp->dm_gid == dnp->dn_gid); 526 527 if (isowner) { 528 if (dnp->dn_mode & S_IRUSR) 529 *flags |= DIRFS_NODE_RD; 530 if (dnp->dn_mode & S_IWUSR) 531 *flags |= DIRFS_NODE_WR; 532 if (dnp->dn_mode & S_IXUSR) 533 *flags |= DIRFS_NODE_EXE; 534 } else if (isgroup) { 535 if (dnp->dn_mode & S_IRGRP) 536 *flags |= DIRFS_NODE_RD; 537 if (dnp->dn_mode & S_IWGRP) 538 *flags |= DIRFS_NODE_WR; 539 if (dnp->dn_mode & S_IXGRP) 540 *flags |= DIRFS_NODE_EXE; 541 } else { 542 if (dnp->dn_mode & S_IROTH) 543 *flags |= DIRFS_NODE_RD; 544 if (dnp->dn_mode & S_IWOTH) 545 *flags |= DIRFS_NODE_WR; 546 if (dnp->dn_mode & S_IXOTH) 547 *flags |= DIRFS_NODE_EXE; 548 } 549 550 return 0; 551 } 552 553 /* 554 * This requires an allocated node and vnode, otherwise it'll panic 555 */ 556 int 557 dirfs_open_helper(dirfs_mount_t dmp, dirfs_node_t dnp, int parentfd, 558 char *relpath) 559 { 560 dirfs_node_t pathnp; 561 char *pathfree; 562 char *tmp; 563 int flags; 564 int perms; 565 int error; 566 567 debug_called(); 568 569 flags = error = perms = 0; 570 tmp = NULL; 571 572 KKASSERT(dnp); 573 KKASSERT(dnp->dn_vnode); 574 575 /* 576 * XXX Besides VDIR and VREG there are other file 577 * types, y'know? 578 * Also, O_RDWR alone might not be the best mode to open 579 * a file with, need to investigate which suits better. 
	 */
	dirfs_node_getperms(dnp, &perms);

	/*
	 * NOTE(review): vnode types are enum values, not bit flags;
	 * `dn_type & VDIR` looks like it was meant to be
	 * `dn_type == VDIR` -- confirm against the vtype enum.
	 */
	if (dnp->dn_type & VDIR) {
		flags |= O_DIRECTORY;
	} else {
		if (perms & DIRFS_NODE_WR)
			flags |= O_RDWR;
		else
			flags |= O_RDONLY;
	}
	/*
	 * Resolve what to pass to openat(): an explicit relative path,
	 * a path computed via dirfs_findfd(), or just the given
	 * parentfd.
	 */
	if (relpath != NULL) {
		tmp = relpath;
		pathnp = NULL;
		KKASSERT(parentfd != DIRFS_NOFD);
	} else if (parentfd == DIRFS_NOFD) {
		pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree);
		parentfd = pathnp->dn_fd;
	} else {
		pathnp = NULL;
	}

	dnp->dn_fd = openat(parentfd, tmp, flags);
	if (dnp->dn_fd == -1)
		error = errno;

	dbg(5, "dnp=%p tmp2=%s parentfd=%d flags=%d error=%d "
	    "flags=%08x w=%d x=%d\n", dnp, tmp, parentfd, flags, error,
	    perms);

	if (pathnp)
		dirfs_dropfd(dmp, pathnp, pathfree);

	return error;
}

/*
 * Counterpart of dirfs_open_helper().  The actual close is currently
 * disabled (see #if 0) because buffer cache buffers may still be
 * present; always returns 0 for now.
 */
int
dirfs_close_helper(dirfs_node_t dnp)
{
	int error = 0;

	debug_called();


	if (dnp->dn_fd != DIRFS_NOFD) {
		dbg(5, "closed fd on dnp=%p\n", dnp);
#if 0
		/* buffer cache buffers may still be present */
		error = close(dnp->dn_fd); /* XXX EINTR should be checked */
		dnp->dn_fd = DIRFS_NOFD;
#endif
	}

	return error;
}

/* Return the node's current reference count */
int
dirfs_node_refcnt(dirfs_node_t dnp)
{
	return dnp->dn_refcnt;
}

/*
 * Touch the host file's timestamps (lutimes with NULL => "now") and
 * refresh the node's cached attributes.  Returns EPERM for
 * immutable/append-only files, otherwise 0 or the host errno.
 */
int
dirfs_node_chtimes(dirfs_node_t dnp)
{
	struct vnode *vp;
	dirfs_mount_t dmp;
	int error = 0;
	char *tmp;
	char *pathfree;

	debug_called();

	vp = NODE_TO_VP(dnp);
	dmp = VFS_TO_DIRFS(vp->v_mount);

	KKASSERT(vn_islocked(vp));

	if (dnp->dn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
	KKASSERT(tmp);
	if((lutimes(tmp, NULL)) == -1)
		error = errno;

	/* Re-read the host attributes into the node */
	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
	dirfs_dropfd(dmp, NULL, pathfree);

	KKASSERT(vn_islocked(vp));


	return error;
}

/*
 * Change the host file's flags via lchflags(2) after validating the
 * request with vop_helper_setattr_flags(), then refresh the cached
 * attributes.
 */
int
dirfs_node_chflags(dirfs_node_t dnp, int vaflags, struct ucred *cred)
{
	struct vnode *vp;
	dirfs_mount_t dmp;
	int error = 0;
	int flags;
	char *tmp;
	char *pathfree;

	debug_called();

	vp = NODE_TO_VP(dnp);
	dmp = VFS_TO_DIRFS(vp->v_mount);

	KKASSERT(vn_islocked(vp));

	flags = dnp->dn_flags;

	error = vop_helper_setattr_flags(&flags, vaflags, dnp->dn_uid, cred);
	/*
	 * When running vkernels with non-root it is not possible to set
	 * certain flags on host files, such as SF* flags. chflags(2) call
	 * will spit an error in that case.
	 */
	if (error == 0) {
		tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
		KKASSERT(tmp);
		if((lchflags(tmp, flags)) == -1)
			error = errno;
		dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
		dirfs_dropfd(dmp, NULL, pathfree);
	}

	KKASSERT(vn_islocked(vp));

	return error;
}

/*
 * Change the host file's mode via lchmod(2) and refresh the cached
 * attributes.  Returns 0 or the host errno.
 */
int
dirfs_node_chmod(dirfs_mount_t dmp, dirfs_node_t dnp, mode_t mode)
{
	char *tmp;
	char *pathfree;
	int error = 0;

	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
	KKASSERT(tmp);
	if (lchmod(tmp, mode) < 0)
		error = errno;
	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
	dirfs_dropfd(dmp, NULL, pathfree);

	return error;
}

/*
 * Change the host file's ownership (and mode, if it differs from the
 * cached one) and refresh the cached attributes.
 * NOTE(review): the lchmod() error here is silently ignored; only the
 * lchown() result is reported.
 */
int
dirfs_node_chown(dirfs_mount_t dmp, dirfs_node_t dnp,
		 uid_t uid, uid_t gid, mode_t mode)
{
	char *tmp;
	char *pathfree;
	int error = 0;

	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
	KKASSERT(tmp);
	if (lchown(tmp, uid, gid) < 0)
		error = errno;
	if (mode != dnp->dn_mode)
		lchmod(tmp, mode);
	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
	dirfs_dropfd(dmp, NULL, pathfree);

	return error;
}


/*
 * Resize a regular file: adjust the buffer cache first
 * (nvtruncbuf/nvextendbuf), then truncate(2) the host file and update
 * the cached size.  Only VREG is supported; VDIR returns EISDIR.
 */
int
dirfs_node_chsize(dirfs_node_t dnp, off_t nsize)
{
	dirfs_mount_t dmp;
	struct vnode *vp;
	int error = 0;
	char *tmp;
	char *pathfree;
	off_t osize;
	int biosize;

	debug_called();

	KKASSERT(dnp);

	vp = NODE_TO_VP(dnp);
	dmp = VFS_TO_DIRFS(vp->v_mount);
	biosize = BSIZE;
	osize = dnp->dn_size;

	KKASSERT(vn_islocked(vp));

	switch (vp->v_type) {
	case VDIR:
		return (EISDIR);
	case VREG:
		break;
	default:
		return (EOPNOTSUPP);

	}

	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
	if (nsize < osize) {
		error = nvtruncbuf(vp, nsize, biosize, -1, 0);
	} else {
		error = nvextendbuf(vp, osize, nsize,
				    biosize, biosize,
				    -1, -1, 0);
	}
	/* Only touch the host file if the buffer cache adjustment worked */
	if (error == 0 && truncate(tmp, nsize) < 0)
		error = errno;
	if (error == 0)
		dnp->dn_size = nsize;
	dbg(5, "TRUNCATE %016jx %016jx\n", (intmax_t)nsize, dnp->dn_size);
	/*dirfs_node_stat(DIRFS_NOFD, tmp, dnp); don't need to do this*/

	dirfs_dropfd(dmp, NULL, pathfree);


	KKASSERT(vn_islocked(vp));

	return error;
}

/*
 * Insert (state != 0) or remove (state == 0) a node in the per-mount
 * passive fd cache.  Insertion may evict the oldest cached nodes when
 * over dirfs_fd_limit.  Removal attempts to close the descriptor when
 * it is provably unused (see the comment below); the root node's
 * descriptor is never closed here.
 */
void
dirfs_node_setpassive(dirfs_mount_t dmp, dirfs_node_t dnp, int state)
{
	struct vnode *vp;

	if (state && (dnp->dn_state & DIRFS_PASVFD) == 0 &&
	    dnp->dn_fd != DIRFS_NOFD) {
		dirfs_node_ref(dnp);	/* ref held by the fdlist */
		dirfs_node_setflags(dnp, DIRFS_PASVFD);
		TAILQ_INSERT_TAIL(&dmp->dm_fdlist, dnp, dn_fdentry);
		++dirfs_fd_used;
		++dmp->dm_fd_used;

		/*
		 * If we are over our limit remove nodes from the
		 * passive fd cache.  NOTE: dnp is reused here to walk
		 * the eviction candidates; the state==0 branch below is
		 * not reached in this call (state != 0).
		 */
		while (dmp->dm_fd_used > dirfs_fd_limit) {
			dnp = TAILQ_FIRST(&dmp->dm_fdlist);
			dirfs_node_setpassive(dmp, dnp, 0);
		}
	}
	if (state == 0 && (dnp->dn_state & DIRFS_PASVFD)) {
		dirfs_node_clrflags(dnp, DIRFS_PASVFD);
		TAILQ_REMOVE(&dmp->dm_fdlist, dnp, dn_fdentry);
		--dirfs_fd_used;
		--dmp->dm_fd_used;
		dbg(5, "dnp=%p removed from fdlist. %d used\n",
		    dnp, dirfs_fd_used);

		/*
		 * Attempt to close the descriptor.  We can only do this
		 * if the related vnode is inactive and has exactly two
		 * refs (representing the vp<->dnp and PASVFD).  Otherwise
		 * someone might have ref'd the node in order to use the
		 * dn_fd.
		 *
		 * Also, if the vnode is in any way dirty we leave the fd
		 * open for the buffer cache code.  The syncer will eventually
		 * come along and fsync the vnode, and the next inactive
		 * transition will deal with the descriptor.
		 *
		 * The descriptor for the root node is NEVER closed by
		 * this function.
		 */
		vp = dnp->dn_vnode;
		if (dirfs_node_refcnt(dnp) == 2 && vp &&
		    dnp->dn_fd != DIRFS_NOFD &&
		    !dirfs_node_isroot(dnp) &&
		    (vp->v_flag & (VINACTIVE|VOBJDIRTY)) == VINACTIVE &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			dbg(5, "passive cache: closing %d\n", dnp->dn_fd);
			close(dnp->dn_fd);
			dnp->dn_fd = DIRFS_NOFD;
		} else {
			/* No vnode attached at all: only the PASVFD ref left */
			if (dirfs_node_refcnt(dnp) == 1 && dnp->dn_vnode == NULL &&
			    dnp->dn_fd != DIRFS_NOFD &&
			    dnp != dmp->dm_root) {
				dbg(5, "passive cache: closing %d\n", dnp->dn_fd);
				close(dnp->dn_fd);
				dnp->dn_fd = DIRFS_NOFD;
			}
		}
		dirfs_node_drop(dmp, dnp);	/* drop the fdlist ref */
	}
}

/*
 * Render the node's state flags as text for debugging.
 * NOTE: returns a pointer to static storage, not reentrant.
 */
char *
dirfs_flag2str(dirfs_node_t dnp)
{
	const char *txtflg[] = { DIRFS_TXTFLG };
	static char str[512] = {0};

	if (dnp->dn_state & DIRFS_PASVFD)
		ksprintf(str, "%s ", txtflg[0]);

	return str;
}

/*
 * Conditional debug printf: emits only when the global debuglvl is at
 * least the requested level.
 */
void
debug(int level, const char *fmt, ...)
{
	__va_list ap;

	if (debuglvl >= level) {
		__va_start(ap, fmt);
		kvprintf(fmt, ap);
		__va_end(ap);
	}
}