/*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system supporting functions.
 */

#include <sys/kernel.h>
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <vfs/tmpfs/tmpfs.h>
#include <vfs/tmpfs/tmpfs_vnops.h>

static ino_t tmpfs_fetch_ino(struct tmpfs_mount *);
static int tmpfs_dirtree_compare(struct tmpfs_dirent *a,
                                 struct tmpfs_dirent *b);

RB_GENERATE(tmpfs_dirtree, tmpfs_dirent, rb_node, tmpfs_dirtree_compare);
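/*
 * Note on directory layout: each tmpfs directory keeps its entries in a
 * per-directory red-black tree (tn_dir.tn_dirtree), ordered by
 * tmpfs_dirtree_compare() at the end of this file, so name lookups cost
 * O(log n) in the number of entries rather than a linear list walk.
 */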
/* --------------------------------------------------------------------- */

/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
 *
 * If the node type is 'VDIR', the caller is responsible for attaching the
 * new directory under its parent (see tmpfs_dir_attach()); only the root
 * node has no parent.
 *
 * If the node type is 'VBLK' or 'VCHR', then 'rmajor' and 'rminor' specify
 * the device that the node represents.
 *
 * If the node type is 'VLNK', then 'target' specifies the path of the
 * target file for the symbolic link that is being created.
 *
 * Note that new nodes are allocated from the mount's node object cache
 * (tm_node_pool) as long as the mount has not reached its node limit.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
                 uid_t uid, gid_t gid, mode_t mode,
                 char *target, int rmajor, int rminor,
                 struct tmpfs_node **node)
{
        struct tmpfs_node *nnode;
        struct timespec ts;
        udev_t rdev;

        KKASSERT(IFF(type == VLNK, target != NULL));
        KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

        if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
                return (ENOSPC);

        nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
        if (nnode == NULL)
                return (ENOSPC);

        /* Generic initialization. */
        nnode->tn_type = type;
        vfs_timestamp(&ts);
        nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
                = ts.tv_sec;
        nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
                = ts.tv_nsec;
        nnode->tn_uid = uid;
        nnode->tn_gid = gid;
        nnode->tn_mode = mode;
        nnode->tn_id = tmpfs_fetch_ino(tmp);
        nnode->tn_advlock.init_done = 0;
        KKASSERT(nnode->tn_links == 0);

        /* Type-specific initialization. */
        switch (nnode->tn_type) {
        case VBLK:
        case VCHR:
                rdev = makeudev(rmajor, rminor);
                if (rdev == NOUDEV) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (EINVAL);
                }
                nnode->tn_rdev = rdev;
                break;

        case VDIR:
                RB_INIT(&nnode->tn_dir.tn_dirtree);
                nnode->tn_dir.tn_readdir_lastn = 0;
                nnode->tn_dir.tn_readdir_lastp = NULL;
                nnode->tn_size = 0;
                break;

        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                nnode->tn_size = strlen(target);
                nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
                                         M_WAITOK | M_NULLOK);
                if (nnode->tn_link == NULL) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (ENOSPC);
                }
                bcopy(target, nnode->tn_link, nnode->tn_size);
                nnode->tn_link[nnode->tn_size] = '\0';
                break;

        case VREG:
                nnode->tn_reg.tn_aobj =
                        swap_pager_alloc(NULL, 0, VM_PROT_DEFAULT, 0);
                nnode->tn_reg.tn_aobj_pages = 0;
                nnode->tn_size = 0;
                break;

        default:
                panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
        }

        TMPFS_NODE_LOCK(nnode);
        TMPFS_LOCK(tmp);
        LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
        tmp->tm_nodes_inuse++;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(nnode);

        *node = nnode;
        return 0;
}
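/*
 * Typical use (sketch only): callers allocate the node first, then a
 * directory entry and a vnode, and finally attach the entry to the parent
 * directory.  See tmpfs_alloc_file() below for the canonical in-tree
 * sequence:
 *
 *	tmpfs_alloc_node(tmp, ..., &node);
 *	tmpfs_alloc_dirent(tmp, node, name, namelen, &de);
 *	tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, &vp);
 *	tmpfs_dir_attach(dnode, de);
 */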
/* --------------------------------------------------------------------- */

/*
 * Destroys the node pointed to by 'node' from the file system 'tmp'.
 * If the node does not belong to the given mount point, the results are
 * unpredictable.
 *
 * If the node references a directory, no entries are allowed because
 * their removal could need a recursive algorithm, something forbidden in
 * kernel space.  Furthermore, there is no need to provide such
 * functionality (recursive removal) because the only primitives offered
 * to the user are the removal of empty directories and the deletion of
 * individual files.
 *
 * Note that nodes are not necessarily destroyed here; they are returned
 * to the node object cache and remain there until reused.
 */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
        vm_pindex_t pages = 0;

#ifdef INVARIANTS
        TMPFS_ASSERT_ELOCKED(node);
        KKASSERT(node->tn_vnode == NULL);
        KKASSERT((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
#endif

        TMPFS_LOCK(tmp);
        LIST_REMOVE(node, tn_entries);
        tmp->tm_nodes_inuse--;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(node);

        switch (node->tn_type) {
        case VNON:
                /*
                 * Do not do anything.  VNON is provided to let the
                 * allocation routine clean itself easily by avoiding
                 * duplicating code in it.
                 */
                /* FALLTHROUGH */
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                break;
        case VDIR:
                /*
                 * The parent link can be NULL if this is the root
                 * node or if it is a directory node that was rmdir'd.
                 *
                 * XXX what if node is a directory which still contains
                 * directory entries (e.g. due to a forced umount) ?
                 */
                node->tn_size = 0;
                KKASSERT(node->tn_dir.tn_parent == NULL);

                /*
                 * If the root node is being destroyed don't leave a
                 * dangling pointer in tmpfs_mount.
                 */
                if (node == tmp->tm_root)
                        tmp->tm_root = NULL;
                break;
        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                kfree(node->tn_link, tmp->tm_name_zone);
                node->tn_link = NULL;
                node->tn_size = 0;
                break;

        case VREG:
                if (node->tn_reg.tn_aobj != NULL)
                        vm_object_deallocate(node->tn_reg.tn_aobj);
                node->tn_reg.tn_aobj = NULL;
                pages = node->tn_reg.tn_aobj_pages;
                break;

        default:
                panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
        }

        /*
         * Clean up fields for the next allocation.  The objcache only ctors
         * new allocations.
         */
        tmpfs_node_ctor(node, NULL, 0);
        objcache_put(tmp->tm_node_pool, node);
        /* node is now invalid */

        TMPFS_LOCK(tmp);
        tmp->tm_pages_used -= pages;
        TMPFS_UNLOCK(tmp);
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new directory entry for the node 'node' with the name 'name'.
 * The new directory entry is returned in *de.
 *
 * The link count of 'node' is increased by one to reflect the new object
 * referencing it.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
                   const char *name, uint16_t len, struct tmpfs_dirent **de)
{
        struct tmpfs_dirent *nde;

        nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
        nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
        if (nde->td_name == NULL) {
                objcache_put(tmp->tm_dirent_pool, nde);
                *de = NULL;
                return (ENOSPC);
        }
        nde->td_namelen = len;
        bcopy(name, nde->td_name, len);
        nde->td_name[len] = '\0';

        nde->td_node = node;

        TMPFS_NODE_LOCK(node);
        ++node->tn_links;
        TMPFS_NODE_UNLOCK(node);

        *de = nde;

        return 0;
}
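/*
 * Link count bookkeeping: tmpfs_alloc_dirent()/tmpfs_free_dirent() add and
 * drop the link that a directory entry holds on its node, while the extra
 * parent/child links taken for subdirectories are handled separately in
 * tmpfs_dir_attach()/tmpfs_dir_detach() below.
 */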
/* --------------------------------------------------------------------- */

/*
 * Frees a directory entry.  It is the caller's responsibility to destroy
 * the node referenced by it if needed.
 *
 * The link count of the entry's node is decreased by one to reflect the
 * removal of an object that referenced it.
 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node;

        node = de->td_node;

        TMPFS_NODE_LOCK(node);
        TMPFS_ASSERT_ELOCKED(node);
        KKASSERT(node->tn_links > 0);
        node->tn_links--;
        TMPFS_NODE_UNLOCK(node);

        kfree(de->td_name, tmp->tm_name_zone);
        de->td_namelen = 0;
        de->td_name = NULL;
        de->td_node = NULL;
        objcache_put(tmp->tm_dirent_pool, de);
}

/* --------------------------------------------------------------------- */
/*
 * Allocates a new vnode for the node 'node' or returns a new reference to
 * an existing one if the node already had a vnode referencing it.  The
 * resulting locked vnode is returned in *vpp.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
               struct vnode **vpp)
{
        int error = 0;
        struct vnode *vp;

loop:
        /*
         * Interlocked extraction from node.  This can race many things.
         * We have to get a soft reference on the vnode while we hold
         * the node locked, then acquire it properly and check for races.
         */
        TMPFS_NODE_LOCK(node);
        if ((vp = node->tn_vnode) != NULL) {
                KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
                vhold(vp);
                TMPFS_NODE_UNLOCK(node);

                if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
                        vdrop(vp);
                        goto loop;
                }
                if (node->tn_vnode != vp) {
                        vput(vp);
                        vdrop(vp);
                        goto loop;
                }
                vdrop(vp);
                goto out;
        }
        /* vp is NULL */

        /*
         * This should never happen.
         */
        if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
                TMPFS_NODE_UNLOCK(node);
                error = ENOENT;
                goto out;
        }

        /*
         * Interlock against other calls to tmpfs_alloc_vp() trying to
         * allocate and assign a vp to node.
         */
        if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
                node->tn_vpstate |= TMPFS_VNODE_WANT;
                error = tsleep(&node->tn_vpstate, PINTERLOCKED | PCATCH,
                               "tmpfs_alloc_vp", 0);
                TMPFS_NODE_UNLOCK(node);
                if (error)
                        return error;
                goto loop;
        }
        node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
        TMPFS_NODE_UNLOCK(node);

        /*
         * Allocate a new vnode (may block).  The ALLOCATING flag should
         * prevent a race against someone else assigning node->tn_vnode.
         */
        error = getnewvnode(VT_TMPFS, mp, &vp, VLKTIMEOUT, LK_CANRECURSE);
        if (error != 0)
                goto unlock;

        KKASSERT(node->tn_vnode == NULL);
        KKASSERT(vp != NULL);
        vp->v_data = node;
        vp->v_type = node->tn_type;

        /* Type-specific initialization. */
        switch (node->tn_type) {
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VSOCK:
                break;
        case VREG:
                vinitvmio(vp, node->tn_size, TMPFS_BLKMASK, -1);
                break;
        case VLNK:
                break;
        case VFIFO:
                vp->v_ops = &mp->mnt_vn_fifo_ops;
                break;
        case VDIR:
                break;

        default:
                panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
        }

unlock:
        TMPFS_NODE_LOCK(node);

        KKASSERT(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
        node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
        node->tn_vnode = vp;

        if (node->tn_vpstate & TMPFS_VNODE_WANT) {
                node->tn_vpstate &= ~TMPFS_VNODE_WANT;
                TMPFS_NODE_UNLOCK(node);
                wakeup(&node->tn_vpstate);
        } else {
                TMPFS_NODE_UNLOCK(node);
        }

out:
        *vpp = vp;

        KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
#ifdef INVARIANTS
        TMPFS_NODE_LOCK(node);
        KKASSERT(*vpp == node->tn_vnode);
        TMPFS_NODE_UNLOCK(node);
#endif

        return error;
}
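/*
 * Illustrative caller pattern (sketch only, not part of the interface):
 * the vnode comes back locked and referenced, so a caller that is done
 * with it releases both with vput().
 *
 *	struct vnode *vp;
 *
 *	if (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, &vp) == 0) {
 *		... operate on the locked vnode ...
 *		vput(vp);
 *	}
 */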
/* --------------------------------------------------------------------- */

/*
 * Destroys the association between the vnode vp and the node it
 * references.
 */
void
tmpfs_free_vp(struct vnode *vp)
{
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        TMPFS_NODE_LOCK(node);
        KKASSERT(lockcount(TMPFS_NODE_MTX(node)) > 0);
        node->tn_vnode = NULL;
        vp->v_data = NULL;
        TMPFS_NODE_UNLOCK(node);
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new file of the type given in 'vap' and adds it to the parent
 * directory 'dvp'; this addition is done using the component name given in
 * 'ncp'.  The ownership of the new file is automatically assigned based on
 * the credentials of the caller (through 'cred'), the group is set based on
 * the parent directory and the mode is determined from the 'vap' argument.
 * If successful, *vpp holds a vnode to the newly created file and zero
 * is returned.  Otherwise *vpp is NULL and the function returns an
 * appropriate error code.
 */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
                 struct namecache *ncp, struct ucred *cred, char *target)
{
        int error;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *dnode;
        struct tmpfs_node *node;

        tmp = VFS_TO_TMPFS(dvp->v_mount);
        dnode = VP_TO_TMPFS_DIR(dvp);
        *vpp = NULL;

        /*
         * If the directory was removed but a process was CD'd into it,
         * we do not allow any more file/dir creation within it.  Otherwise
         * we will lose track of it.
         */
        KKASSERT(dnode->tn_type == VDIR);
        if (dnode != tmp->tm_root && dnode->tn_dir.tn_parent == NULL)
                return ENOENT;

        /*
         * Make sure the link count does not overflow.
         */
        if (vap->va_type == VDIR && dnode->tn_links >= LINK_MAX)
                return EMLINK;

        /* Allocate a node that represents the new file. */
        error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
                                 dnode->tn_gid, vap->va_mode, target,
                                 vap->va_rmajor, vap->va_rminor, &node);
        if (error != 0)
                return error;
        TMPFS_NODE_LOCK(node);

        /* Allocate a directory entry that points to the new file. */
        error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
        if (error != 0) {
                tmpfs_free_node(tmp, node);
                /* eats node lock */
                return error;
        }

        /* Allocate a vnode for the new file. */
        error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
        if (error != 0) {
                tmpfs_free_dirent(tmp, de);
                tmpfs_free_node(tmp, node);
                /* eats node lock */
                return error;
        }

        /*
         * Now that all required items are allocated, we can proceed to
         * insert the new node into the directory, an operation that
         * cannot fail.
         */
        tmpfs_dir_attach(dnode, de);
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Attaches the directory entry de to the directory represented by dnode.
 * Note that this does not change the link count of the node pointed to by
 * the directory entry, as that is done by tmpfs_alloc_dirent.
 */
void
tmpfs_dir_attach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node = de->td_node;

        TMPFS_NODE_LOCK(dnode);
        if (node && node->tn_type == VDIR) {
                TMPFS_NODE_LOCK(node);
                ++node->tn_links;
                node->tn_status |= TMPFS_NODE_CHANGED;
                node->tn_dir.tn_parent = dnode;
                ++dnode->tn_links;
                TMPFS_NODE_UNLOCK(node);
        }
        RB_INSERT(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
        dnode->tn_size += sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;
        TMPFS_NODE_UNLOCK(dnode);
}

/* --------------------------------------------------------------------- */

/*
 * Detaches the directory entry de from the directory represented by dnode.
 * Note that this does not change the link count of the node pointed to by
 * the directory entry, as that is done by tmpfs_free_dirent.
 */
void
tmpfs_dir_detach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node = de->td_node;

        TMPFS_NODE_LOCK(dnode);
        if (dnode->tn_dir.tn_readdir_lastp == de) {
                dnode->tn_dir.tn_readdir_lastn = 0;
                dnode->tn_dir.tn_readdir_lastp = NULL;
        }
        RB_REMOVE(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
        dnode->tn_size -= sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;
        TMPFS_NODE_UNLOCK(dnode);

        /*
         * Clean out the tn_parent pointer immediately when removing a
         * directory.
         *
         * Removal of the parent linkage also cleans out the extra tn_links
         * count we had on both node and dnode.
         *
         * node can be NULL (typ during a forced umount), in which case
         * the mount code is dealing with the linkages from a linked list
         * scan.
         */
        if (node && node->tn_type == VDIR && node->tn_dir.tn_parent) {
                TMPFS_NODE_LOCK(dnode);
                TMPFS_NODE_LOCK(node);
                KKASSERT(node->tn_dir.tn_parent == dnode);
                dnode->tn_links--;
                node->tn_links--;
                node->tn_dir.tn_parent = NULL;
                TMPFS_NODE_UNLOCK(node);
                TMPFS_NODE_UNLOCK(dnode);
        }
}
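/*
 * Locking note: both tmpfs_dir_attach() and tmpfs_dir_detach() take the
 * directory (dnode) lock first and nest the child node lock inside it.
 */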
/* --------------------------------------------------------------------- */

/*
 * Looks for a directory entry in the directory represented by node.
 * 'ncp' describes the name of the entry to look for.  Note that the .
 * and .. components are not allowed as they do not physically exist
 * within directories.
 *
 * Returns a pointer to the entry when found, otherwise NULL.
 *
 * Caller must hold the node locked (shared ok).
 */
struct tmpfs_dirent *
tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
                 struct namecache *ncp)
{
        struct tmpfs_dirent *de;
        int len = ncp->nc_nlen;
        struct tmpfs_dirent wanted;

        wanted.td_namelen = len;
        wanted.td_name = ncp->nc_name;

        TMPFS_VALIDATE_DIR(node);

        de = RB_FIND(tmpfs_dirtree, &node->tn_dir.tn_dirtree, &wanted);

        KKASSERT(f == NULL || f == de->td_node);

        return de;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
{
        int error;
        struct dirent dent;
        int dirsize;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);

        dent.d_ino = node->tn_id;
        dent.d_type = DT_DIR;
        dent.d_namlen = 1;
        dent.d_name[0] = '.';
        dent.d_name[1] = '\0';
        dirsize = _DIRENT_DIRSIZ(&dent);

        if (dirsize > uio->uio_resid)
                error = -1;
        else {
                error = uiomove((caddr_t)&dent, dirsize, uio);
                if (error == 0)
                        uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
        }
        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
                        struct uio *uio)
{
        int error;
        struct dirent dent;
        int dirsize;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

        if (node->tn_dir.tn_parent) {
                TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
                dent.d_ino = node->tn_dir.tn_parent->tn_id;
                TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
        } else {
                dent.d_ino = tmp->tm_root->tn_id;
        }

        dent.d_type = DT_DIR;
        dent.d_namlen = 2;
        dent.d_name[0] = '.';
        dent.d_name[1] = '.';
        dent.d_name[2] = '\0';
        dirsize = _DIRENT_DIRSIZ(&dent);

        if (dirsize > uio->uio_resid)
                error = -1;
        else {
                error = uiomove((caddr_t)&dent, dirsize, uio);
                if (error == 0) {
                        struct tmpfs_dirent *de;

                        de = RB_MIN(tmpfs_dirtree, &node->tn_dir.tn_dirtree);
                        if (de == NULL)
                                uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
                        else
                                uio->uio_offset = tmpfs_dircookie(de);
                }
        }
        return error;
}
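/*
 * Readdir cookie scheme: uio_offset acts as a cookie.  The reserved values
 * TMPFS_DIRCOOKIE_DOT, TMPFS_DIRCOOKIE_DOTDOT and TMPFS_DIRCOOKIE_EOF mark
 * the '.', '..' and end-of-directory positions, while real entries are
 * addressed by tmpfs_dircookie(de) and resolved again below by
 * tmpfs_dir_lookupbycookie().
 */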
/* --------------------------------------------------------------------- */

/*
 * Lookup a directory entry by its associated cookie.
 */
struct tmpfs_dirent *
tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
{
        struct tmpfs_dirent *de;

        if (cookie == node->tn_dir.tn_readdir_lastn &&
            node->tn_dir.tn_readdir_lastp != NULL) {
                return node->tn_dir.tn_readdir_lastp;
        }

        RB_FOREACH(de, tmpfs_dirtree, &node->tn_dir.tn_dirtree) {
                if (tmpfs_dircookie(de) == cookie) {
                        break;
                }
        }

        return de;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Returns as many directory entries
 * as can fit in the uio space.  The read starts at uio->uio_offset.
 * The function returns 0 on success, -1 if there was not enough space
 * in the uio structure to hold the directory entry or an appropriate
 * error code if another error happens.
 *
 * Caller must hold the node locked (shared ok).
 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
        int error;
        off_t startcookie;
        struct tmpfs_dirent *de;

        TMPFS_VALIDATE_DIR(node);

        /*
         * Locate the first directory entry we have to return.  We have cached
         * the last readdir in the node, so use those values if appropriate.
         * Otherwise do a linear scan to find the requested entry.
         */
        startcookie = uio->uio_offset;
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);

        if (startcookie == TMPFS_DIRCOOKIE_EOF)
                return 0;

        de = tmpfs_dir_lookupbycookie(node, startcookie);
        if (de == NULL)
                return EINVAL;

        /*
         * Read as many entries as possible; i.e., until we reach the end of
         * the directory or we exhaust uio space.
         */
        do {
                struct dirent d;
                int reclen;

                /*
                 * Create a dirent structure representing the current
                 * tmpfs_node and fill it.
                 */
                d.d_ino = de->td_node->tn_id;
                switch (de->td_node->tn_type) {
                case VBLK:
                        d.d_type = DT_BLK;
                        break;

                case VCHR:
                        d.d_type = DT_CHR;
                        break;

                case VDIR:
                        d.d_type = DT_DIR;
                        break;

                case VFIFO:
                        d.d_type = DT_FIFO;
                        break;

                case VLNK:
                        d.d_type = DT_LNK;
                        break;

                case VREG:
                        d.d_type = DT_REG;
                        break;

                case VSOCK:
                        d.d_type = DT_SOCK;
                        break;

                default:
                        panic("tmpfs_dir_getdents: type %p %d",
                              de->td_node, (int)de->td_node->tn_type);
                }
                d.d_namlen = de->td_namelen;
                KKASSERT(de->td_namelen < sizeof(d.d_name));
                bcopy(de->td_name, d.d_name, d.d_namlen);
                d.d_name[d.d_namlen] = '\0';
                reclen = _DIRENT_RECLEN(d.d_namlen);

                /*
                 * Stop reading if the directory entry we are treating is
                 * bigger than the amount of data that can be returned.
                 */
                if (reclen > uio->uio_resid) {
                        error = -1;
                        break;
                }

                /*
                 * Copy the new dirent structure into the output buffer and
                 * advance pointers.
                 */
                error = uiomove((caddr_t)&d, reclen, uio);

                (*cntp)++;
                de = RB_NEXT(tmpfs_dirtree, node->tn_dir.tn_dirtree, de);
        } while (error == 0 && uio->uio_resid > 0 && de != NULL);

        /* Update the offset and cache. */
        if (de == NULL) {
                uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
                node->tn_dir.tn_readdir_lastn = 0;
                node->tn_dir.tn_readdir_lastp = NULL;
        } else {
                node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
                node->tn_dir.tn_readdir_lastp = de;
        }

        return error;
}
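/*
 * The tn_readdir_lastn/tn_readdir_lastp cache updated above lets a
 * sequential readdir resume where it left off: tmpfs_dir_lookupbycookie()
 * checks the cached cookie first and only falls back to the linear
 * RB_FOREACH scan when the cookie does not match.
 */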
/* --------------------------------------------------------------------- */

/*
 * Resizes the aobj associated with the regular file pointed to by vp to
 * the size newsize.  'vp' must point to a vnode that represents a regular
 * file.  'newsize' must not be negative.
 *
 * Pass 'trivial' as 1 when the buffer contents will be overwritten anyway,
 * otherwise pass 0 to have the extended area zero-filled.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
{
        int error;
        vm_pindex_t newpages, oldpages;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *node;
        off_t oldsize;

#ifdef INVARIANTS
        KKASSERT(vp->v_type == VREG);
        KKASSERT(newsize >= 0);
#endif

        node = VP_TO_TMPFS_NODE(vp);
        tmp = VFS_TO_TMPFS(vp->v_mount);

        /*
         * Convert the old and new sizes to the number of pages needed to
         * store them.  It may happen that we do not need to do anything
         * because the last allocated page can accommodate the change on
         * its own.
         */
        TMPFS_NODE_LOCK(node);
        oldsize = node->tn_size;
        oldpages = round_page64(oldsize) / PAGE_SIZE;
        KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
        newpages = round_page64(newsize) / PAGE_SIZE;

        if (newpages > oldpages &&
            tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
                TMPFS_NODE_UNLOCK(node);
                error = ENOSPC;
                goto out;
        }
        node->tn_reg.tn_aobj_pages = newpages;
        node->tn_size = newsize;
        TMPFS_NODE_UNLOCK(node);

        TMPFS_LOCK(tmp);
        tmp->tm_pages_used += (newpages - oldpages);
        TMPFS_UNLOCK(tmp);

        /*
         * When adjusting the vnode filesize and its VM object we must
         * also adjust our backing VM object (aobj).  The block size
         * used must match the block size we use for the buffer cache.
         *
         * The backing VM object contains no VM pages, only swap
         * assignments.
         */
        if (newsize < oldsize) {
                vm_pindex_t osize;
                vm_pindex_t nsize;
                vm_object_t aobj;

                error = nvtruncbuf(vp, newsize, TMPFS_BLKSIZE, -1, 0);
                aobj = node->tn_reg.tn_aobj;
                if (aobj) {
                        osize = aobj->size;
                        nsize = vp->v_object->size;
                        if (nsize < osize) {
                                /* Shrink the aobj and release unused swap. */
                                aobj->size = nsize;
                                swap_pager_freespace(aobj, nsize,
                                                     osize - nsize);
                        }
                }
        } else {
                vm_object_t aobj;

                error = nvextendbuf(vp, oldsize, newsize,
                                    TMPFS_BLKSIZE, TMPFS_BLKSIZE,
                                    -1, -1, trivial);
                aobj = node->tn_reg.tn_aobj;
                if (aobj)
                        aobj->size = vp->v_object->size;
        }

out:
        return error;
}
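/*
 * Space accounting: the mount-wide tm_pages_used counter is charged here in
 * whole pages (round_page64(size) / PAGE_SIZE); tmpfs_free_node() credits
 * tn_aobj_pages back when a regular file node is destroyed, and truncation
 * releases the swap backing beyond the new end via swap_pager_freespace().
 */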
/* --------------------------------------------------------------------- */

/*
 * Change flags of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chflags(struct vnode *vp, int vaflags, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;
        int flags;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);
        flags = node->tn_flags;

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;
        error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);

        /* Actually change the flags on the node itself */
        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                node->tn_flags = flags;
                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change access mode on the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
{
        struct tmpfs_node *node;
        mode_t cur_mode;
        int error;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_mode = node->tn_mode;
        error = vop_helper_chmod(vp, vamode, cred, node->tn_uid, node->tn_gid,
                                 &cur_mode);

        if (error == 0 &&
            (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
                TMPFS_NODE_LOCK(node);
                node->tn_mode &= ~ALLPERMS;
                node->tn_mode |= cur_mode & ALLPERMS;

                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        /* Propagate any error from vop_helper_chmod (e.g. EPERM). */
        return error;
}
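/*
 * The tmpfs_ch*() helpers above and below only flip TMPFS_NODE_CHANGED /
 * TMPFS_NODE_MODIFIED / TMPFS_NODE_ACCESSED status bits; the corresponding
 * timestamps are materialized later by tmpfs_itimes(), normally via the
 * tmpfs_update() call the caller is expected to make.
 */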
/* --------------------------------------------------------------------- */

/*
 * Change ownership of the given vnode.  At least one of uid or gid must
 * be different from VNOVAL.  If one is set to that value, the attribute
 * is unchanged.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
{
        mode_t cur_mode;
        uid_t cur_uid;
        gid_t cur_gid;
        struct tmpfs_node *node;
        int error;

        KKASSERT(vn_islocked(vp));
        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_uid = node->tn_uid;
        cur_gid = node->tn_gid;
        cur_mode = node->tn_mode;
        error = vop_helper_chown(vp, uid, gid, cred,
                                 &cur_uid, &cur_gid, &cur_mode);

        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                if (cur_uid != node->tn_uid ||
                    cur_gid != node->tn_gid ||
                    cur_mode != node->tn_mode) {
                        node->tn_uid = cur_uid;
                        node->tn_gid = cur_gid;
                        node->tn_mode = cur_mode;
                        node->tn_status |= TMPFS_NODE_CHANGED;
                }
                TMPFS_NODE_UNLOCK(node);
        }

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change size of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Decide whether this is a valid operation based on the file type. */
        error = 0;
        switch (vp->v_type) {
        case VDIR:
                return EISDIR;

        case VREG:
                if (vp->v_mount->mnt_flag & MNT_RDONLY)
                        return EROFS;
                break;

        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VFIFO:
                /*
                 * Allow modifications of special files even if the file
                 * system is mounted read-only (we are not modifying the
                 * files themselves, but the objects they represent).
                 */
                return 0;

        default:
                /* Anything else is unsupported. */
                return EOPNOTSUPP;
        }

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        error = tmpfs_truncate(vp, size);

        /*
         * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
         * for us, as well as update tn_status; no need to do that here.
         */

        KKASSERT(vn_islocked(vp));

        return error;
}
/* --------------------------------------------------------------------- */

/*
 * Change access and modification times of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
              int vaflags, struct ucred *cred)
{
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        TMPFS_NODE_LOCK(node);
        if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
                node->tn_status |= TMPFS_NODE_ACCESSED;

        if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
                node->tn_status |= TMPFS_NODE_MODIFIED;

        TMPFS_NODE_UNLOCK(node);

        tmpfs_itimes(vp, atime, mtime);

        KKASSERT(vn_islocked(vp));

        return 0;
}

/* --------------------------------------------------------------------- */

/* Sync timestamps */
void
tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
             const struct timespec *mod)
{
        struct tmpfs_node *node;
        struct timespec now;

        node = VP_TO_TMPFS_NODE(vp);

        if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
                                TMPFS_NODE_CHANGED)) == 0)
                return;

        vfs_timestamp(&now);

        TMPFS_NODE_LOCK(node);
        if (node->tn_status & TMPFS_NODE_ACCESSED) {
                if (acc == NULL)
                        acc = &now;
                node->tn_atime = acc->tv_sec;
                node->tn_atimensec = acc->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_MODIFIED) {
                if (mod == NULL)
                        mod = &now;
                node->tn_mtime = mod->tv_sec;
                node->tn_mtimensec = mod->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_CHANGED) {
                node->tn_ctime = now.tv_sec;
                node->tn_ctimensec = now.tv_nsec;
        }
        node->tn_status &=
            ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
        TMPFS_NODE_UNLOCK(node);
}

/* --------------------------------------------------------------------- */

void
tmpfs_update(struct vnode *vp)
{
        tmpfs_itimes(vp, NULL, NULL);
}

/* --------------------------------------------------------------------- */

int
tmpfs_truncate(struct vnode *vp, off_t length)
{
        int error;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        if (length < 0) {
                error = EINVAL;
                goto out;
        }

        if (node->tn_size == length) {
                error = 0;
                goto out;
        }

        if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
                return (EFBIG);

        error = tmpfs_reg_resize(vp, length, 1);

        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
                TMPFS_NODE_UNLOCK(node);
        }

out:
        tmpfs_update(vp);

        return error;
}

/* --------------------------------------------------------------------- */

static ino_t
tmpfs_fetch_ino(struct tmpfs_mount *tmp)
{
        ino_t ret;

        ret = tmp->tm_ino++;

        return (ret);
}

static int
tmpfs_dirtree_compare(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
{
        if (a->td_namelen > b->td_namelen)
                return 1;
        else if (a->td_namelen < b->td_namelen)
                return -1;
        else
                return strncmp(a->td_name, b->td_name, a->td_namelen);
}
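/*
 * Note that this ordering compares name length before content, so the
 * resulting tree (and hence readdir) order is not lexicographic: for
 * example "z" (length 1) sorts before "aa" (length 2), while names of
 * equal length fall back to a byte-wise strncmp() comparison.
 */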