/*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system supporting functions.
 */

#include <sys/kernel.h>
#include <sys/param.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page2.h>

#include <vfs/tmpfs/tmpfs.h>
#include <vfs/tmpfs/tmpfs_vnops.h>

static ino_t tmpfs_fetch_ino(struct tmpfs_mount *);

static int tmpfs_dirtree_compare(struct tmpfs_dirent *a,
                                 struct tmpfs_dirent *b);
RB_GENERATE(tmpfs_dirtree, tmpfs_dirent, rb_node, tmpfs_dirtree_compare);

static int tmpfs_dirtree_compare_cookie(struct tmpfs_dirent *a,
                                        struct tmpfs_dirent *b);
RB_GENERATE(tmpfs_dirtree_cookie, tmpfs_dirent,
            rb_cookienode, tmpfs_dirtree_compare_cookie);
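
/*
 * Each tmpfs directory keeps its entries in two red-black trees: one
 * keyed by name (tn_dirtree, used for lookups) and one keyed by the
 * entry's cookie (tn_cookietree, used for stable readdir offsets).
 * As a minimal illustrative sketch (not an additional API), an
 * exact-match lookup against a locked directory node reduces to:
 *
 *      struct tmpfs_dirent key;
 *
 *      key.td_namelen = namelen;
 *      key.td_name = name;
 *      de = RB_FIND(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, &key);
 *
 * which is exactly what tmpfs_dir_lookup() further below does.
 */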

/* --------------------------------------------------------------------- */

/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
 *
 * If the node type is 'VDIR', the new directory's parent link is left
 * NULL; the caller attaches it to its parent with
 * tmpfs_dir_attach_locked().  Only the root directory stays parentless.
 *
 * If the node type is 'VBLK' or 'VCHR', then the 'rmajor' and 'rminor'
 * parameters specify the device the node represents.
 *
 * If the node type is 'VLNK', then the parameter 'target' specifies
 * the file name of the target file for the symbolic link that is being
 * created.
 *
 * New nodes are allocated from the mount's node objcache, subject to
 * the mount's tm_nodes_max limit; either form of exhaustion is reported
 * as ENOSPC.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
                 uid_t uid, gid_t gid, mode_t mode,
                 char *target, int rmajor, int rminor,
                 struct tmpfs_node **node)
{
        struct tmpfs_node *nnode;
        struct timespec ts;
        dev_t rdev;

        KKASSERT(IFF(type == VLNK, target != NULL));
        KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

        if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
                return (ENOSPC);

        nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
        if (nnode == NULL)
                return (ENOSPC);

        /* Generic initialization. */
        nnode->tn_type = type;
        vfs_timestamp(&ts);
        nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
                = ts.tv_sec;
        nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
                = ts.tv_nsec;
        nnode->tn_uid = uid;
        nnode->tn_gid = gid;
        nnode->tn_mode = mode;
        nnode->tn_id = tmpfs_fetch_ino(tmp);
        nnode->tn_advlock.init_done = 0;
        KKASSERT(nnode->tn_links == 0);

        /* Type-specific initialization. */
        switch (nnode->tn_type) {
        case VBLK:
        case VCHR:
                rdev = makeudev(rmajor, rminor);
                if (rdev == NOUDEV) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (EINVAL);
                }
                nnode->tn_rdev = rdev;
                break;

        case VDIR:
                RB_INIT(&nnode->tn_dir.tn_dirtree);
                RB_INIT(&nnode->tn_dir.tn_cookietree);
                nnode->tn_dir.tn_parent = NULL;
                nnode->tn_size = 0;
                break;

        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                nnode->tn_size = strlen(target);
                nnode->tn_link = kmalloc(nnode->tn_size + 1,
                                         tmp->tm_name_zone,
                                         M_WAITOK | M_NULLOK);
                if (nnode->tn_link == NULL) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (ENOSPC);
                }
                bcopy(target, nnode->tn_link, nnode->tn_size);
                nnode->tn_link[nnode->tn_size] = '\0';
                break;

        case VREG:
                nnode->tn_reg.tn_aobj = swap_pager_alloc(NULL, 0,
                                                         VM_PROT_DEFAULT, 0);
                nnode->tn_reg.tn_aobj_pages = 0;
                nnode->tn_size = 0;
                vm_object_set_flag(nnode->tn_reg.tn_aobj, OBJ_NOPAGEIN);
                break;

        default:
                panic("tmpfs_alloc_node: type %p %d", nnode,
                      (int)nnode->tn_type);
        }

        TMPFS_NODE_LOCK(nnode);
        TMPFS_LOCK(tmp);
        LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
        tmp->tm_nodes_inuse++;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(nnode);

        *node = nnode;
        return 0;
}
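
/*
 * Illustrative only: a typical creation path calls tmpfs_alloc_node()
 * with attributes taken from the VOP arguments, e.g. (a sketch with
 * error handling elided; tmpfs_alloc_file() below is the real caller):
 *
 *      struct tmpfs_node *node;
 *
 *      error = tmpfs_alloc_node(tmp, VREG, cred->cr_uid, dnode->tn_gid,
 *                               vap->va_mode, NULL, VNOVAL, VNOVAL,
 *                               &node);
 *
 * The non-device, non-symlink case passes NULL/VNOVAL for the unused
 * 'target' and 'rmajor'/'rminor' arguments, matching the KKASSERT
 * contracts above.
 */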

/* --------------------------------------------------------------------- */

/*
 * Destroys the node pointed to by 'node' from the file system 'tmp'.
 * If the node does not belong to the given mount point, the results are
 * unpredictable.
 *
 * If the node references a directory, it must have no entries.  Removing
 * them here could require a recursive algorithm, something forbidden in
 * kernel space, and there is no need for such functionality anyway: the
 * only primitives offered to the user are the removal of empty
 * directories and the deletion of individual files.
 *
 * Note that the node's storage is not freed outright; it is returned to
 * the node objcache, where it remains until reused.
 *
 * The caller must hold TMPFS_NODE_LOCK(node); this function releases it.
 */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
        vm_pindex_t pages = 0;

#ifdef INVARIANTS
        TMPFS_ASSERT_ELOCKED(node);
        KKASSERT(node->tn_vnode == NULL);
#endif

        TMPFS_LOCK(tmp);
        LIST_REMOVE(node, tn_entries);
        tmp->tm_nodes_inuse--;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(node);        /* Caller has this lock */

        switch (node->tn_type) {
        case VNON:
                /*
                 * Do not do anything.  VNON is provided to let the
                 * allocation routine clean itself easily by avoiding
                 * duplicating code in it.
                 */
                /* FALLTHROUGH */
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                break;

        case VDIR:
                /*
                 * The parent link can be NULL if this is the root
                 * node or if it is a directory node that was rmdir'd.
                 *
                 * XXX what if node is a directory which still contains
                 * directory entries (e.g. due to a forced umount) ?
                 */
                node->tn_size = 0;
                KKASSERT(node->tn_dir.tn_parent == NULL);

                /*
                 * If the root node is being destroyed don't leave a
                 * dangling pointer in tmpfs_mount.
                 */
                if (node == tmp->tm_root)
                        tmp->tm_root = NULL;
                break;

        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                kfree(node->tn_link, tmp->tm_name_zone);
                node->tn_link = NULL;
                node->tn_size = 0;
                break;

        case VREG:
                if (node->tn_reg.tn_aobj != NULL)
                        vm_object_deallocate(node->tn_reg.tn_aobj);
                node->tn_reg.tn_aobj = NULL;
                pages = node->tn_reg.tn_aobj_pages;
                break;

        default:
                panic("tmpfs_free_node: type %p %d", node,
                      (int)node->tn_type);
        }

        /*
         * Clean up fields for the next allocation.  The objcache only
         * ctors new allocations.
         */
        tmpfs_node_ctor(node, NULL, 0);
        objcache_put(tmp->tm_node_pool, node);
        /* node is now invalid */

        if (pages)
                atomic_add_long(&tmp->tm_pages_used, -(long)pages);
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new directory entry for the node 'node', with the name
 * 'name'.  The new directory entry is returned in *de.
 *
 * The link count of 'node' is increased by one to reflect the new object
 * referencing it.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
                   const char *name, uint16_t len, struct tmpfs_dirent **de)
{
        struct tmpfs_dirent *nde;

        nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
        nde->td_name = kmalloc(len + 1, tmp->tm_name_zone,
                               M_WAITOK | M_NULLOK);
        if (nde->td_name == NULL) {
                objcache_put(tmp->tm_dirent_pool, nde);
                *de = NULL;
                return (ENOSPC);
        }
        nde->td_namelen = len;
        bcopy(name, nde->td_name, len);
        nde->td_name[len] = '\0';

        nde->td_node = node;

        atomic_add_int(&node->tn_links, 1);

        *de = nde;

        return 0;
}
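
/*
 * Rough link-count lifecycle for a plain file (descriptive sketch):
 *
 *      tmpfs_alloc_node()      tn_links == 0
 *      tmpfs_alloc_dirent()    tn_links == 1   (the name references it)
 *      tmpfs_free_dirent()     tn_links == 0   (node may be reclaimed)
 *
 * Directories additionally gain a count on both parent and child in
 * tmpfs_dir_attach_locked(), dropped again in tmpfs_dir_detach_locked(),
 * to account for the ".." linkage.
 */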

/* --------------------------------------------------------------------- */

/*
 * Frees a directory entry.  It is the caller's responsibility to destroy
 * the node referenced by it if needed.
 *
 * The link count of the referenced node is decreased by one to reflect
 * the removal of an object that referenced it.
 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node;

        node = de->td_node;

        KKASSERT(node->tn_links > 0);
        atomic_add_int(&node->tn_links, -1);

        kfree(de->td_name, tmp->tm_name_zone);
        de->td_namelen = 0;
        de->td_name = NULL;
        de->td_node = NULL;
        objcache_put(tmp->tm_dirent_pool, de);
}
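
/*
 * On error paths callers are expected to unwind in reverse order of
 * allocation, e.g. (sketch; tmpfs_alloc_file() is the canonical user):
 *
 *      tmpfs_free_dirent(tmp, de);     - drops the name's link count
 *      tmpfs_free_node(tmp, node);     - consumes the node lock
 */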

/* --------------------------------------------------------------------- */

/*
 * Allocates a new vnode for the node 'node', or returns a new reference
 * to an existing one if the node already had a vnode referencing it.
 * The resulting locked vnode is returned in *vpp.
 *
 * Returns zero on success or an appropriate error code on failure.
 *
 * The caller must ensure that node cannot go away (usually by holding
 * the related directory entry).
 *
 * If dnode is non-NULL this routine avoids deadlocking against it but
 * can return EAGAIN, in which case the caller must try again.  The dnode
 * lock will have been cycled in that case, but it remains locked on
 * return in all cases.  dnode must be shared-locked.
 */
int
tmpfs_alloc_vp(struct mount *mp,
               struct tmpfs_node *dnode, struct tmpfs_node *node, int lkflag,
               struct vnode **vpp)
{
        int error = 0;
        struct vnode *vp;

loop:
        vp = NULL;
        if (node->tn_vnode == NULL) {
                error = getnewvnode(VT_TMPFS, mp, &vp,
                                    VLKTIMEOUT, LK_CANRECURSE);
                if (error)
                        goto out;
        }

        /*
         * Interlocked extraction from node.  This can race many things.
         * We have to get a soft reference on the vnode while we hold
         * the node locked, then acquire it properly and check for races.
         */
        TMPFS_NODE_LOCK(node);
        if (node->tn_vnode) {
                if (vp) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                }
                vp = node->tn_vnode;

                KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
                vhold(vp);
                TMPFS_NODE_UNLOCK(node);

                if (dnode) {
                        /*
                         * Special-case handling to avoid deadlocking against
                         * dnode.  This case has been validated and occurs
                         * every so often during synth builds.
                         */
                        if (vget(vp, (lkflag & ~LK_RETRY) |
                                     LK_NOWAIT |
                                     LK_EXCLUSIVE) != 0) {
                                TMPFS_NODE_UNLOCK(dnode);
                                if (vget(vp, (lkflag & ~LK_RETRY) |
                                             LK_SLEEPFAIL |
                                             LK_EXCLUSIVE) == 0) {
                                        vn_unlock(vp);
                                }
                                vdrop(vp);
                                TMPFS_NODE_LOCK_SH(dnode);

                                return EAGAIN;
                        }
                } else {
                        /*
                         * Normal path
                         */
                        if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
                                vdrop(vp);
                                goto loop;
                        }
                }
                if (node->tn_vnode != vp) {
                        vput(vp);
                        vdrop(vp);
                        goto loop;
                }
                vdrop(vp);
                goto out;
        }

        /*
         * We need to assign node->tn_vnode.  If vp is NULL, loop up to
         * allocate the vp.  This can happen due to SMP races.
         */
        if (vp == NULL) {
                TMPFS_NODE_UNLOCK(node);
                goto loop;
        }

        /*
         * This should never happen.
         */
        if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
                TMPFS_NODE_UNLOCK(node);
                vp->v_type = VBAD;
                vx_put(vp);
                error = ENOENT;
                goto out;
        }

        KKASSERT(node->tn_vnode == NULL);
        KKASSERT(vp != NULL);
        vp->v_data = node;
        vp->v_type = node->tn_type;

        /* Type-specific initialization. */
        switch (node->tn_type) {
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VSOCK:
                break;
        case VREG:
                /*
                 * VMIO is mandatory.  Tmpfs also supports KVABIO
                 * for its tmpfs_strategy().
                 */
                vsetflags(vp, VKVABIO);
                vinitvmio(vp, node->tn_size, node->tn_blksize, -1);
                break;
        case VLNK:
                break;
        case VFIFO:
                vp->v_ops = &mp->mnt_vn_fifo_ops;
                break;
        case VDIR:
                break;

        default:
                panic("tmpfs_alloc_vp: type %p %d", node,
                      (int)node->tn_type);
        }

        node->tn_vnode = vp;
        TMPFS_NODE_UNLOCK(node);

        vx_downgrade(vp);
out:
        *vpp = vp;
        KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));

        return error;
}
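
/*
 * When called with a non-NULL dnode, callers are expected to retry on
 * EAGAIN, roughly (sketch; dnode stays shared-locked throughout):
 *
 *      do {
 *              error = tmpfs_alloc_vp(mp, dnode, node, lkflag, &vp);
 *      } while (error == EAGAIN);
 */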

/* --------------------------------------------------------------------- */

/*
 * Allocates a new file of the type given in 'vap' and adds it to the
 * parent directory 'dvp'; this addition is done using the component name
 * given in 'ncp'.  The ownership of the new file is automatically
 * assigned based on the credentials of the caller (through 'cred'), the
 * group is set based on the parent directory and the mode is determined
 * from the 'vap' argument.  If successful, *vpp holds a vnode to the
 * newly created file and zero is returned.  Otherwise *vpp is NULL and
 * the function returns an appropriate error code.
 */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
                 struct namecache *ncp, struct ucred *cred, char *target)
{
        int error;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *dnode;
        struct tmpfs_node *node;

        tmp = VFS_TO_TMPFS(dvp->v_mount);
        dnode = VP_TO_TMPFS_DIR(dvp);
        *vpp = NULL;

        TMPFS_NODE_LOCK(dnode);

        /*
         * If the directory was removed but a process was CD'd into it,
         * we do not allow any more file/dir creation within it.  Otherwise
         * we will lose track of it.
         */
        KKASSERT(dnode->tn_type == VDIR);
        if (dnode != tmp->tm_root && dnode->tn_dir.tn_parent == NULL) {
                TMPFS_NODE_UNLOCK(dnode);
                return ENOENT;
        }

        /*
         * Make sure the link count does not overflow.
         */
        if (vap->va_type == VDIR && dnode->tn_links >= LINK_MAX) {
                TMPFS_NODE_UNLOCK(dnode);
                return EMLINK;
        }

        /* Allocate a node that represents the new file. */
        error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
                                 dnode->tn_gid, vap->va_mode, target,
                                 vap->va_rmajor, vap->va_rminor, &node);
        if (error != 0) {
                TMPFS_NODE_UNLOCK(dnode);
                return error;
        }
        TMPFS_NODE_LOCK(node);

        /* Allocate a directory entry that points to the new file. */
        error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen,
                                   &de);
        if (error != 0) {
                TMPFS_NODE_UNLOCK(dnode);
                tmpfs_free_node(tmp, node);     /* eats node lock */
                return error;
        }

        /* Allocate a vnode for the new file. */
        error = tmpfs_alloc_vp(dvp->v_mount, NULL, node, LK_EXCLUSIVE, vpp);
        if (error != 0) {
                TMPFS_NODE_UNLOCK(dnode);
                tmpfs_free_dirent(tmp, de);
                tmpfs_free_node(tmp, node);     /* eats node lock */
                return error;
        }

        /*
         * Now that all required items are allocated, we can proceed to
         * insert the new node into the directory, an operation that
         * cannot fail.
         */
        tmpfs_dir_attach_locked(dnode, de);
        TMPFS_NODE_UNLOCK(dnode);
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Attaches the directory entry de to the directory represented by
 * dnode.  Note that this does not change the link count of the node
 * pointed to by the directory entry, as this is done by
 * tmpfs_alloc_dirent.
 *
 * dnode must be locked.
 */
void
tmpfs_dir_attach_locked(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node = de->td_node;

        if (node && node->tn_type == VDIR) {
                TMPFS_NODE_LOCK(node);
                atomic_add_int(&node->tn_links, 1);
                node->tn_status |= TMPFS_NODE_CHANGED;
                node->tn_dir.tn_parent = dnode;
                atomic_add_int(&dnode->tn_links, 1);
                TMPFS_NODE_UNLOCK(node);
        }
        RB_INSERT(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
        RB_INSERT(tmpfs_dirtree_cookie, &dnode->tn_dir.tn_cookietree, de);
        dnode->tn_size += sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;
}

/* --------------------------------------------------------------------- */

/*
 * Detaches the directory entry de from the directory represented by
 * dnode.  Note that this does not change the link count of the node
 * pointed to by the directory entry, as this is done by
 * tmpfs_free_dirent.
 *
 * dnode must be locked.
 */
void
tmpfs_dir_detach_locked(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node = de->td_node;

        RB_REMOVE(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
        RB_REMOVE(tmpfs_dirtree_cookie, &dnode->tn_dir.tn_cookietree, de);
        dnode->tn_size -= sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;

        /*
         * Clean out the tn_parent pointer immediately when removing a
         * directory.
         *
         * Removal of the parent linkage also cleans out the extra tn_links
         * count we had on both node and dnode.
         *
         * node can be NULL (typically during a forced umount), in which
         * case the mount code is dealing with the linkages from a linked
         * list scan.
         */
        if (node && node->tn_type == VDIR && node->tn_dir.tn_parent) {
                TMPFS_NODE_LOCK(node);
                KKASSERT(node->tn_dir.tn_parent == dnode);
                atomic_add_int(&dnode->tn_links, -1);
                atomic_add_int(&node->tn_links, -1);
                node->tn_dir.tn_parent = NULL;
                TMPFS_NODE_UNLOCK(node);
        }
}
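
/*
 * Note that directory sizes are synthetic: tn_size is simply the entry
 * count times sizeof(struct tmpfs_dirent), which is why the attach and
 * detach paths above adjust it by that constant instead of maintaining
 * any on-media layout.
 */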

/* --------------------------------------------------------------------- */

/*
 * Looks for a directory entry in the directory represented by 'node'.
 * 'ncp' describes the name of the entry to look for.  Note that the .
 * and .. components are not allowed as they do not physically exist
 * within directories.
 *
 * Returns a pointer to the entry when found, otherwise NULL.
 *
 * Caller must hold the node locked (shared ok)
 */
struct tmpfs_dirent *
tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
                 struct namecache *ncp)
{
        struct tmpfs_dirent *de;
        int len = ncp->nc_nlen;
        struct tmpfs_dirent wanted;

        wanted.td_namelen = len;
        wanted.td_name = ncp->nc_name;

        TMPFS_VALIDATE_DIR(node);

        de = RB_FIND(tmpfs_dirtree, &node->tn_dir.tn_dirtree, &wanted);

        KASSERT((f == NULL || de == NULL || f == de->td_node),
                ("tmpfs_dir_lookup: Incorrect node %p %p %p",
                 f, de, (de ? de->td_node : NULL)));

        return de;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
{
        int error;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);

        if (vop_write_dirent(&error, uio, node->tn_id, DT_DIR, 1, "."))
                return -1;
        if (error == 0)
                uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
                        struct uio *uio)
{
        int error;
        ino_t d_ino;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

        if (node->tn_dir.tn_parent) {
                TMPFS_NODE_LOCK(node);
                if (node->tn_dir.tn_parent)
                        d_ino = node->tn_dir.tn_parent->tn_id;
                else
                        d_ino = tmp->tm_root->tn_id;
                TMPFS_NODE_UNLOCK(node);
        } else {
                d_ino = tmp->tm_root->tn_id;
        }

        if (vop_write_dirent(&error, uio, d_ino, DT_DIR, 2, ".."))
                return -1;
        if (error == 0) {
                struct tmpfs_dirent *de;

                de = RB_MIN(tmpfs_dirtree_cookie,
                            &node->tn_dir.tn_cookietree);
                if (de == NULL)
                        uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
                else
                        uio->uio_offset = tmpfs_dircookie(de);
        }
        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Lookup a directory entry by its associated cookie.
 *
 * Must be called with the directory node locked (shared ok)
 */
struct lubycookie_info {
        off_t cookie;
        struct tmpfs_dirent *de;
};

static int
lubycookie_cmp(struct tmpfs_dirent *de, void *arg)
{
        struct lubycookie_info *info = arg;
        off_t cookie = tmpfs_dircookie(de);

        if (cookie < info->cookie)
                return (-1);
        if (cookie > info->cookie)
                return (1);
        return (0);
}

static int
lubycookie_callback(struct tmpfs_dirent *de, void *arg)
{
        struct lubycookie_info *info = arg;

        if (tmpfs_dircookie(de) == info->cookie) {
                info->de = de;
                return (-1);
        }
        return (0);
}

struct tmpfs_dirent *
tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
{
        struct lubycookie_info info;

        info.cookie = cookie;
        info.de = NULL;
        RB_SCAN(tmpfs_dirtree_cookie, &node->tn_dir.tn_cookietree,
                lubycookie_cmp, lubycookie_callback, &info);
        return (info.de);
}
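
/*
 * tmpfs_dir_lookupbycookie() uses the two-callback RB_SCAN() pattern:
 * lubycookie_cmp() bounds the scan to the subtree that can contain the
 * cookie, and lubycookie_callback() records the match, returning a
 * negative value to terminate the scan as soon as it is found.
 */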

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Returns as many directory entries
 * as can fit in the uio space.  The read starts at uio->uio_offset.
 * The function returns 0 on success, -1 if there was not enough space
 * in the uio structure to hold the directory entry or an appropriate
 * error code if another error happens.
 *
 * Caller must hold the node locked (shared ok)
 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
        int error;
        off_t startcookie;
        struct tmpfs_dirent *de;

        TMPFS_VALIDATE_DIR(node);

        /*
         * Locate the first directory entry we have to return.  The
         * requested start cookie is looked up directly in the cookie
         * tree; the '.' and '..' cookies are handled by the caller.
         */
        startcookie = uio->uio_offset;
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);

        if (startcookie == TMPFS_DIRCOOKIE_EOF)
                return 0;

        de = tmpfs_dir_lookupbycookie(node, startcookie);
        if (de == NULL)
                return EINVAL;

        /*
         * Read as many entries as possible; i.e., until we reach the end
         * of the directory or we exhaust uio space.
         */
        do {
                ino_t d_ino;
                uint8_t d_type;

                /*
                 * Create a dirent structure representing the current
                 * tmpfs_node and fill it.
                 */
                d_ino = de->td_node->tn_id;
                switch (de->td_node->tn_type) {
                case VBLK:
                        d_type = DT_BLK;
                        break;

                case VCHR:
                        d_type = DT_CHR;
                        break;

                case VDIR:
                        d_type = DT_DIR;
                        break;

                case VFIFO:
                        d_type = DT_FIFO;
                        break;

                case VLNK:
                        d_type = DT_LNK;
                        break;

                case VREG:
                        d_type = DT_REG;
                        break;

                case VSOCK:
                        d_type = DT_SOCK;
                        break;

                default:
                        panic("tmpfs_dir_getdents: type %p %d",
                              de->td_node, (int)de->td_node->tn_type);
                }
                KKASSERT(de->td_namelen < 256); /* 255 + 1 */

                if (vop_write_dirent(&error, uio, d_ino, d_type,
                                     de->td_namelen, de->td_name)) {
                        error = -1;
                        break;
                }

                (*cntp)++;
                de = RB_NEXT(tmpfs_dirtree_cookie,
                             node->tn_dir.tn_cookietree, de);
        } while (error == 0 && uio->uio_resid > 0 && de != NULL);

        /* Update the offset for the next call. */
        if (de == NULL) {
                uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
        } else {
                uio->uio_offset = tmpfs_dircookie(de);
        }

        return error;
}
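
/*
 * Between getdents calls uio_offset holds the cookie of the next entry
 * to return (or TMPFS_DIRCOOKIE_EOF).  A resume therefore survives
 * concurrent entry insertion and removal, except that removing the
 * resume entry itself invalidates the cookie and yields EINVAL above.
 */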

/* --------------------------------------------------------------------- */

/*
 * Resizes the aobj associated with the regular file pointed to by 'vp'
 * to the size 'newsize'.  'vp' must point to a vnode that represents a
 * regular file.  'newsize' must be non-negative.
 *
 * Pass a non-zero 'trivial' (NVEXTF_TRIVIAL) when the new buffer content
 * will be overwritten immediately; otherwise pass 0 so it is zero-filled.
 *
 * Returns zero on success or an appropriate error code on failure.
 *
 * Caller must hold the node exclusively locked.
 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
{
        int error;
        vm_pindex_t newpages, oldpages;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *node;
        off_t oldsize;
        int nvextflags;

#ifdef INVARIANTS
        KKASSERT(vp->v_type == VREG);
        KKASSERT(newsize >= 0);
#endif

        node = VP_TO_TMPFS_NODE(vp);
        tmp = VFS_TO_TMPFS(vp->v_mount);

        /*
         * Convert the old and new sizes to the number of pages needed to
         * store them.  It may happen that we do not need to do anything
         * because the last allocated page can accommodate the change on
         * its own.
         */
        oldsize = node->tn_size;
        oldpages = round_page64(oldsize) / PAGE_SIZE;
        KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
        newpages = round_page64(newsize) / PAGE_SIZE;

        if (newpages > oldpages &&
            tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
                error = ENOSPC;
                goto out;
        }
        node->tn_reg.tn_aobj_pages = newpages;
        node->tn_size = newsize;

        if (newpages != oldpages)
                atomic_add_long(&tmp->tm_pages_used, (newpages - oldpages));

        /*
         * nvextflags to pass along for bdwrite() vs buwrite()
         */
        if (vm_pages_needed || vm_paging_needed(0) ||
            tmpfs_bufcache_mode >= 2) {
                nvextflags = 0;
        } else {
                nvextflags = NVEXTF_BUWRITE;
        }

        /*
         * When adjusting the vnode filesize and its VM object we must
         * also adjust our backing VM object (aobj).  The blocksize
         * used must match the block size we use for the buffer cache.
         *
         * The backing VM object may contain VM pages as well as swap
         * assignments if we previously renamed main object pages into
         * it during deactivation.
         *
         * To make things easier tmpfs uses a blksize in multiples of
         * PAGE_SIZE, and will only increase the blksize as a small file
         * increases in size.  Once a file has exceeded TMPFS_BLKSIZE
         * (16KB), the blksize is maxed out.  Truncating the file does
         * not reduce the blksize.
         */
        if (newsize < oldsize) {
                vm_pindex_t osize;
                vm_pindex_t nsize;
                vm_object_t aobj;

                error = nvtruncbuf(vp, newsize, node->tn_blksize,
                                   -1, nvextflags);
                aobj = node->tn_reg.tn_aobj;
                if (aobj) {
                        osize = aobj->size;
                        nsize = vp->v_object->size;
                        if (nsize < osize) {
                                aobj->size = nsize;
                                swap_pager_freespace(aobj, nsize,
                                                     osize - nsize);
                                vm_object_page_remove(aobj, nsize, osize,
                                                      FALSE);
                        }
                }
        } else {
                vm_object_t aobj;
                int nblksize;

                /*
                 * The first (and only the first) buffer in the file is
                 * resized in multiples of PAGE_SIZE, up to TMPFS_BLKSIZE.
                 */
                nblksize = node->tn_blksize;
                while (nblksize < TMPFS_BLKSIZE &&
                       nblksize < newsize) {
                        nblksize += PAGE_SIZE;
                }

                if (trivial)
                        nvextflags |= NVEXTF_TRIVIAL;

                error = nvextendbuf(vp, oldsize, newsize,
                                    node->tn_blksize, nblksize,
                                    -1, -1, nvextflags);
                node->tn_blksize = nblksize;
                aobj = node->tn_reg.tn_aobj;
                if (aobj)
                        aobj->size = vp->v_object->size;
        }

out:
        return error;
}
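
/*
 * Worked example for the extend path, assuming a 4KB PAGE_SIZE, a 16KB
 * TMPFS_BLKSIZE and an initial tn_blksize of one page: growing a file
 * from 0 to 6000 bytes steps tn_blksize from 4KB to 8KB; growing past
 * 16KB pins it at 16KB, and a later truncation leaves it there.
 */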

/* --------------------------------------------------------------------- */

/*
 * Change flags of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chflags(struct vnode *vp, u_long vaflags, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;
        int flags;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);
        flags = node->tn_flags;

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;
        error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);

        /* Actually change the flags on the node itself */
        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                node->tn_flags = flags;
                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change access mode on the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
{
        struct tmpfs_node *node;
        mode_t cur_mode;
        int error;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_mode = node->tn_mode;
        error = vop_helper_chmod(vp, vamode, cred, node->tn_uid,
                                 node->tn_gid, &cur_mode);

        if (error == 0 &&
            (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
                TMPFS_NODE_LOCK(node);
                node->tn_mode &= ~ALLPERMS;
                node->tn_mode |= cur_mode & ALLPERMS;

                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change ownership of the given vnode.  At least one of uid or gid must
 * be different from VNOVAL; if one is set to that value, the
 * corresponding attribute is left unchanged.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
{
        mode_t cur_mode;
        uid_t cur_uid;
        gid_t cur_gid;
        struct tmpfs_node *node;
        int error;

        KKASSERT(vn_islocked(vp));
        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_uid = node->tn_uid;
        cur_gid = node->tn_gid;
        cur_mode = node->tn_mode;
        error = vop_helper_chown(vp, uid, gid, cred,
                                 &cur_uid, &cur_gid, &cur_mode);

        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                if (cur_uid != node->tn_uid ||
                    cur_gid != node->tn_gid ||
                    cur_mode != node->tn_mode) {
                        node->tn_uid = cur_uid;
                        node->tn_gid = cur_gid;
                        node->tn_mode = cur_mode;
                        node->tn_status |= TMPFS_NODE_CHANGED;
                }
                TMPFS_NODE_UNLOCK(node);
        }

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change size of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Decide whether this is a valid operation based on the file type. */
        error = 0;
        switch (vp->v_type) {
        case VDIR:
                return EISDIR;

        case VREG:
                if (vp->v_mount->mnt_flag & MNT_RDONLY)
                        return EROFS;
                break;

        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VFIFO:
                /*
                 * Allow modifications of special files even if the file
                 * system is mounted read-only (we are not modifying the
                 * files themselves, but the objects they represent).
                 */
                return 0;

        default:
                /* Anything else is unsupported. */
                return EOPNOTSUPP;
        }

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        error = tmpfs_truncate(vp, size);

        /*
         * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB
         * kevents and will update tn_status for us; no need to do that
         * here.
         */

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change access and modification times of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chtimes(struct vnode *vp, struct timespec *atime,
              struct timespec *mtime, int vaflags, struct ucred *cred)
{
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        TMPFS_NODE_LOCK(node);
        if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
                node->tn_status |= TMPFS_NODE_ACCESSED;

        if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL) {
                node->tn_status |= TMPFS_NODE_MODIFIED;
                vclrflags(vp, VLASTWRITETS);
        }
        TMPFS_NODE_UNLOCK(node);

        tmpfs_itimes(vp, atime, mtime);

        KKASSERT(vn_islocked(vp));

        return 0;
}

/* --------------------------------------------------------------------- */

/* Sync timestamps */
void
tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
             const struct timespec *mod)
{
        struct tmpfs_node *node;
        struct timespec now;

        node = VP_TO_TMPFS_NODE(vp);

        if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
                                TMPFS_NODE_CHANGED)) == 0) {
                return;
        }

        vfs_timestamp(&now);

        TMPFS_NODE_LOCK(node);
        if (node->tn_status & TMPFS_NODE_ACCESSED) {
                if (acc == NULL)
                        acc = &now;
                node->tn_atime = acc->tv_sec;
                node->tn_atimensec = acc->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_MODIFIED) {
                if (mod == NULL)
                        mod = &now;
                node->tn_mtime = mod->tv_sec;
                node->tn_mtimensec = mod->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_CHANGED) {
                node->tn_ctime = now.tv_sec;
                node->tn_ctimensec = now.tv_nsec;
        }

        node->tn_status &= ~(TMPFS_NODE_ACCESSED |
                             TMPFS_NODE_MODIFIED |
                             TMPFS_NODE_CHANGED);
        TMPFS_NODE_UNLOCK(node);
}

/* --------------------------------------------------------------------- */

void
tmpfs_update(struct vnode *vp)
{
        tmpfs_itimes(vp, NULL, NULL);
}

/* --------------------------------------------------------------------- */

/*
 * Caller must hold an exclusive node lock.
 */
int
tmpfs_truncate(struct vnode *vp, off_t length)
{
        int error;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        if (length < 0) {
                error = EINVAL;
                goto out;
        }

        if (node->tn_size == length) {
                error = 0;
                goto out;
        }

        if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
                return (EFBIG);

        error = tmpfs_reg_resize(vp, length, 1);
        if (error == 0)
                node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

out:
        tmpfs_update(vp);

        return error;
}

/* --------------------------------------------------------------------- */

static ino_t
tmpfs_fetch_ino(struct tmpfs_mount *tmp)
{
        ino_t ret;

        ret = atomic_fetchadd_64(&tmp->tm_ino, 1);

        return (ret);
}

static int
tmpfs_dirtree_compare(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
{
        if (a->td_namelen > b->td_namelen)
                return 1;
        else if (a->td_namelen < b->td_namelen)
                return -1;
        else
                return strncmp(a->td_name, b->td_name, a->td_namelen);
}

static int
tmpfs_dirtree_compare_cookie(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
{
        if (a < b)
                return (-1);
        if (a > b)
                return (1);
        return 0;
}
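
/*
 * tmpfs_dirtree_compare() orders entries by name length first and byte
 * contents second, so the name tree's order is not lexicographic.  That
 * is fine because the tree is only used for exact-match lookups, where
 * any total order works, and comparing lengths first rejects most
 * mismatches cheaply.
 */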

/*
 * Lock for rename.  The namecache entries for the related terminal files
 * are already locked but the directories are not.  A directory lock-order
 * reversal is possible, so use a deterministic order: generally lock
 * parent before child along a path, falling back to a simple pointer
 * comparison.  Probably not perfect, but it should catch most of the
 * cases.
 *
 * Underlying files must be locked after the related directory.
 */
void
tmpfs_lock4(struct tmpfs_node *node1, struct tmpfs_node *node2,
            struct tmpfs_node *node3, struct tmpfs_node *node4)
{
        if (node1->tn_dir.tn_parent != node2 &&
            (node1 < node2 || node2->tn_dir.tn_parent == node1)) {
                TMPFS_NODE_LOCK(node1);         /* fdir */
                TMPFS_NODE_LOCK(node3);         /* ffile */
                TMPFS_NODE_LOCK(node2);         /* tdir */
                if (node4)
                        TMPFS_NODE_LOCK(node4); /* tfile */
        } else {
                TMPFS_NODE_LOCK(node2);         /* tdir */
                if (node4)
                        TMPFS_NODE_LOCK(node4); /* tfile */
                TMPFS_NODE_LOCK(node1);         /* fdir */
                TMPFS_NODE_LOCK(node3);         /* ffile */
        }
}

void
tmpfs_unlock4(struct tmpfs_node *node1, struct tmpfs_node *node2,
              struct tmpfs_node *node3, struct tmpfs_node *node4)
{
        if (node4)
                TMPFS_NODE_UNLOCK(node4);
        TMPFS_NODE_UNLOCK(node2);
        TMPFS_NODE_UNLOCK(node3);
        TMPFS_NODE_UNLOCK(node1);
}
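
/*
 * tmpfs_lock4()/tmpfs_unlock4() are intended to bracket the rename work
 * as a matched pair, e.g. (sketch):
 *
 *      tmpfs_lock4(fdnode, tdnode, fnode, tnode);
 *      ... perform the rename under all four node locks ...
 *      tmpfs_unlock4(fdnode, tdnode, fnode, tnode);
 *
 * tnode may be NULL when the rename target does not yet exist.
 */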