xref: /dflybsd-src/sys/vfs/tmpfs/tmpfs_subr.c (revision 2b3f93ea6d1f70880f3e87f3c2cbe0dc0bfc9332)
17a2de9a4SMatthew Dillon /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
27a2de9a4SMatthew Dillon 
37a2de9a4SMatthew Dillon /*-
47a2de9a4SMatthew Dillon  * Copyright (c) 2005 The NetBSD Foundation, Inc.
57a2de9a4SMatthew Dillon  * All rights reserved.
67a2de9a4SMatthew Dillon  *
77a2de9a4SMatthew Dillon  * This code is derived from software contributed to The NetBSD Foundation
87a2de9a4SMatthew Dillon  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
97a2de9a4SMatthew Dillon  * 2005 program.
107a2de9a4SMatthew Dillon  *
117a2de9a4SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
127a2de9a4SMatthew Dillon  * modification, are permitted provided that the following conditions
137a2de9a4SMatthew Dillon  * are met:
147a2de9a4SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
157a2de9a4SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
167a2de9a4SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
177a2de9a4SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in the
187a2de9a4SMatthew Dillon  *    documentation and/or other materials provided with the distribution.
197a2de9a4SMatthew Dillon  *
207a2de9a4SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
217a2de9a4SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
227a2de9a4SMatthew Dillon  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
237a2de9a4SMatthew Dillon  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
247a2de9a4SMatthew Dillon  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
257a2de9a4SMatthew Dillon  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
267a2de9a4SMatthew Dillon  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
277a2de9a4SMatthew Dillon  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
287a2de9a4SMatthew Dillon  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
297a2de9a4SMatthew Dillon  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
307a2de9a4SMatthew Dillon  * POSSIBILITY OF SUCH DAMAGE.
317a2de9a4SMatthew Dillon  */
327a2de9a4SMatthew Dillon 
337a2de9a4SMatthew Dillon /*
347a2de9a4SMatthew Dillon  * Efficient memory file system supporting functions.
357a2de9a4SMatthew Dillon  */
367a2de9a4SMatthew Dillon 
377a2de9a4SMatthew Dillon #include <sys/kernel.h>
387a2de9a4SMatthew Dillon #include <sys/param.h>
39*2b3f93eaSMatthew Dillon #include <sys/caps.h>
407a2de9a4SMatthew Dillon #include <sys/proc.h>
417a2de9a4SMatthew Dillon #include <sys/stat.h>
427a2de9a4SMatthew Dillon #include <sys/systm.h>
437a2de9a4SMatthew Dillon #include <sys/vnode.h>
447a2de9a4SMatthew Dillon #include <sys/vmmeter.h>
45e9dbfea1SMatthew Dillon #include <sys/malloc.h>
467a2de9a4SMatthew Dillon 
477a2de9a4SMatthew Dillon #include <vm/vm.h>
487a2de9a4SMatthew Dillon #include <vm/vm_object.h>
497a2de9a4SMatthew Dillon #include <vm/vm_page.h>
507a2de9a4SMatthew Dillon #include <vm/vm_pager.h>
517a2de9a4SMatthew Dillon #include <vm/vm_extern.h>
529cd86db5SMatthew Dillon #include <vm/vm_pageout.h>
539cd86db5SMatthew Dillon #include <vm/vm_page2.h>
547a2de9a4SMatthew Dillon 
557a2de9a4SMatthew Dillon #include <vfs/tmpfs/tmpfs.h>
567a2de9a4SMatthew Dillon #include <vfs/tmpfs/tmpfs_vnops.h>
577a2de9a4SMatthew Dillon 
58f7db522fSVenkatesh Srinivas static ino_t tmpfs_fetch_ino(struct tmpfs_mount *);
59f5f22af6SMatthew Dillon 
6029ca4fd6SJohannes Hofmann static int tmpfs_dirtree_compare(struct tmpfs_dirent *a,
6129ca4fd6SJohannes Hofmann 	struct tmpfs_dirent *b);
6229ca4fd6SJohannes Hofmann RB_GENERATE(tmpfs_dirtree, tmpfs_dirent, rb_node, tmpfs_dirtree_compare);
6329ca4fd6SJohannes Hofmann 
64f5f22af6SMatthew Dillon static int tmpfs_dirtree_compare_cookie(struct tmpfs_dirent *a,
65f5f22af6SMatthew Dillon 	struct tmpfs_dirent *b);
66f5f22af6SMatthew Dillon RB_GENERATE(tmpfs_dirtree_cookie, tmpfs_dirent,
67f5f22af6SMatthew Dillon 	rb_cookienode, tmpfs_dirtree_compare_cookie);
68f5f22af6SMatthew Dillon 
697a2de9a4SMatthew Dillon 
707a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
717a2de9a4SMatthew Dillon 
727a2de9a4SMatthew Dillon /*
737a2de9a4SMatthew Dillon  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
747a2de9a4SMatthew Dillon  * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
767a2de9a4SMatthew Dillon  *
777a2de9a4SMatthew Dillon  * If the node type is set to 'VDIR', then the parent parameter must point
787a2de9a4SMatthew Dillon  * to the parent directory of the node being created.  It may only be NULL
797a2de9a4SMatthew Dillon  * while allocating the root node.
807a2de9a4SMatthew Dillon  *
817a2de9a4SMatthew Dillon  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
827a2de9a4SMatthew Dillon  * specifies the device the node represents.
837a2de9a4SMatthew Dillon  *
847a2de9a4SMatthew Dillon  * If the node type is set to 'VLNK', then the parameter target specifies
857a2de9a4SMatthew Dillon  * the file name of the target file for the symbolic link that is being
867a2de9a4SMatthew Dillon  * created.
877a2de9a4SMatthew Dillon  *
887a2de9a4SMatthew Dillon  * Note that new nodes are retrieved from the available list if it has
897a2de9a4SMatthew Dillon  * items or, if it is empty, from the node pool as long as there is enough
907a2de9a4SMatthew Dillon  * space to create them.
917a2de9a4SMatthew Dillon  *
927a2de9a4SMatthew Dillon  * Returns zero on success or an appropriate error code on failure.
937a2de9a4SMatthew Dillon  */
947a2de9a4SMatthew Dillon int
957a2de9a4SMatthew Dillon tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
966e0c5aabSMatthew Dillon 		 uid_t uid, gid_t gid, mode_t mode,
976e0c5aabSMatthew Dillon 		 char *target, int rmajor, int rminor,
986e0c5aabSMatthew Dillon 		 struct tmpfs_node **node)
997a2de9a4SMatthew Dillon {
1007a2de9a4SMatthew Dillon 	struct tmpfs_node *nnode;
1017a2de9a4SMatthew Dillon 	struct timespec ts;
10291ffdfc5SSascha Wildner 	dev_t rdev;
1037a2de9a4SMatthew Dillon 
1047a2de9a4SMatthew Dillon 	KKASSERT(IFF(type == VLNK, target != NULL));
1057a2de9a4SMatthew Dillon 	KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));
1067a2de9a4SMatthew Dillon 
10752ce5c29STomohiro Kusumi 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
1087a2de9a4SMatthew Dillon 		return (ENOSPC);
1097a2de9a4SMatthew Dillon 
110e9dbfea1SMatthew Dillon 	nnode = kmalloc_obj(sizeof(struct tmpfs_node), tmp->tm_node_zone,
111e9dbfea1SMatthew Dillon 			    M_WAITOK | M_ZERO | M_NULLOK);
112881dac8bSVenkatesh Srinivas 	if (nnode == NULL)
113881dac8bSVenkatesh Srinivas 		return (ENOSPC);
114e9dbfea1SMatthew Dillon 	tmpfs_node_init(nnode);
1157a2de9a4SMatthew Dillon 
1167a2de9a4SMatthew Dillon 	/* Generic initialization. */
1177a2de9a4SMatthew Dillon 	nnode->tn_type = type;
1187a2de9a4SMatthew Dillon 	vfs_timestamp(&ts);
1197a2de9a4SMatthew Dillon 	nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
1207a2de9a4SMatthew Dillon 		= ts.tv_sec;
1217a2de9a4SMatthew Dillon 	nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
1227a2de9a4SMatthew Dillon 		= ts.tv_nsec;
1237a2de9a4SMatthew Dillon 	nnode->tn_uid = uid;
1247a2de9a4SMatthew Dillon 	nnode->tn_gid = gid;
1257a2de9a4SMatthew Dillon 	nnode->tn_mode = mode;
126f7db522fSVenkatesh Srinivas 	nnode->tn_id = tmpfs_fetch_ino(tmp);
12712a5de0eSMatthew Dillon 	nnode->tn_advlock.init_done = 0;
1286e0c5aabSMatthew Dillon 	KKASSERT(nnode->tn_links == 0);
1297a2de9a4SMatthew Dillon 
1307a2de9a4SMatthew Dillon 	/* Type-specific initialization. */
1317a2de9a4SMatthew Dillon 	switch (nnode->tn_type) {
1327a2de9a4SMatthew Dillon 	case VBLK:
1337a2de9a4SMatthew Dillon 	case VCHR:
1347a2de9a4SMatthew Dillon 		rdev = makeudev(rmajor, rminor);
1357a2de9a4SMatthew Dillon 		if (rdev == NOUDEV) {
136e9dbfea1SMatthew Dillon 			tmpfs_node_uninit(nnode);
137e9dbfea1SMatthew Dillon 			kfree_obj(nnode, tmp->tm_node_zone);
1387a2de9a4SMatthew Dillon 			return(EINVAL);
1397a2de9a4SMatthew Dillon 		}
1407a2de9a4SMatthew Dillon 		nnode->tn_rdev = rdev;
1417a2de9a4SMatthew Dillon 		break;
1427a2de9a4SMatthew Dillon 
1437a2de9a4SMatthew Dillon 	case VDIR:
14429ca4fd6SJohannes Hofmann 		RB_INIT(&nnode->tn_dir.tn_dirtree);
145f5f22af6SMatthew Dillon 		RB_INIT(&nnode->tn_dir.tn_cookietree);
146a44ecf5cSMatthew Dillon 		nnode->tn_dir.tn_parent = NULL;
1477a2de9a4SMatthew Dillon 		nnode->tn_size = 0;
1487a2de9a4SMatthew Dillon 		break;
1497a2de9a4SMatthew Dillon 
1507a2de9a4SMatthew Dillon 	case VFIFO:
1517a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
1527a2de9a4SMatthew Dillon 	case VSOCK:
1537a2de9a4SMatthew Dillon 		break;
1547a2de9a4SMatthew Dillon 
1557a2de9a4SMatthew Dillon 	case VLNK:
1569fc94b5fSMatthew Dillon 		nnode->tn_size = strlen(target);
157d00cd01cSVenkatesh Srinivas 		nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
15842f6f6b1SVenkatesh Srinivas 					 M_WAITOK | M_NULLOK);
15942f6f6b1SVenkatesh Srinivas 		if (nnode->tn_link == NULL) {
160e9dbfea1SMatthew Dillon 			tmpfs_node_uninit(nnode);
161e9dbfea1SMatthew Dillon 			kfree_obj(nnode, tmp->tm_node_zone);
16242f6f6b1SVenkatesh Srinivas 			return (ENOSPC);
16342f6f6b1SVenkatesh Srinivas 		}
1647a2de9a4SMatthew Dillon 		bcopy(target, nnode->tn_link, nnode->tn_size);
1657a2de9a4SMatthew Dillon 		nnode->tn_link[nnode->tn_size] = '\0';
1667a2de9a4SMatthew Dillon 		break;
1677a2de9a4SMatthew Dillon 
1687a2de9a4SMatthew Dillon 	case VREG:
169a1b829f2SMatthew Dillon 		nnode->tn_reg.tn_aobj = swap_pager_alloc(NULL, 0,
170a1b829f2SMatthew Dillon 							 VM_PROT_DEFAULT, 0);
1717a2de9a4SMatthew Dillon 		nnode->tn_reg.tn_aobj_pages = 0;
1727a2de9a4SMatthew Dillon 		nnode->tn_size = 0;
17346b71cbeSMatthew Dillon 		vm_object_set_flag(nnode->tn_reg.tn_aobj, OBJ_NOPAGEIN);
1747a2de9a4SMatthew Dillon 		break;
1757a2de9a4SMatthew Dillon 
1767a2de9a4SMatthew Dillon 	default:
1777a2de9a4SMatthew Dillon 		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
1787a2de9a4SMatthew Dillon 	}
1797a2de9a4SMatthew Dillon 
1807a2de9a4SMatthew Dillon 	TMPFS_NODE_LOCK(nnode);
1817a2de9a4SMatthew Dillon 	TMPFS_LOCK(tmp);
1820786baf1SMatthew Dillon 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
1837a2de9a4SMatthew Dillon 	tmp->tm_nodes_inuse++;
1847a2de9a4SMatthew Dillon 	TMPFS_UNLOCK(tmp);
1857a2de9a4SMatthew Dillon 	TMPFS_NODE_UNLOCK(nnode);
1867a2de9a4SMatthew Dillon 
1877a2de9a4SMatthew Dillon 	*node = nnode;
1887a2de9a4SMatthew Dillon 	return 0;
1897a2de9a4SMatthew Dillon }
1907a2de9a4SMatthew Dillon 
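/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use tmpfs_alloc_node() to allocate a symbolic-link node.  The helper
 * name and the 0777 mode are hypothetical; VNOVAL is passed for the
 * device major/minor numbers because the node is not a device.
 */
#if 0
static int
example_alloc_symlink(struct tmpfs_mount *tmp, uid_t uid, gid_t gid,
		      char *target, struct tmpfs_node **nodep)
{
	int error;

	error = tmpfs_alloc_node(tmp, VLNK, uid, gid, 0777, target,
				 VNOVAL, VNOVAL, nodep);
	/* on success *nodep is on tmp->tm_nodes_used with tn_links == 0 */
	return error;
}
#endif
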
1917a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
1927a2de9a4SMatthew Dillon 
1937a2de9a4SMatthew Dillon /*
1947a2de9a4SMatthew Dillon  * Destroys the node pointed to by node from the file system 'tmp'.
1957a2de9a4SMatthew Dillon  * If the node does not belong to the given mount point, the results are
1967a2de9a4SMatthew Dillon  * unpredictable.
1977a2de9a4SMatthew Dillon  *
1987a2de9a4SMatthew Dillon  * If the node references a directory, it must contain no entries, because
1997a2de9a4SMatthew Dillon  * their removal could require a recursive algorithm, something forbidden in
2007a2de9a4SMatthew Dillon  * kernel space.  Furthermore, there is no need to provide such
2017a2de9a4SMatthew Dillon  * functionality (recursive removal) because the only primitives offered
2027a2de9a4SMatthew Dillon  * to the user are the removal of empty directories and the deletion of
2037a2de9a4SMatthew Dillon  * individual files.
2047a2de9a4SMatthew Dillon  *
2057a2de9a4SMatthew Dillon  * Note that nodes are not really deleted; in fact, when a node has been
2067a2de9a4SMatthew Dillon  * allocated, it cannot be deleted during the whole life of the file
2077a2de9a4SMatthew Dillon  * system.  Instead, it is moved to the available list and remains there
2087a2de9a4SMatthew Dillon  * until reused.
209c5552356STomohiro Kusumi  *
210c5552356STomohiro Kusumi  * A caller must have TMPFS_NODE_LOCK(node) and this function unlocks it.
2117a2de9a4SMatthew Dillon  */
2127a2de9a4SMatthew Dillon void
2137a2de9a4SMatthew Dillon tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
2147a2de9a4SMatthew Dillon {
21529ffeb28SMatthew Dillon 	vm_pindex_t pages = 0;
2167a2de9a4SMatthew Dillon 
2177a2de9a4SMatthew Dillon #ifdef INVARIANTS
2187a2de9a4SMatthew Dillon 	TMPFS_ASSERT_ELOCKED(node);
2197a2de9a4SMatthew Dillon 	KKASSERT(node->tn_vnode == NULL);
2207a2de9a4SMatthew Dillon #endif
2217a2de9a4SMatthew Dillon 	TMPFS_LOCK(tmp);
2220786baf1SMatthew Dillon 	LIST_REMOVE(node, tn_entries);
2237a2de9a4SMatthew Dillon 	tmp->tm_nodes_inuse--;
2247a2de9a4SMatthew Dillon 	TMPFS_UNLOCK(tmp);
225c5552356STomohiro Kusumi 	TMPFS_NODE_UNLOCK(node);  /* Caller has this lock */
2267a2de9a4SMatthew Dillon 
2277a2de9a4SMatthew Dillon 	switch (node->tn_type) {
2287a2de9a4SMatthew Dillon 	case VNON:
2297a2de9a4SMatthew Dillon 		/* Do not do anything.  VNON is provided to let the
2307a2de9a4SMatthew Dillon 		 * allocation routine clean itself easily by avoiding
2317a2de9a4SMatthew Dillon 		 * duplicating code in it. */
2327a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
2337a2de9a4SMatthew Dillon 	case VBLK:
2347a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
2357a2de9a4SMatthew Dillon 	case VCHR:
2367a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
2377a2de9a4SMatthew Dillon 		break;
2387a2de9a4SMatthew Dillon 	case VDIR:
2390786baf1SMatthew Dillon 		/*
2400786baf1SMatthew Dillon 		 * The parent link can be NULL if this is the root
2416e0c5aabSMatthew Dillon 		 * node or if it is a directory node that was rmdir'd.
2426e0c5aabSMatthew Dillon 		 *
2436e0c5aabSMatthew Dillon 		 * XXX what if node is a directory which still contains
2446e0c5aabSMatthew Dillon 		 * directory entries (e.g. due to a forced umount) ?
2450786baf1SMatthew Dillon 		 */
2467a2de9a4SMatthew Dillon 		node->tn_size = 0;
2476e0c5aabSMatthew Dillon 		KKASSERT(node->tn_dir.tn_parent == NULL);
2480786baf1SMatthew Dillon 
2490786baf1SMatthew Dillon 		/*
2500786baf1SMatthew Dillon 		 * If the root node is being destroyed don't leave a
2510786baf1SMatthew Dillon 		 * dangling pointer in tmpfs_mount.
2520786baf1SMatthew Dillon 		 */
2530786baf1SMatthew Dillon 		if (node == tmp->tm_root)
2540786baf1SMatthew Dillon 			tmp->tm_root = NULL;
2557a2de9a4SMatthew Dillon 		break;
2567a2de9a4SMatthew Dillon 	case VFIFO:
2577a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
2587a2de9a4SMatthew Dillon 	case VSOCK:
2597a2de9a4SMatthew Dillon 		break;
2607a2de9a4SMatthew Dillon 
2617a2de9a4SMatthew Dillon 	case VLNK:
262d00cd01cSVenkatesh Srinivas 		kfree(node->tn_link, tmp->tm_name_zone);
2639fc94b5fSMatthew Dillon 		node->tn_link = NULL;
2642706b587SMatthew Dillon 		node->tn_size = 0;
2657a2de9a4SMatthew Dillon 		break;
2667a2de9a4SMatthew Dillon 
2677a2de9a4SMatthew Dillon 	case VREG:
2687a2de9a4SMatthew Dillon 		if (node->tn_reg.tn_aobj != NULL)
269f96f2f39SMatthew Dillon 			vm_object_deallocate(node->tn_reg.tn_aobj);
2707a2de9a4SMatthew Dillon 		node->tn_reg.tn_aobj = NULL;
2717a2de9a4SMatthew Dillon 		pages = node->tn_reg.tn_aobj_pages;
2727a2de9a4SMatthew Dillon 		break;
2737a2de9a4SMatthew Dillon 
2747a2de9a4SMatthew Dillon 	default:
2757a2de9a4SMatthew Dillon 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
2767a2de9a4SMatthew Dillon 	}
2777a2de9a4SMatthew Dillon 
2780786baf1SMatthew Dillon 	/*
279e9dbfea1SMatthew Dillon 	 * Clean up fields as a safety before destroying the entry.
2800786baf1SMatthew Dillon 	 */
281e9dbfea1SMatthew Dillon 	tmpfs_node_uninit(node);
282e9dbfea1SMatthew Dillon 	kfree_obj(node, tmp->tm_node_zone);
2830786baf1SMatthew Dillon 	/* node is now invalid */
2847a2de9a4SMatthew Dillon 
285eae96dbbSMatthew Dillon 	if (pages)
286b37a7c00SMatthew Dillon 		atomic_add_long(&tmp->tm_pages_used, -(long)pages);
2877a2de9a4SMatthew Dillon }
2887a2de9a4SMatthew Dillon 
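/*
 * Illustrative sketch, not part of the original file: tmpfs_free_node()
 * consumes the node lock held by the caller, so the usual pattern is
 * lock-then-free with no matching unlock.  'tmp' and 'node' are assumed
 * to come from the enclosing code.
 */
#if 0
	TMPFS_NODE_LOCK(node);
	tmpfs_free_node(tmp, node);	/* eats the node lock */
#endif
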
2897a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
2907a2de9a4SMatthew Dillon 
2917a2de9a4SMatthew Dillon /*
2927a2de9a4SMatthew Dillon  * Allocates a new directory entry for the node 'node' with the name 'name'.
2937a2de9a4SMatthew Dillon  * The new directory entry is returned in *de.
2947a2de9a4SMatthew Dillon  *
2957a2de9a4SMatthew Dillon  * The link count of node is increased by one to reflect the new object
2967a2de9a4SMatthew Dillon  * referencing it.
2977a2de9a4SMatthew Dillon  *
2987a2de9a4SMatthew Dillon  * Returns zero on success or an appropriate error code on failure.
2997a2de9a4SMatthew Dillon  */
3007a2de9a4SMatthew Dillon int
3017a2de9a4SMatthew Dillon tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
3027a2de9a4SMatthew Dillon 		   const char *name, uint16_t len, struct tmpfs_dirent **de)
3037a2de9a4SMatthew Dillon {
3047a2de9a4SMatthew Dillon 	struct tmpfs_dirent *nde;
3057a2de9a4SMatthew Dillon 
306e9dbfea1SMatthew Dillon 	nde = kmalloc_obj(sizeof(struct tmpfs_dirent),
307e9dbfea1SMatthew Dillon 			  tmp->tm_dirent_zone, M_WAITOK);
308d00cd01cSVenkatesh Srinivas 	nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
30942f6f6b1SVenkatesh Srinivas 	if (nde->td_name == NULL) {
310e9dbfea1SMatthew Dillon 		kfree_obj(nde, tmp->tm_dirent_zone);
31142f6f6b1SVenkatesh Srinivas 		*de = NULL;
31242f6f6b1SVenkatesh Srinivas 		return (ENOSPC);
31342f6f6b1SVenkatesh Srinivas 	}
3147a2de9a4SMatthew Dillon 	nde->td_namelen = len;
3157a2de9a4SMatthew Dillon 	bcopy(name, nde->td_name, len);
3167a2de9a4SMatthew Dillon 	nde->td_name[len] = '\0';
3177a2de9a4SMatthew Dillon 
3187a2de9a4SMatthew Dillon 	nde->td_node = node;
3197a2de9a4SMatthew Dillon 
3204d22d8eeSMatthew Dillon 	atomic_add_int(&node->tn_links, 1);
3217a2de9a4SMatthew Dillon 
3227a2de9a4SMatthew Dillon 	*de = nde;
3237a2de9a4SMatthew Dillon 
3247a2de9a4SMatthew Dillon 	return 0;
3257a2de9a4SMatthew Dillon }
3267a2de9a4SMatthew Dillon 
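/*
 * Illustrative sketch, not part of the original file: pairing
 * tmpfs_alloc_dirent() with tmpfs_dir_attach_locked().  The dirent takes
 * one tn_links reference on 'node'; the extra parent/child references for
 * directories are added by the attach itself.  'tmp', 'node', 'dnode',
 * 'name' and 'namelen' are assumed to be supplied by the caller.
 */
#if 0
	struct tmpfs_dirent *de;
	int error;

	error = tmpfs_alloc_dirent(tmp, node, name, namelen, &de);
	if (error == 0) {
		TMPFS_NODE_LOCK(dnode);
		tmpfs_dir_attach_locked(dnode, de);
		TMPFS_NODE_UNLOCK(dnode);
	}
#endif
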
3277a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
3287a2de9a4SMatthew Dillon 
3297a2de9a4SMatthew Dillon /*
3307a2de9a4SMatthew Dillon  * Frees a directory entry.  It is the caller's responsibility to destroy
3317a2de9a4SMatthew Dillon  * the node referenced by it if needed.
3327a2de9a4SMatthew Dillon  *
3337a2de9a4SMatthew Dillon  * The link count of the node referenced by the directory entry is
3347a2de9a4SMatthew Dillon  * decreased by one to reflect the removal of an object that referenced it.
3377a2de9a4SMatthew Dillon  */
3387a2de9a4SMatthew Dillon void
3390786baf1SMatthew Dillon tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
3407a2de9a4SMatthew Dillon {
3417a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
3427a2de9a4SMatthew Dillon 
3437a2de9a4SMatthew Dillon 	node = de->td_node;
3447a2de9a4SMatthew Dillon 
3457a2de9a4SMatthew Dillon 	KKASSERT(node->tn_links > 0);
3464d22d8eeSMatthew Dillon 	atomic_add_int(&node->tn_links, -1);
3477a2de9a4SMatthew Dillon 
348d00cd01cSVenkatesh Srinivas 	kfree(de->td_name, tmp->tm_name_zone);
3490786baf1SMatthew Dillon 	de->td_namelen = 0;
3509fc94b5fSMatthew Dillon 	de->td_name = NULL;
3510786baf1SMatthew Dillon 	de->td_node = NULL;
352e9dbfea1SMatthew Dillon 	kfree_obj(de, tmp->tm_dirent_zone);
3537a2de9a4SMatthew Dillon }
3547a2de9a4SMatthew Dillon 
3557a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
3567a2de9a4SMatthew Dillon 
3577a2de9a4SMatthew Dillon /*
3587a2de9a4SMatthew Dillon  * Allocates a new vnode for the node 'node' or returns a new reference to
3597a2de9a4SMatthew Dillon  * an existing one if the node already had a vnode referencing it.  The
3607a2de9a4SMatthew Dillon  * resulting locked vnode is returned in *vpp.
3617a2de9a4SMatthew Dillon  *
3627a2de9a4SMatthew Dillon  * Returns zero on success or an appropriate error code on failure.
363d89a0e31SMatthew Dillon  *
364d89a0e31SMatthew Dillon  * The caller must ensure that node cannot go away (usually by holding
365d89a0e31SMatthew Dillon  * the related directory entry).
366d89a0e31SMatthew Dillon  *
367d89a0e31SMatthew Dillon  * If dnode is non-NULL this routine avoids deadlocking against it but
368d89a0e31SMatthew Dillon  * can return EAGAIN.  The caller must try again.  The dnode lock will
369d89a0e31SMatthew Dillon  * cycle in this case, but it remains locked on return in all cases.
370d89a0e31SMatthew Dillon  * dnode must be shared-locked.
3717a2de9a4SMatthew Dillon  */
3727a2de9a4SMatthew Dillon int
373d89a0e31SMatthew Dillon tmpfs_alloc_vp(struct mount *mp,
374d89a0e31SMatthew Dillon 	       struct tmpfs_node *dnode, struct tmpfs_node *node, int lkflag,
3757a2de9a4SMatthew Dillon 	       struct vnode **vpp)
3767a2de9a4SMatthew Dillon {
3777a2de9a4SMatthew Dillon 	int error = 0;
3787a2de9a4SMatthew Dillon 	struct vnode *vp;
3797a2de9a4SMatthew Dillon 
3807a2de9a4SMatthew Dillon loop:
381a44ecf5cSMatthew Dillon 	vp = NULL;
382a44ecf5cSMatthew Dillon 	if (node->tn_vnode == NULL) {
383a44ecf5cSMatthew Dillon 		error = getnewvnode(VT_TMPFS, mp, &vp,
384a44ecf5cSMatthew Dillon 				    VLKTIMEOUT, LK_CANRECURSE);
385a44ecf5cSMatthew Dillon 		if (error)
386a44ecf5cSMatthew Dillon 			goto out;
387a44ecf5cSMatthew Dillon 	}
388a44ecf5cSMatthew Dillon 
3892706b587SMatthew Dillon 	/*
3902706b587SMatthew Dillon 	 * Interlocked extraction from node.  This can race many things.
3912706b587SMatthew Dillon 	 * We have to get a soft reference on the vnode while we hold
3922706b587SMatthew Dillon 	 * the node locked, then acquire it properly and check for races.
3932706b587SMatthew Dillon 	 */
3947a2de9a4SMatthew Dillon 	TMPFS_NODE_LOCK(node);
395a44ecf5cSMatthew Dillon 	if (node->tn_vnode) {
396a44ecf5cSMatthew Dillon 		if (vp) {
397a44ecf5cSMatthew Dillon 			vp->v_type = VBAD;
398a44ecf5cSMatthew Dillon 			vx_put(vp);
399a44ecf5cSMatthew Dillon 		}
400a44ecf5cSMatthew Dillon 		vp = node->tn_vnode;
401a44ecf5cSMatthew Dillon 
4027a2de9a4SMatthew Dillon 		KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
403ee173d09SSascha Wildner 		vhold(vp);
4047a2de9a4SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
4057a2de9a4SMatthew Dillon 
406d89a0e31SMatthew Dillon 		if (dnode) {
407d89a0e31SMatthew Dillon 			/*
408d89a0e31SMatthew Dillon 			 * Special-case handling to avoid deadlocking against
40910981c75SMatthew Dillon 			 * dnode.  This case has been validated and occurs
41013cc9baaSMatthew Dillon 			 * every so often during synth builds and in other
41113cc9baaSMatthew Dillon 			 * situations.
412d89a0e31SMatthew Dillon 			 */
413d89a0e31SMatthew Dillon 			if (vget(vp, (lkflag & ~LK_RETRY) |
414d89a0e31SMatthew Dillon 				     LK_NOWAIT |
41513cc9baaSMatthew Dillon 				     LK_EXCLUSIVE) != 0)
41613cc9baaSMatthew Dillon 			{
417d89a0e31SMatthew Dillon 				TMPFS_NODE_UNLOCK(dnode);
418d89a0e31SMatthew Dillon 				if (vget(vp, (lkflag & ~LK_RETRY) |
419d89a0e31SMatthew Dillon 					     LK_SLEEPFAIL |
420d89a0e31SMatthew Dillon 					     LK_EXCLUSIVE) == 0) {
42179721fbfSMatthew Dillon 					vput(vp);
422d89a0e31SMatthew Dillon 				}
423d89a0e31SMatthew Dillon 				vdrop(vp);
424d89a0e31SMatthew Dillon 				TMPFS_NODE_LOCK_SH(dnode);
42510981c75SMatthew Dillon 
426d89a0e31SMatthew Dillon 				return EAGAIN;
427d89a0e31SMatthew Dillon 			}
428d89a0e31SMatthew Dillon 		} else {
429d89a0e31SMatthew Dillon 			/*
430d89a0e31SMatthew Dillon 			 * Normal path
431d89a0e31SMatthew Dillon 			 */
4322706b587SMatthew Dillon 			if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
4332706b587SMatthew Dillon 				vdrop(vp);
4347a2de9a4SMatthew Dillon 				goto loop;
4357a2de9a4SMatthew Dillon 			}
436d89a0e31SMatthew Dillon 		}
4372706b587SMatthew Dillon 		if (node->tn_vnode != vp) {
4382706b587SMatthew Dillon 			vput(vp);
4392706b587SMatthew Dillon 			vdrop(vp);
4402706b587SMatthew Dillon 			goto loop;
4412706b587SMatthew Dillon 		}
4422706b587SMatthew Dillon 		vdrop(vp);
4437a2de9a4SMatthew Dillon 		goto out;
4447a2de9a4SMatthew Dillon 	}
445a44ecf5cSMatthew Dillon 
446a44ecf5cSMatthew Dillon 	/*
447a44ecf5cSMatthew Dillon 	 * We need to assign node->tn_vnode.  If vp is NULL, loop up to
448a44ecf5cSMatthew Dillon 	 * allocate the vp.  This can happen due to SMP races.
449a44ecf5cSMatthew Dillon 	 */
4504d22d8eeSMatthew Dillon 	if (vp == NULL) {
45177206c5dSMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
452a44ecf5cSMatthew Dillon 		goto loop;
4534d22d8eeSMatthew Dillon 	}
4547a2de9a4SMatthew Dillon 
4550786baf1SMatthew Dillon 	/*
4560786baf1SMatthew Dillon 	 * This should never happen.
4570786baf1SMatthew Dillon 	 */
4580786baf1SMatthew Dillon 	if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
4597a2de9a4SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
460a44ecf5cSMatthew Dillon 		vp->v_type = VBAD;
461a44ecf5cSMatthew Dillon 		vx_put(vp);
4627a2de9a4SMatthew Dillon 		error = ENOENT;
4637a2de9a4SMatthew Dillon 		goto out;
4647a2de9a4SMatthew Dillon 	}
4657a2de9a4SMatthew Dillon 
4662706b587SMatthew Dillon 	KKASSERT(node->tn_vnode == NULL);
4672706b587SMatthew Dillon 	KKASSERT(vp != NULL);
4687a2de9a4SMatthew Dillon 	vp->v_data = node;
4697a2de9a4SMatthew Dillon 	vp->v_type = node->tn_type;
4707a2de9a4SMatthew Dillon 
4717a2de9a4SMatthew Dillon 	/* Type-specific initialization. */
4727a2de9a4SMatthew Dillon 	switch (node->tn_type) {
4737a2de9a4SMatthew Dillon 	case VBLK:
4747a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
4757a2de9a4SMatthew Dillon 	case VCHR:
4767a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
4777a2de9a4SMatthew Dillon 	case VSOCK:
4787a2de9a4SMatthew Dillon 		break;
4797a2de9a4SMatthew Dillon 	case VREG:
48048db4e20SMatthew Dillon 		/*
48148db4e20SMatthew Dillon 		 * VMIO is mandatory.  Tmpfs also supports KVABIO
48248db4e20SMatthew Dillon 		 * for its tmpfs_strategy().
48348db4e20SMatthew Dillon 		 */
48448db4e20SMatthew Dillon 		vsetflags(vp, VKVABIO);
4859cd86db5SMatthew Dillon 		vinitvmio(vp, node->tn_size, node->tn_blksize, -1);
4867a2de9a4SMatthew Dillon 		break;
4877a2de9a4SMatthew Dillon 	case VLNK:
4887a2de9a4SMatthew Dillon 		break;
4897a2de9a4SMatthew Dillon 	case VFIFO:
4907a2de9a4SMatthew Dillon 		vp->v_ops = &mp->mnt_vn_fifo_ops;
4917a2de9a4SMatthew Dillon 		break;
4927a2de9a4SMatthew Dillon 	case VDIR:
4937a2de9a4SMatthew Dillon 		break;
4947a2de9a4SMatthew Dillon 
4957a2de9a4SMatthew Dillon 	default:
4967a2de9a4SMatthew Dillon 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
4977a2de9a4SMatthew Dillon 	}
4987a2de9a4SMatthew Dillon 
4997a2de9a4SMatthew Dillon 	node->tn_vnode = vp;
5007a2de9a4SMatthew Dillon 	TMPFS_NODE_UNLOCK(node);
5017a2de9a4SMatthew Dillon 
502fc36a10bSMatthew Dillon 	vx_downgrade(vp);
5037a2de9a4SMatthew Dillon out:
5047a2de9a4SMatthew Dillon 	*vpp = vp;
5057a2de9a4SMatthew Dillon 	KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
5067a2de9a4SMatthew Dillon 
5077a2de9a4SMatthew Dillon 	return error;
5087a2de9a4SMatthew Dillon }
5097a2de9a4SMatthew Dillon 
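/*
 * Illustrative sketch, not part of the original file: resolving a vnode
 * while the parent directory node is held shared-locked, as a lookup path
 * would.  tmpfs_alloc_vp() may return EAGAIN in this configuration; the
 * caller simply retries, knowing dnode is still locked on return (though
 * its lock may have been cycled).  'mp', 'dnode' and 'de' are assumed to
 * come from the enclosing lookup.
 */
#if 0
	struct vnode *vp;
	int error;

	do {
		error = tmpfs_alloc_vp(mp, dnode, de->td_node,
				       LK_EXCLUSIVE, &vp);
	} while (error == EAGAIN);
#endif
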
5107a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
5117a2de9a4SMatthew Dillon 
5127a2de9a4SMatthew Dillon /*
5137a2de9a4SMatthew Dillon  * Allocates a new file of type 'type' and adds it to the parent directory
5147a2de9a4SMatthew Dillon  * 'dvp'; this addition is done using the component name given in 'cnp'.
5157a2de9a4SMatthew Dillon  * The ownership of the new file is automatically assigned based on the
5167a2de9a4SMatthew Dillon  * credentials of the caller (through 'cnp'), the group is set based on
5177a2de9a4SMatthew Dillon  * the parent directory and the mode is determined from the 'vap' argument.
5187a2de9a4SMatthew Dillon  * If successful, *vpp holds a vnode to the newly created file and zero
5197a2de9a4SMatthew Dillon  * is returned.  Otherwise *vpp is NULL and the function returns an
5207a2de9a4SMatthew Dillon  * appropriate error code.
5217a2de9a4SMatthew Dillon  */
5227a2de9a4SMatthew Dillon int
5237a2de9a4SMatthew Dillon tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
5247a2de9a4SMatthew Dillon 		 struct namecache *ncp, struct ucred *cred, char *target)
5257a2de9a4SMatthew Dillon {
5267a2de9a4SMatthew Dillon 	int error;
5277a2de9a4SMatthew Dillon 	struct tmpfs_dirent *de;
5287a2de9a4SMatthew Dillon 	struct tmpfs_mount *tmp;
5297a2de9a4SMatthew Dillon 	struct tmpfs_node *dnode;
5307a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
5317a2de9a4SMatthew Dillon 
5327a2de9a4SMatthew Dillon 	tmp = VFS_TO_TMPFS(dvp->v_mount);
5337a2de9a4SMatthew Dillon 	dnode = VP_TO_TMPFS_DIR(dvp);
5347a2de9a4SMatthew Dillon 	*vpp = NULL;
5357a2de9a4SMatthew Dillon 
536307bf766SMatthew Dillon 	TMPFS_NODE_LOCK(dnode);
537307bf766SMatthew Dillon 
5386e0c5aabSMatthew Dillon 	/*
5396e0c5aabSMatthew Dillon 	 * If the directory was removed but a process was CD'd into it,
5406e0c5aabSMatthew Dillon 	 * we do not allow any more file/dir creation within it.  Otherwise
5416e0c5aabSMatthew Dillon 	 * we will lose track of it.
5426e0c5aabSMatthew Dillon 	 */
5436e0c5aabSMatthew Dillon 	KKASSERT(dnode->tn_type == VDIR);
544307bf766SMatthew Dillon 	if (dnode != tmp->tm_root && dnode->tn_dir.tn_parent == NULL) {
545307bf766SMatthew Dillon 		TMPFS_NODE_UNLOCK(dnode);
5466e0c5aabSMatthew Dillon 		return ENOENT;
547307bf766SMatthew Dillon 	}
5487a2de9a4SMatthew Dillon 
5496e0c5aabSMatthew Dillon 	/*
5506e0c5aabSMatthew Dillon 	 * Make sure the link count does not overflow.
5516e0c5aabSMatthew Dillon 	 */
552307bf766SMatthew Dillon 	if (vap->va_type == VDIR && dnode->tn_links >= LINK_MAX) {
553307bf766SMatthew Dillon 		TMPFS_NODE_UNLOCK(dnode);
5546e0c5aabSMatthew Dillon 		return EMLINK;
555307bf766SMatthew Dillon 	}
5567a2de9a4SMatthew Dillon 
5577a2de9a4SMatthew Dillon 	/* Allocate a node that represents the new file. */
5587a2de9a4SMatthew Dillon 	error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
5596e0c5aabSMatthew Dillon 				 dnode->tn_gid, vap->va_mode, target,
5606e0c5aabSMatthew Dillon 				 vap->va_rmajor, vap->va_rminor, &node);
561307bf766SMatthew Dillon 	if (error != 0) {
562307bf766SMatthew Dillon 		TMPFS_NODE_UNLOCK(dnode);
5637a2de9a4SMatthew Dillon 		return error;
564307bf766SMatthew Dillon 	}
5650786baf1SMatthew Dillon 	TMPFS_NODE_LOCK(node);
5667a2de9a4SMatthew Dillon 
5677a2de9a4SMatthew Dillon 	/* Allocate a directory entry that points to the new file. */
5680786baf1SMatthew Dillon 	error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
5697a2de9a4SMatthew Dillon 	if (error != 0) {
570307bf766SMatthew Dillon 		TMPFS_NODE_UNLOCK(dnode);
5717a2de9a4SMatthew Dillon 		tmpfs_free_node(tmp, node);
5720786baf1SMatthew Dillon 		/* eats node lock */
5737a2de9a4SMatthew Dillon 		return error;
5747a2de9a4SMatthew Dillon 	}
5757a2de9a4SMatthew Dillon 
5767a2de9a4SMatthew Dillon 	/* Allocate a vnode for the new file. */
577d89a0e31SMatthew Dillon 	error = tmpfs_alloc_vp(dvp->v_mount, NULL, node, LK_EXCLUSIVE, vpp);
5787a2de9a4SMatthew Dillon 	if (error != 0) {
579307bf766SMatthew Dillon 		TMPFS_NODE_UNLOCK(dnode);
5800786baf1SMatthew Dillon 		tmpfs_free_dirent(tmp, de);
5817a2de9a4SMatthew Dillon 		tmpfs_free_node(tmp, node);
5820786baf1SMatthew Dillon 		/* eats node lock */
5837a2de9a4SMatthew Dillon 		return error;
5847a2de9a4SMatthew Dillon 	}
5857a2de9a4SMatthew Dillon 
5866e0c5aabSMatthew Dillon 	/*
5876e0c5aabSMatthew Dillon 	 * Now that all required items are allocated, we can proceed to
5887a2de9a4SMatthew Dillon 	 * insert the new node into the directory, an operation that
5896e0c5aabSMatthew Dillon 	 * cannot fail.
5906e0c5aabSMatthew Dillon 	 */
591307bf766SMatthew Dillon 	tmpfs_dir_attach_locked(dnode, de);
592307bf766SMatthew Dillon 	TMPFS_NODE_UNLOCK(dnode);
5930786baf1SMatthew Dillon 	TMPFS_NODE_UNLOCK(node);
5947a2de9a4SMatthew Dillon 
5957a2de9a4SMatthew Dillon 	return error;
5967a2de9a4SMatthew Dillon }
5977a2de9a4SMatthew Dillon 
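/*
 * Illustrative sketch, not part of the original file: the general shape of
 * a create-style VOP built on tmpfs_alloc_file().  Namecache handling and
 * the surrounding VOP plumbing are omitted; the function name and argument
 * set are assumptions.
 */
#if 0
static int
example_ncreate(struct vnode *dvp, struct namecache *ncp,
		struct vattr *vap, struct ucred *cred, struct vnode **vpp)
{
	/*
	 * target is NULL because this is not a symlink.  On success *vpp
	 * is returned as a locked vnode for the newly created file.
	 */
	return tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
}
#endif
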
5987a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
5997a2de9a4SMatthew Dillon 
6007a2de9a4SMatthew Dillon /*
6019855a822STomohiro Kusumi  * Attaches the directory entry de to the directory represented by dnode.
6027a2de9a4SMatthew Dillon  * Note that this does not change the link count of the node pointed by
6037a2de9a4SMatthew Dillon  * the directory entry, as this is done by tmpfs_alloc_dirent.
604307bf766SMatthew Dillon  *
605307bf766SMatthew Dillon  * dnode must be locked.
6067a2de9a4SMatthew Dillon  */
6077a2de9a4SMatthew Dillon void
608307bf766SMatthew Dillon tmpfs_dir_attach_locked(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
6097a2de9a4SMatthew Dillon {
6106e0c5aabSMatthew Dillon 	struct tmpfs_node *node = de->td_node;
611fd67ed24SMatthew Dillon 	struct tmpfs_dirent *de2;
6127a2de9a4SMatthew Dillon 
6136e0c5aabSMatthew Dillon 	if (node && node->tn_type == VDIR) {
6146e0c5aabSMatthew Dillon 		TMPFS_NODE_LOCK(node);
6154d22d8eeSMatthew Dillon 		atomic_add_int(&node->tn_links, 1);
6166e0c5aabSMatthew Dillon 		node->tn_status |= TMPFS_NODE_CHANGED;
6176e0c5aabSMatthew Dillon 		node->tn_dir.tn_parent = dnode;
6184d22d8eeSMatthew Dillon 		atomic_add_int(&dnode->tn_links, 1);
6196e0c5aabSMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
6206e0c5aabSMatthew Dillon 	}
621fd67ed24SMatthew Dillon 	de2 = RB_INSERT(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
622fd67ed24SMatthew Dillon 	KASSERT(de2 == NULL,
623fd67ed24SMatthew Dillon 		("tmpfs_dir_attach_lockedA: duplicate insertion of %p, has %p\n",
624fd67ed24SMatthew Dillon 		de, de2));
625fd67ed24SMatthew Dillon 	de2 = RB_INSERT(tmpfs_dirtree_cookie, &dnode->tn_dir.tn_cookietree, de);
626fd67ed24SMatthew Dillon 	KASSERT(de2 == NULL,
627fd67ed24SMatthew Dillon 		("tmpfs_dir_attach_lockedB: duplicate insertion of %p, has %p\n",
628fd67ed24SMatthew Dillon 		de, de2));
6297a2de9a4SMatthew Dillon 	dnode->tn_size += sizeof(struct tmpfs_dirent);
63022d3b394SMatthew Dillon 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
6317a2de9a4SMatthew Dillon 			    TMPFS_NODE_MODIFIED;
6327a2de9a4SMatthew Dillon }
6337a2de9a4SMatthew Dillon 
6347a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
6357a2de9a4SMatthew Dillon 
6367a2de9a4SMatthew Dillon /*
6379855a822STomohiro Kusumi  * Detaches the directory entry de from the directory represented by dnode.
6387a2de9a4SMatthew Dillon  * Note that this does not change the link count of the node pointed by
6397a2de9a4SMatthew Dillon  * the directory entry, as this is done by tmpfs_free_dirent.
640307bf766SMatthew Dillon  *
641307bf766SMatthew Dillon  * dnode must be locked.
6427a2de9a4SMatthew Dillon  */
6437a2de9a4SMatthew Dillon void
644307bf766SMatthew Dillon tmpfs_dir_detach_locked(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
6457a2de9a4SMatthew Dillon {
6466e0c5aabSMatthew Dillon 	struct tmpfs_node *node = de->td_node;
6476e0c5aabSMatthew Dillon 
64829ca4fd6SJohannes Hofmann 	RB_REMOVE(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);
649f5f22af6SMatthew Dillon 	RB_REMOVE(tmpfs_dirtree_cookie, &dnode->tn_dir.tn_cookietree, de);
6507a2de9a4SMatthew Dillon 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
65122d3b394SMatthew Dillon 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
6527a2de9a4SMatthew Dillon 			    TMPFS_NODE_MODIFIED;
6536e0c5aabSMatthew Dillon 
6546e0c5aabSMatthew Dillon 	/*
6556e0c5aabSMatthew Dillon 	 * Clean out the tn_parent pointer immediately when removing a
6566e0c5aabSMatthew Dillon 	 * directory.
6576e0c5aabSMatthew Dillon 	 *
6586e0c5aabSMatthew Dillon 	 * Removal of the parent linkage also cleans out the extra tn_links
6596e0c5aabSMatthew Dillon 	 * count we had on both node and dnode.
6606e0c5aabSMatthew Dillon 	 *
6616e0c5aabSMatthew Dillon 	 * node can be NULL (typically during a forced umount), in which case
6626e0c5aabSMatthew Dillon 	 * the mount code is dealing with the linkages from a linked list
6636e0c5aabSMatthew Dillon 	 * scan.
6646e0c5aabSMatthew Dillon 	 */
6656e0c5aabSMatthew Dillon 	if (node && node->tn_type == VDIR && node->tn_dir.tn_parent) {
6666e0c5aabSMatthew Dillon 		TMPFS_NODE_LOCK(node);
6676e0c5aabSMatthew Dillon 		KKASSERT(node->tn_dir.tn_parent == dnode);
6684d22d8eeSMatthew Dillon 		atomic_add_int(&dnode->tn_links, -1);
6694d22d8eeSMatthew Dillon 		atomic_add_int(&node->tn_links, -1);
6706e0c5aabSMatthew Dillon 		node->tn_dir.tn_parent = NULL;
6716e0c5aabSMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
6726e0c5aabSMatthew Dillon 	}
6737a2de9a4SMatthew Dillon }
6747a2de9a4SMatthew Dillon 
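/*
 * Illustrative sketch, not part of the original file: the detach side of
 * removing a name from a directory.  The dirent is detached while the
 * directory is locked and then freed; the node itself is normally
 * reclaimed later through its vnode rather than freed here.  'tmp',
 * 'dnode' and 'de' are assumed to be supplied by the caller.
 */
#if 0
	TMPFS_NODE_LOCK(dnode);
	tmpfs_dir_detach_locked(dnode, de);
	TMPFS_NODE_UNLOCK(dnode);
	tmpfs_free_dirent(tmp, de);	/* drops the dirent's tn_links ref */
#endif
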
6757a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
6767a2de9a4SMatthew Dillon 
6777a2de9a4SMatthew Dillon /*
6787a2de9a4SMatthew Dillon  * Looks for a directory entry in the directory represented by node.
6797a2de9a4SMatthew Dillon  * 'ncp' describes the name of the entry to look for.  Note that the .
6807a2de9a4SMatthew Dillon  * and .. components are not allowed as they do not physically exist
6817a2de9a4SMatthew Dillon  * within directories.
6827a2de9a4SMatthew Dillon  *
6837a2de9a4SMatthew Dillon  * Returns a pointer to the entry when found, otherwise NULL.
684ff837cd5SMatthew Dillon  *
685ff837cd5SMatthew Dillon  * Caller must hold the node locked (shared ok)
6867a2de9a4SMatthew Dillon  */
6877a2de9a4SMatthew Dillon struct tmpfs_dirent *
6887a2de9a4SMatthew Dillon tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
6897a2de9a4SMatthew Dillon 		 struct namecache *ncp)
6907a2de9a4SMatthew Dillon {
6917a2de9a4SMatthew Dillon 	struct tmpfs_dirent *de;
6927a2de9a4SMatthew Dillon 	int len = ncp->nc_nlen;
69329ca4fd6SJohannes Hofmann 	struct tmpfs_dirent wanted;
69429ca4fd6SJohannes Hofmann 
69529ca4fd6SJohannes Hofmann 	wanted.td_namelen = len;
69629ca4fd6SJohannes Hofmann 	wanted.td_name = ncp->nc_name;
6977a2de9a4SMatthew Dillon 
6987a2de9a4SMatthew Dillon 	TMPFS_VALIDATE_DIR(node);
6997a2de9a4SMatthew Dillon 
70029ca4fd6SJohannes Hofmann 	de = RB_FIND(tmpfs_dirtree, &node->tn_dir.tn_dirtree, &wanted);
70129ca4fd6SJohannes Hofmann 
702df5bf90cSMatthew Dillon 	KASSERT((f == NULL || de == NULL || f == de->td_node),
703df5bf90cSMatthew Dillon 		("tmpfs_dir_lookup: Incorrect node %p %p %p",
704df5bf90cSMatthew Dillon 		 f, de, (de ? de->td_node : NULL)));
7057a2de9a4SMatthew Dillon 
7069fc94b5fSMatthew Dillon 	return de;
7077a2de9a4SMatthew Dillon }
7087a2de9a4SMatthew Dillon 
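/*
 * Illustrative sketch, not part of the original file: a name lookup in a
 * directory.  A shared lock on the directory node is sufficient; 'dnode'
 * and 'ncp' are assumed to be supplied by the caller.
 */
#if 0
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;

	TMPFS_NODE_LOCK_SH(dnode);
	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	node = (de != NULL) ? de->td_node : NULL;
	TMPFS_NODE_UNLOCK(dnode);
#endif
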
7097a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
7107a2de9a4SMatthew Dillon 
7117a2de9a4SMatthew Dillon /*
7127a2de9a4SMatthew Dillon  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
7137a2de9a4SMatthew Dillon  * directory and returns it in the uio space.  The function returns 0
7147a2de9a4SMatthew Dillon  * on success, -1 if there was not enough space in the uio structure to
7157a2de9a4SMatthew Dillon  * hold the directory entry or an appropriate error code if another
7167a2de9a4SMatthew Dillon  * error happens.
7177a2de9a4SMatthew Dillon  */
7187a2de9a4SMatthew Dillon int
7197a2de9a4SMatthew Dillon tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
7207a2de9a4SMatthew Dillon {
7217a2de9a4SMatthew Dillon 	int error;
7227a2de9a4SMatthew Dillon 
7237a2de9a4SMatthew Dillon 	TMPFS_VALIDATE_DIR(node);
7247a2de9a4SMatthew Dillon 	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
7257a2de9a4SMatthew Dillon 
726e4f20828STomohiro Kusumi 	if (vop_write_dirent(&error, uio, node->tn_id, DT_DIR, 1, "."))
727e4f20828STomohiro Kusumi 		return -1;
7287a2de9a4SMatthew Dillon 	if (error == 0)
7297a2de9a4SMatthew Dillon 		uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
7307a2de9a4SMatthew Dillon 	return error;
7317a2de9a4SMatthew Dillon }
7327a2de9a4SMatthew Dillon 
7337a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
7347a2de9a4SMatthew Dillon 
7357a2de9a4SMatthew Dillon /*
7367a2de9a4SMatthew Dillon  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
7377a2de9a4SMatthew Dillon  * directory and returns it in the uio space.  The function returns 0
7387a2de9a4SMatthew Dillon  * on success, -1 if there was not enough space in the uio structure to
7397a2de9a4SMatthew Dillon  * hold the directory entry or an appropriate error code if another
7407a2de9a4SMatthew Dillon  * error happens.
7417a2de9a4SMatthew Dillon  */
7427a2de9a4SMatthew Dillon int
74322d3b394SMatthew Dillon tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
74422d3b394SMatthew Dillon 			struct uio *uio)
7457a2de9a4SMatthew Dillon {
7467a2de9a4SMatthew Dillon 	int error;
747e4f20828STomohiro Kusumi 	ino_t d_ino;
7487a2de9a4SMatthew Dillon 
7497a2de9a4SMatthew Dillon 	TMPFS_VALIDATE_DIR(node);
7507a2de9a4SMatthew Dillon 	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
7517a2de9a4SMatthew Dillon 
75222d3b394SMatthew Dillon 	if (node->tn_dir.tn_parent) {
753b08327b7SMatthew Dillon 		TMPFS_NODE_LOCK(node);
754b08327b7SMatthew Dillon 		if (node->tn_dir.tn_parent)
755e4f20828STomohiro Kusumi 			d_ino = node->tn_dir.tn_parent->tn_id;
756b08327b7SMatthew Dillon 		else
757e4f20828STomohiro Kusumi 			d_ino = tmp->tm_root->tn_id;
758b08327b7SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
75922d3b394SMatthew Dillon 	} else {
760e4f20828STomohiro Kusumi 		d_ino = tmp->tm_root->tn_id;
76122d3b394SMatthew Dillon 	}
7627a2de9a4SMatthew Dillon 
763e4f20828STomohiro Kusumi 	if (vop_write_dirent(&error, uio, d_ino, DT_DIR, 2, ".."))
764e4f20828STomohiro Kusumi 		return -1;
7657a2de9a4SMatthew Dillon 	if (error == 0) {
7667a2de9a4SMatthew Dillon 		struct tmpfs_dirent *de;
767e4f20828STomohiro Kusumi 		de = RB_MIN(tmpfs_dirtree_cookie, &node->tn_dir.tn_cookietree);
7687a2de9a4SMatthew Dillon 		if (de == NULL)
7697a2de9a4SMatthew Dillon 			uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
7707a2de9a4SMatthew Dillon 		else
7717a2de9a4SMatthew Dillon 			uio->uio_offset = tmpfs_dircookie(de);
7727a2de9a4SMatthew Dillon 	}
7737a2de9a4SMatthew Dillon 	return error;
7747a2de9a4SMatthew Dillon }
7757a2de9a4SMatthew Dillon 
7767a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
7777a2de9a4SMatthew Dillon 
7787a2de9a4SMatthew Dillon /*
7797a2de9a4SMatthew Dillon  * Lookup a directory entry by its associated cookie.
780f5f22af6SMatthew Dillon  *
781f5f22af6SMatthew Dillon  * Must be called with the directory node locked (shared ok)
7827a2de9a4SMatthew Dillon  */
783e91c5b26SMatthew Dillon #if 0
784e91c5b26SMatthew Dillon 
785f5f22af6SMatthew Dillon struct lubycookie_info {
786f5f22af6SMatthew Dillon 	off_t	cookie;
787f5f22af6SMatthew Dillon 	struct tmpfs_dirent *de;
788f5f22af6SMatthew Dillon };
789f5f22af6SMatthew Dillon 
790f5f22af6SMatthew Dillon static int
791f5f22af6SMatthew Dillon lubycookie_cmp(struct tmpfs_dirent *de, void *arg)
792f5f22af6SMatthew Dillon {
793f5f22af6SMatthew Dillon 	struct lubycookie_info *info = arg;
794f5f22af6SMatthew Dillon 	off_t cookie = tmpfs_dircookie(de);
795f5f22af6SMatthew Dillon 
796f5f22af6SMatthew Dillon 	if (cookie < info->cookie)
797f5f22af6SMatthew Dillon 		return(-1);
798f5f22af6SMatthew Dillon 	if (cookie > info->cookie)
799f5f22af6SMatthew Dillon 		return(1);
800f5f22af6SMatthew Dillon 	return(0);
801f5f22af6SMatthew Dillon }
802f5f22af6SMatthew Dillon 
803f5f22af6SMatthew Dillon static int
804f5f22af6SMatthew Dillon lubycookie_callback(struct tmpfs_dirent *de, void *arg)
805f5f22af6SMatthew Dillon {
806f5f22af6SMatthew Dillon 	struct lubycookie_info *info = arg;
807f5f22af6SMatthew Dillon 
808f5f22af6SMatthew Dillon 	if (tmpfs_dircookie(de) == info->cookie) {
809f5f22af6SMatthew Dillon 		info->de = de;
810f5f22af6SMatthew Dillon 		return(-1);
811f5f22af6SMatthew Dillon 	}
812f5f22af6SMatthew Dillon 	return(0);
813f5f22af6SMatthew Dillon }
814f5f22af6SMatthew Dillon 
815e91c5b26SMatthew Dillon #endif
816e91c5b26SMatthew Dillon 
817e91c5b26SMatthew Dillon /*
818e91c5b26SMatthew Dillon  * Find first cookie >= (cookie).  If exact specified, find the exact
819e91c5b26SMatthew Dillon  * cookie.
820e91c5b26SMatthew Dillon  */
8217a2de9a4SMatthew Dillon struct tmpfs_dirent *
822e91c5b26SMatthew Dillon tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie, int exact)
8237a2de9a4SMatthew Dillon {
824e91c5b26SMatthew Dillon #if 0
825f5f22af6SMatthew Dillon 	struct lubycookie_info info;
8267a2de9a4SMatthew Dillon 
827f5f22af6SMatthew Dillon 	info.cookie = cookie;
828f5f22af6SMatthew Dillon 	info.de = NULL;
829f5f22af6SMatthew Dillon 	RB_SCAN(tmpfs_dirtree_cookie, &node->tn_dir.tn_cookietree,
830f5f22af6SMatthew Dillon 		lubycookie_cmp, lubycookie_callback, &info);
831f5f22af6SMatthew Dillon 	return (info.de);
832e91c5b26SMatthew Dillon #endif
833e91c5b26SMatthew Dillon 	struct tmpfs_dirent *cdent = tmpfs_cookiedir(cookie);
834e91c5b26SMatthew Dillon 	struct tmpfs_dirent *last;
835e91c5b26SMatthew Dillon 	struct tmpfs_dirent *tmp;
836e91c5b26SMatthew Dillon 
837e91c5b26SMatthew Dillon 	last = NULL;
838e91c5b26SMatthew Dillon 	tmp = RB_ROOT(&node->tn_dir.tn_cookietree);
839e91c5b26SMatthew Dillon 	while (tmp) {
840e91c5b26SMatthew Dillon 		if (cdent == tmp)
841e91c5b26SMatthew Dillon 			return cdent;
842e91c5b26SMatthew Dillon 		if (cdent > tmp) {
843e91c5b26SMatthew Dillon 			last = tmp;
844e91c5b26SMatthew Dillon 			tmp = RB_RIGHT(tmp, rb_cookienode);
845e91c5b26SMatthew Dillon 		} else {
846e91c5b26SMatthew Dillon 			tmp = RB_LEFT(tmp, rb_cookienode);
847e91c5b26SMatthew Dillon 		}
848e91c5b26SMatthew Dillon 	}
849e91c5b26SMatthew Dillon 	return (exact ? NULL : last);
8507a2de9a4SMatthew Dillon }
8517a2de9a4SMatthew Dillon 
8527a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
8537a2de9a4SMatthew Dillon 
8547a2de9a4SMatthew Dillon /*
8557a2de9a4SMatthew Dillon  * Helper function for tmpfs_readdir.  Returns as many directory entries
8567a2de9a4SMatthew Dillon  * as can fit in the uio space.  The read starts at uio->uio_offset.
8577a2de9a4SMatthew Dillon  * The function returns 0 on success, -1 if there was not enough space
8587a2de9a4SMatthew Dillon  * in the uio structure to hold the directory entry or an appropriate
8597a2de9a4SMatthew Dillon  * error code if another error happens.
860833d1222SMatthew Dillon  *
861833d1222SMatthew Dillon  * Caller must hold the node locked (shared ok)
8627a2de9a4SMatthew Dillon  */
8637a2de9a4SMatthew Dillon int
8647a2de9a4SMatthew Dillon tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
8657a2de9a4SMatthew Dillon {
8667a2de9a4SMatthew Dillon 	int error;
8677a2de9a4SMatthew Dillon 	off_t startcookie;
8687a2de9a4SMatthew Dillon 	struct tmpfs_dirent *de;
8697a2de9a4SMatthew Dillon 
8707a2de9a4SMatthew Dillon 	TMPFS_VALIDATE_DIR(node);
8717a2de9a4SMatthew Dillon 
872833d1222SMatthew Dillon 	/*
873833d1222SMatthew Dillon 	 * Locate the first directory entry we have to return, using the
8747a2de9a4SMatthew Dillon 	 * cookie stored in uio->uio_offset.  The cookie tree is searched
875833d1222SMatthew Dillon 	 * directly, so no linear scan of the directory is needed.
876e91c5b26SMatthew Dillon 	 *
877e91c5b26SMatthew Dillon 	 * If a particular cookie does not exist, locate the first valid
878e91c5b26SMatthew Dillon 	 * cookie after that one.
879833d1222SMatthew Dillon 	 */
8807a2de9a4SMatthew Dillon 	startcookie = uio->uio_offset;
8817a2de9a4SMatthew Dillon 	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
8827a2de9a4SMatthew Dillon 	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
883833d1222SMatthew Dillon 
884833d1222SMatthew Dillon 	if (startcookie == TMPFS_DIRCOOKIE_EOF)
8857a2de9a4SMatthew Dillon 		return 0;
886833d1222SMatthew Dillon 
887e91c5b26SMatthew Dillon 	/*
888e91c5b26SMatthew Dillon 	 * Inexact lookup, find first direntry with a cookie >= startcookie.
889e91c5b26SMatthew Dillon 	 * If none found we are at the EOF.
890e91c5b26SMatthew Dillon 	 */
891e91c5b26SMatthew Dillon 	de = tmpfs_dir_lookupbycookie(node, startcookie, 0);
892e91c5b26SMatthew Dillon 	if (de == NULL) {
893e91c5b26SMatthew Dillon 		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
894e91c5b26SMatthew Dillon 		return 0;
895e91c5b26SMatthew Dillon 	}
8967a2de9a4SMatthew Dillon 
897f5f22af6SMatthew Dillon 	/*
898f5f22af6SMatthew Dillon 	 * Read as much entries as possible; i.e., until we reach the end of
899f5f22af6SMatthew Dillon 	 * the directory or we exhaust uio space.
900f5f22af6SMatthew Dillon 	 */
9017a2de9a4SMatthew Dillon 	do {
902e4f20828STomohiro Kusumi 		ino_t d_ino;
903e4f20828STomohiro Kusumi 		uint8_t d_type;
9047a2de9a4SMatthew Dillon 
9057a2de9a4SMatthew Dillon 		/* Create a dirent structure representing the current
9067a2de9a4SMatthew Dillon 		 * tmpfs_node and fill it. */
907e4f20828STomohiro Kusumi 		d_ino = de->td_node->tn_id;
9087a2de9a4SMatthew Dillon 		switch (de->td_node->tn_type) {
9097a2de9a4SMatthew Dillon 		case VBLK:
910e4f20828STomohiro Kusumi 			d_type = DT_BLK;
9117a2de9a4SMatthew Dillon 			break;
9127a2de9a4SMatthew Dillon 
9137a2de9a4SMatthew Dillon 		case VCHR:
914e4f20828STomohiro Kusumi 			d_type = DT_CHR;
9157a2de9a4SMatthew Dillon 			break;
9167a2de9a4SMatthew Dillon 
9177a2de9a4SMatthew Dillon 		case VDIR:
918e4f20828STomohiro Kusumi 			d_type = DT_DIR;
9197a2de9a4SMatthew Dillon 			break;
9207a2de9a4SMatthew Dillon 
9217a2de9a4SMatthew Dillon 		case VFIFO:
922e4f20828STomohiro Kusumi 			d_type = DT_FIFO;
9237a2de9a4SMatthew Dillon 			break;
9247a2de9a4SMatthew Dillon 
9257a2de9a4SMatthew Dillon 		case VLNK:
926e4f20828STomohiro Kusumi 			d_type = DT_LNK;
9277a2de9a4SMatthew Dillon 			break;
9287a2de9a4SMatthew Dillon 
9297a2de9a4SMatthew Dillon 		case VREG:
930e4f20828STomohiro Kusumi 			d_type = DT_REG;
9317a2de9a4SMatthew Dillon 			break;
9327a2de9a4SMatthew Dillon 
9337a2de9a4SMatthew Dillon 		case VSOCK:
934e4f20828STomohiro Kusumi 			d_type = DT_SOCK;
9357a2de9a4SMatthew Dillon 			break;
9367a2de9a4SMatthew Dillon 
9377a2de9a4SMatthew Dillon 		default:
9387a2de9a4SMatthew Dillon 			panic("tmpfs_dir_getdents: type %p %d",
9397a2de9a4SMatthew Dillon 			    de->td_node, (int)de->td_node->tn_type);
9407a2de9a4SMatthew Dillon 		}
941e4f20828STomohiro Kusumi 		KKASSERT(de->td_namelen < 256); /* 255 + 1 */
9427a2de9a4SMatthew Dillon 
943e4f20828STomohiro Kusumi 		if (vop_write_dirent(&error, uio, d_ino, d_type,
944e4f20828STomohiro Kusumi 		    de->td_namelen, de->td_name)) {
9457a2de9a4SMatthew Dillon 			error = -1;
9467a2de9a4SMatthew Dillon 			break;
9477a2de9a4SMatthew Dillon 		}
9487a2de9a4SMatthew Dillon 
9497a2de9a4SMatthew Dillon 		(*cntp)++;
950f5f22af6SMatthew Dillon 		de = RB_NEXT(tmpfs_dirtree_cookie,
951f5f22af6SMatthew Dillon 			     node->tn_dir.tn_cookietree, de);
9527a2de9a4SMatthew Dillon 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
9537a2de9a4SMatthew Dillon 
9547a2de9a4SMatthew Dillon 	/* Update the offset and cache. */
9557a2de9a4SMatthew Dillon 	if (de == NULL) {
9567a2de9a4SMatthew Dillon 		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
9577a2de9a4SMatthew Dillon 	} else {
958f5f22af6SMatthew Dillon 		uio->uio_offset = tmpfs_dircookie(de);
9597a2de9a4SMatthew Dillon 	}
9607a2de9a4SMatthew Dillon 
9617a2de9a4SMatthew Dillon 	return error;
9627a2de9a4SMatthew Dillon }
9637a2de9a4SMatthew Dillon 
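/*
 * Illustrative sketch, not part of the original file: the order in which a
 * readdir implementation would typically drive the helpers above, using
 * the cookie stored in uio->uio_offset.  Locking and the cookie-array
 * handling of the real VOP are omitted; 'tmp', 'node' and 'uio' are
 * assumed to be supplied by the caller.
 */
#if 0
	off_t cnt = 0;
	int error = 0;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT)
		error = tmpfs_dir_getdotdent(node, uio);
	if (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT)
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
	if (error == 0 && uio->uio_offset != TMPFS_DIRCOOKIE_EOF)
		error = tmpfs_dir_getdents(node, uio, &cnt);
#endif
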
9647a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
9657a2de9a4SMatthew Dillon 
9667a2de9a4SMatthew Dillon /*
9677a2de9a4SMatthew Dillon  * Resizes the aobj associated to the regular file pointed to by vp to
9687a2de9a4SMatthew Dillon  * the size newsize.  'vp' must point to a vnode that represents a regular
9697a2de9a4SMatthew Dillon  * file.  'newsize' must be positive.
9707a2de9a4SMatthew Dillon  *
9719cd86db5SMatthew Dillon  * pass NVEXTF_TRIVIAL when buf content will be overwritten, otherwise set 0
9727a2de9a4SMatthew Dillon  * to be zero filled.
9737a2de9a4SMatthew Dillon  *
9747a2de9a4SMatthew Dillon  * Returns zero on success or an appropriate error code on failure.
975b37a7c00SMatthew Dillon  *
976b37a7c00SMatthew Dillon  * Caller must hold the node exclusively locked.
9777a2de9a4SMatthew Dillon  */
9787a2de9a4SMatthew Dillon int
9797a2de9a4SMatthew Dillon tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
9807a2de9a4SMatthew Dillon {
9817a2de9a4SMatthew Dillon 	int error;
98229ffeb28SMatthew Dillon 	vm_pindex_t newpages, oldpages;
9837a2de9a4SMatthew Dillon 	struct tmpfs_mount *tmp;
9847a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
9857a2de9a4SMatthew Dillon 	off_t oldsize;
9869cd86db5SMatthew Dillon 	int nvextflags;
9877a2de9a4SMatthew Dillon 
9887a2de9a4SMatthew Dillon #ifdef INVARIANTS
9897a2de9a4SMatthew Dillon 	KKASSERT(vp->v_type == VREG);
9907a2de9a4SMatthew Dillon 	KKASSERT(newsize >= 0);
9917a2de9a4SMatthew Dillon #endif
9927a2de9a4SMatthew Dillon 
9937a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
9947a2de9a4SMatthew Dillon 	tmp = VFS_TO_TMPFS(vp->v_mount);
9957a2de9a4SMatthew Dillon 
996ff837cd5SMatthew Dillon 	/*
997ff837cd5SMatthew Dillon 	 * Convert the old and new sizes to the number of pages needed to
9987a2de9a4SMatthew Dillon 	 * store them.  It may happen that we do not need to do anything
9997a2de9a4SMatthew Dillon 	 * because the last allocated page can accommodate the change on
1000ff837cd5SMatthew Dillon 	 * its own.
1001ff837cd5SMatthew Dillon 	 */
10027a2de9a4SMatthew Dillon 	oldsize = node->tn_size;
100329ffeb28SMatthew Dillon 	oldpages = round_page64(oldsize) / PAGE_SIZE;
10047a2de9a4SMatthew Dillon 	KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
100529ffeb28SMatthew Dillon 	newpages = round_page64(newsize) / PAGE_SIZE;
10067a2de9a4SMatthew Dillon 
10077a2de9a4SMatthew Dillon 	if (newpages > oldpages &&
100829ffeb28SMatthew Dillon 	   tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
10097a2de9a4SMatthew Dillon 		error = ENOSPC;
10107a2de9a4SMatthew Dillon 		goto out;
10117a2de9a4SMatthew Dillon 	}
1012ff837cd5SMatthew Dillon 	node->tn_reg.tn_aobj_pages = newpages;
1013ff837cd5SMatthew Dillon 	node->tn_size = newsize;
10147a2de9a4SMatthew Dillon 
1015eae96dbbSMatthew Dillon 	if (newpages != oldpages)
1016b37a7c00SMatthew Dillon 		atomic_add_long(&tmp->tm_pages_used, (newpages - oldpages));
10177a2de9a4SMatthew Dillon 
10189fc94b5fSMatthew Dillon 	/*
1019e91e64c7SMatthew Dillon 	 * Select the nvextflags to pass along for bdwrite() vs buwrite(); this
1020e91e64c7SMatthew Dillon 	 * is so tmpfs activity doesn't eat memory being freed by the pageout
1021e91e64c7SMatthew Dillon 	 * daemon.
10229cd86db5SMatthew Dillon 	 */
1023e91e64c7SMatthew Dillon 	if (vm_pages_needed || vm_paging_start(0) ||
10249cd86db5SMatthew Dillon 	    tmpfs_bufcache_mode >= 2) {
10259cd86db5SMatthew Dillon 		nvextflags = 0;
10269cd86db5SMatthew Dillon 	} else {
10279cd86db5SMatthew Dillon 		nvextflags = NVEXTF_BUWRITE;
10289cd86db5SMatthew Dillon 	}
10299cd86db5SMatthew Dillon 
10309cd86db5SMatthew Dillon 
10319cd86db5SMatthew Dillon 	/*
10329fc94b5fSMatthew Dillon 	 * When adjusting the vnode filesize and its VM object we must
10339fc94b5fSMatthew Dillon 	 * also adjust our backing VM object (aobj).  The blocksize
10349fc94b5fSMatthew Dillon 	 * used must match the block size we use for the buffer cache.
103522d3b394SMatthew Dillon 	 *
1036a1b829f2SMatthew Dillon 	 * The backing VM object may contain VM pages as well as swap
1037a1b829f2SMatthew Dillon 	 * assignments if we previously renamed main object pages into
1038a1b829f2SMatthew Dillon 	 * it during deactivation.
10399cd86db5SMatthew Dillon 	 *
10409cd86db5SMatthew Dillon 	 * To make things easier tmpfs uses a blksize in multiples of
10419cd86db5SMatthew Dillon 	 * PAGE_SIZE, and will only increase the blksize as a small file
10429cd86db5SMatthew Dillon 	 * increases in size.  Once a file has exceeded TMPFS_BLKSIZE (16KB),
10439cd86db5SMatthew Dillon 	 * the blksize is maxed out.  Truncating the file does not reduce
10449cd86db5SMatthew Dillon 	 * the blksize.
10459fc94b5fSMatthew Dillon 	 */
10469fc94b5fSMatthew Dillon 	if (newsize < oldsize) {
10479fc94b5fSMatthew Dillon 		vm_pindex_t osize;
104822d3b394SMatthew Dillon 		vm_pindex_t nsize;
10499fc94b5fSMatthew Dillon 		vm_object_t aobj;
10509fc94b5fSMatthew Dillon 
10519cd86db5SMatthew Dillon 		error = nvtruncbuf(vp, newsize, node->tn_blksize,
10529cd86db5SMatthew Dillon 				   -1, nvextflags);
10539fc94b5fSMatthew Dillon 		aobj = node->tn_reg.tn_aobj;
10549fc94b5fSMatthew Dillon 		if (aobj) {
10559fc94b5fSMatthew Dillon 			osize = aobj->size;
105622d3b394SMatthew Dillon 			nsize = vp->v_object->size;
105722d3b394SMatthew Dillon 			if (nsize < osize) {
10589fc94b5fSMatthew Dillon 				aobj->size = osize;
105922d3b394SMatthew Dillon 				swap_pager_freespace(aobj, nsize,
106022d3b394SMatthew Dillon 						     osize - nsize);
1061a1b829f2SMatthew Dillon 				vm_object_page_remove(aobj, nsize, osize,
1062a1b829f2SMatthew Dillon 						      FALSE);
10639fc94b5fSMatthew Dillon 			}
10649fc94b5fSMatthew Dillon 		}
10659fc94b5fSMatthew Dillon 	} else {
10669fc94b5fSMatthew Dillon 		vm_object_t aobj;
10679cd86db5SMatthew Dillon 		int nblksize;
10689cd86db5SMatthew Dillon 
10699cd86db5SMatthew Dillon 		/*
10709cd86db5SMatthew Dillon 		 * The first (and only the first) buffer in the file is resized
10719cd86db5SMatthew Dillon 		 * in multiples of PAGE_SIZE, up to TMPFS_BLKSIZE.
10729cd86db5SMatthew Dillon 		 */
10739cd86db5SMatthew Dillon 		nblksize = node->tn_blksize;
10749cd86db5SMatthew Dillon 		while (nblksize < TMPFS_BLKSIZE &&
10759cd86db5SMatthew Dillon 		       nblksize < newsize) {
10769cd86db5SMatthew Dillon 			nblksize += PAGE_SIZE;
10779cd86db5SMatthew Dillon 		}
10789cd86db5SMatthew Dillon 
10799cd86db5SMatthew Dillon 		if (trivial)
10809cd86db5SMatthew Dillon 			nvextflags |= NVEXTF_TRIVIAL;
10819fc94b5fSMatthew Dillon 
10823c54bb74SMatthew Dillon 		error = nvextendbuf(vp, oldsize, newsize,
10839cd86db5SMatthew Dillon 				    node->tn_blksize, nblksize,
10849cd86db5SMatthew Dillon 				    -1, -1, nvextflags);
10859cd86db5SMatthew Dillon 		node->tn_blksize = nblksize;
10869fc94b5fSMatthew Dillon 		aobj = node->tn_reg.tn_aobj;
10879fc94b5fSMatthew Dillon 		if (aobj)
10889fc94b5fSMatthew Dillon 			aobj->size = vp->v_object->size;
10899fc94b5fSMatthew Dillon 	}
10907a2de9a4SMatthew Dillon 
10917a2de9a4SMatthew Dillon out:
10927a2de9a4SMatthew Dillon 	return error;
10937a2de9a4SMatthew Dillon }
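The block-size rule described in the comments above (the first buffer grows in PAGE_SIZE steps until it reaches TMPFS_BLKSIZE, and truncation never shrinks it) can be sketched outside the kernel. The following is a minimal userland approximation, assuming PAGE_SIZE is 4096 and TMPFS_BLKSIZE is 16384; grow_blksize() and the SKETCH_* constants are illustrative names, not part of tmpfs.

#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096L
#define SKETCH_TMPFS_BLKSIZE	16384L

/* Mirror of the nblksize loop in tmpfs_reg_resize(): only grow, in
 * PAGE_SIZE steps, and stop once TMPFS_BLKSIZE is reached. */
static long
grow_blksize(long cur_blksize, long newsize)
{
	long nblksize = cur_blksize;

	while (nblksize < SKETCH_TMPFS_BLKSIZE && nblksize < newsize)
		nblksize += SKETCH_PAGE_SIZE;
	return nblksize;
}

int
main(void)
{
	printf("%ld\n", grow_blksize(SKETCH_PAGE_SIZE, 1));	/* 4096  */
	printf("%ld\n", grow_blksize(SKETCH_PAGE_SIZE, 6000));	/* 8192  */
	printf("%ld\n", grow_blksize(8192L, 1L << 20));		/* 16384 */
	return 0;
}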
10947a2de9a4SMatthew Dillon 
10957a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
10967a2de9a4SMatthew Dillon 
10977a2de9a4SMatthew Dillon /*
10987a2de9a4SMatthew Dillon  * Change flags of the given vnode.
10997a2de9a4SMatthew Dillon  * Caller should execute tmpfs_update on vp after a successful execution.
11007a2de9a4SMatthew Dillon  * The vnode must be locked on entry and remain locked on exit.
11017a2de9a4SMatthew Dillon  */
11027a2de9a4SMatthew Dillon int
1103513a5bc4Szrj tmpfs_chflags(struct vnode *vp, u_long vaflags, struct ucred *cred)
11047a2de9a4SMatthew Dillon {
11057a2de9a4SMatthew Dillon 	int error;
11067a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
110780ae59d7SMatthew Dillon 	int flags;
11087a2de9a4SMatthew Dillon 
11097a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
11107a2de9a4SMatthew Dillon 
11117a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
111280ae59d7SMatthew Dillon 	flags = node->tn_flags;
11137a2de9a4SMatthew Dillon 
11147a2de9a4SMatthew Dillon 	/* Disallow this operation if the file system is mounted read-only. */
11157a2de9a4SMatthew Dillon 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
11167a2de9a4SMatthew Dillon 		return EROFS;
111780ae59d7SMatthew Dillon 	error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);
11187a2de9a4SMatthew Dillon 
11195339dfe4SAntonio Huete Jimenez 	/* Actually change the flags on the node itself */
112080ae59d7SMatthew Dillon 	if (error == 0) {
11217a2de9a4SMatthew Dillon 		TMPFS_NODE_LOCK(node);
11227a2de9a4SMatthew Dillon 		node->tn_flags = flags;
11237a2de9a4SMatthew Dillon 		node->tn_status |= TMPFS_NODE_CHANGED;
11247a2de9a4SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
112580ae59d7SMatthew Dillon 	}
11267a2de9a4SMatthew Dillon 
11277a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
11287a2de9a4SMatthew Dillon 
112980ae59d7SMatthew Dillon 	return error;
11307a2de9a4SMatthew Dillon }
11317a2de9a4SMatthew Dillon 
11327a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
11337a2de9a4SMatthew Dillon 
11347a2de9a4SMatthew Dillon /*
11357a2de9a4SMatthew Dillon  * Change access mode on the given vnode.
11367a2de9a4SMatthew Dillon  * Caller should execute tmpfs_update on vp after a successful execution.
11377a2de9a4SMatthew Dillon  * The vnode must be locked on entry and remain locked on exit.
11387a2de9a4SMatthew Dillon  */
11397a2de9a4SMatthew Dillon int
114080ae59d7SMatthew Dillon tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
11417a2de9a4SMatthew Dillon {
11427a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
114380ae59d7SMatthew Dillon 	mode_t cur_mode;
114480ae59d7SMatthew Dillon 	int error;
11457a2de9a4SMatthew Dillon 
11467a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
11477a2de9a4SMatthew Dillon 
11487a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
11497a2de9a4SMatthew Dillon 
11507a2de9a4SMatthew Dillon 	/* Disallow this operation if the file system is mounted read-only. */
11517a2de9a4SMatthew Dillon 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
11527a2de9a4SMatthew Dillon 		return EROFS;
11537a2de9a4SMatthew Dillon 
11547a2de9a4SMatthew Dillon 	/* Immutable or append-only files cannot be modified, either. */
11557a2de9a4SMatthew Dillon 	if (node->tn_flags & (IMMUTABLE | APPEND))
11567a2de9a4SMatthew Dillon 		return EPERM;
11577a2de9a4SMatthew Dillon 
115880ae59d7SMatthew Dillon 	cur_mode = node->tn_mode;
115980ae59d7SMatthew Dillon 	error = vop_helper_chmod(vp, vamode, cred, node->tn_uid, node->tn_gid,
116080ae59d7SMatthew Dillon 				 &cur_mode);
11617a2de9a4SMatthew Dillon 
116280ae59d7SMatthew Dillon 	if (error == 0 &&
116380ae59d7SMatthew Dillon 	    (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
11647a2de9a4SMatthew Dillon 		TMPFS_NODE_LOCK(node);
11657a2de9a4SMatthew Dillon 		node->tn_mode &= ~ALLPERMS;
116680ae59d7SMatthew Dillon 		node->tn_mode |= cur_mode & ALLPERMS;
11677a2de9a4SMatthew Dillon 
11687a2de9a4SMatthew Dillon 		node->tn_status |= TMPFS_NODE_CHANGED;
11697a2de9a4SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
117080ae59d7SMatthew Dillon 	}
11717a2de9a4SMatthew Dillon 
11727a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
11737a2de9a4SMatthew Dillon 
11747a2de9a4SMatthew Dillon 	return 0;
11757a2de9a4SMatthew Dillon }
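The mode update in tmpfs_chmod() only replaces the permission bits: anything in tn_mode outside ALLPERMS is preserved. A small standalone sketch of that masking step, using a hypothetical starting mode and a fallback ALLPERMS definition for non-BSD libcs:

#include <stdio.h>
#include <sys/stat.h>

#ifndef ALLPERMS
#define ALLPERMS	(S_ISUID|S_ISGID|S_ISVTX|S_IRWXU|S_IRWXG|S_IRWXO)
#endif

int
main(void)
{
	mode_t tn_mode = S_IFREG | 0644;	/* hypothetical node mode */
	mode_t cur_mode = 0755;			/* mode agreed on by the helper */

	/* Replace only the permission bits, as tmpfs_chmod() does. */
	tn_mode &= ~ALLPERMS;
	tn_mode |= cur_mode & ALLPERMS;

	printf("0%o\n", (unsigned)tn_mode);	/* 0100755: type bits kept */
	return 0;
}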
11767a2de9a4SMatthew Dillon 
11777a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
11787a2de9a4SMatthew Dillon 
11797a2de9a4SMatthew Dillon /*
11807a2de9a4SMatthew Dillon  * Change ownership of the given vnode.  At least one of uid or gid must
11817a2de9a4SMatthew Dillon  * be different than VNOVAL.  If one is set to that value, the attribute
11827a2de9a4SMatthew Dillon  * is unchanged.
11837a2de9a4SMatthew Dillon  * Caller should execute tmpfs_update on vp after a successful execution.
11847a2de9a4SMatthew Dillon  * The vnode must be locked on entry and remain locked on exit.
11857a2de9a4SMatthew Dillon  */
11867a2de9a4SMatthew Dillon int
11877a2de9a4SMatthew Dillon tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
11887a2de9a4SMatthew Dillon {
1189bd48f29cSMatthew Dillon 	mode_t cur_mode;
1190bd48f29cSMatthew Dillon 	uid_t cur_uid;
1191bd48f29cSMatthew Dillon 	gid_t cur_gid;
11927a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
1193bd48f29cSMatthew Dillon 	int error;
11947a2de9a4SMatthew Dillon 
11957a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
11967a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
11977a2de9a4SMatthew Dillon 
11987a2de9a4SMatthew Dillon 	/* Disallow this operation if the file system is mounted read-only. */
11997a2de9a4SMatthew Dillon 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
12007a2de9a4SMatthew Dillon 		return EROFS;
12017a2de9a4SMatthew Dillon 
12027a2de9a4SMatthew Dillon 	/* Immutable or append-only files cannot be modified, either. */
12037a2de9a4SMatthew Dillon 	if (node->tn_flags & (IMMUTABLE | APPEND))
12047a2de9a4SMatthew Dillon 		return EPERM;
12057a2de9a4SMatthew Dillon 
1206bd48f29cSMatthew Dillon 	cur_uid = node->tn_uid;
1207bd48f29cSMatthew Dillon 	cur_gid = node->tn_gid;
1208bd48f29cSMatthew Dillon 	cur_mode = node->tn_mode;
1209bd48f29cSMatthew Dillon 	error = vop_helper_chown(vp, uid, gid, cred,
1210bd48f29cSMatthew Dillon 				 &cur_uid, &cur_gid, &cur_mode);
12117a2de9a4SMatthew Dillon 
1212bd48f29cSMatthew Dillon 	if (error == 0) {
12137a2de9a4SMatthew Dillon 		TMPFS_NODE_LOCK(node);
1214bd48f29cSMatthew Dillon 		if (cur_uid != node->tn_uid ||
1215bd48f29cSMatthew Dillon 		    cur_gid != node->tn_gid ||
1216bd48f29cSMatthew Dillon 		    cur_mode != node->tn_mode) {
12173b9337bbSYONETANI Tomokazu 			node->tn_uid = cur_uid;
12183b9337bbSYONETANI Tomokazu 			node->tn_gid = cur_gid;
1219bd48f29cSMatthew Dillon 			node->tn_mode = cur_mode;
12207a2de9a4SMatthew Dillon 			node->tn_status |= TMPFS_NODE_CHANGED;
12217a2de9a4SMatthew Dillon 		}
12227a2de9a4SMatthew Dillon 		TMPFS_NODE_UNLOCK(node);
1223bd48f29cSMatthew Dillon 	}
12247a2de9a4SMatthew Dillon 
1225bd48f29cSMatthew Dillon 	return error;
12267a2de9a4SMatthew Dillon }
12277a2de9a4SMatthew Dillon 
12287a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
12297a2de9a4SMatthew Dillon 
12307a2de9a4SMatthew Dillon /*
12317a2de9a4SMatthew Dillon  * Change size of the given vnode.
12327a2de9a4SMatthew Dillon  * Caller should execute tmpfs_update on vp after a successful execution.
12337a2de9a4SMatthew Dillon  * The vnode must be locked on entry and remain locked on exit.
12347a2de9a4SMatthew Dillon  */
12357a2de9a4SMatthew Dillon int
12367a2de9a4SMatthew Dillon tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
12377a2de9a4SMatthew Dillon {
12387a2de9a4SMatthew Dillon 	int error;
12397a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
12407a2de9a4SMatthew Dillon 
12417a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
12427a2de9a4SMatthew Dillon 
12437a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
12447a2de9a4SMatthew Dillon 
12457a2de9a4SMatthew Dillon 	/* Decide whether this is a valid operation based on the file type. */
12467a2de9a4SMatthew Dillon 	error = 0;
12477a2de9a4SMatthew Dillon 	switch (vp->v_type) {
12487a2de9a4SMatthew Dillon 	case VDIR:
12497a2de9a4SMatthew Dillon 		return EISDIR;
12507a2de9a4SMatthew Dillon 
12517a2de9a4SMatthew Dillon 	case VREG:
12527a2de9a4SMatthew Dillon 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
12537a2de9a4SMatthew Dillon 			return EROFS;
12547a2de9a4SMatthew Dillon 		break;
12557a2de9a4SMatthew Dillon 
12567a2de9a4SMatthew Dillon 	case VBLK:
12577a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
12587a2de9a4SMatthew Dillon 	case VCHR:
12597a2de9a4SMatthew Dillon 		/* FALLTHROUGH */
12607a2de9a4SMatthew Dillon 	case VFIFO:
12617a2de9a4SMatthew Dillon 	/* Allow modifications of special files even if the file
12627a2de9a4SMatthew Dillon 		 * system is mounted read-only (we are not modifying the
12637a2de9a4SMatthew Dillon 		 * files themselves, but the objects they represent). */
12647a2de9a4SMatthew Dillon 		return 0;
12657a2de9a4SMatthew Dillon 
12667a2de9a4SMatthew Dillon 	default:
12677a2de9a4SMatthew Dillon 		/* Anything else is unsupported. */
12687a2de9a4SMatthew Dillon 		return EOPNOTSUPP;
12697a2de9a4SMatthew Dillon 	}
12707a2de9a4SMatthew Dillon 
12717a2de9a4SMatthew Dillon 	/* Immutable or append-only files cannot be modified, either. */
12727a2de9a4SMatthew Dillon 	if (node->tn_flags & (IMMUTABLE | APPEND))
12737a2de9a4SMatthew Dillon 		return EPERM;
12747a2de9a4SMatthew Dillon 
12757a2de9a4SMatthew Dillon 	error = tmpfs_truncate(vp, size);
12767a2de9a4SMatthew Dillon 	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
12777a2de9a4SMatthew Dillon 	 * for us, as well as update tn_status; no need to do that here. */
12787a2de9a4SMatthew Dillon 
12797a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
12807a2de9a4SMatthew Dillon 
12817a2de9a4SMatthew Dillon 	return error;
12827a2de9a4SMatthew Dillon }
12837a2de9a4SMatthew Dillon 
12847a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
12857a2de9a4SMatthew Dillon 
12867a2de9a4SMatthew Dillon /*
12877a2de9a4SMatthew Dillon  * Change access and modification times of the given vnode.
12887a2de9a4SMatthew Dillon  * Caller should execute tmpfs_update on vp after a successful execution.
12897a2de9a4SMatthew Dillon  * The vnode must be locked on entry and remain locked on exit.
12907a2de9a4SMatthew Dillon  */
12917a2de9a4SMatthew Dillon int
12927a2de9a4SMatthew Dillon tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
12937a2de9a4SMatthew Dillon 	      int vaflags, struct ucred *cred)
12947a2de9a4SMatthew Dillon {
12957a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
12967a2de9a4SMatthew Dillon 
12977a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
12987a2de9a4SMatthew Dillon 
12997a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
13007a2de9a4SMatthew Dillon 
13017a2de9a4SMatthew Dillon 	/* Disallow this operation if the file system is mounted read-only. */
13027a2de9a4SMatthew Dillon 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
13037a2de9a4SMatthew Dillon 		return EROFS;
13047a2de9a4SMatthew Dillon 
13057a2de9a4SMatthew Dillon 	/* Immutable or append-only files cannot be modified, either. */
13067a2de9a4SMatthew Dillon 	if (node->tn_flags & (IMMUTABLE | APPEND))
13077a2de9a4SMatthew Dillon 		return EPERM;
13087a2de9a4SMatthew Dillon 
13097a2de9a4SMatthew Dillon 	TMPFS_NODE_LOCK(node);
13107a2de9a4SMatthew Dillon 	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
13117a2de9a4SMatthew Dillon 		node->tn_status |= TMPFS_NODE_ACCESSED;
13127a2de9a4SMatthew Dillon 
1313fa4a12c4SMatthew Dillon 	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL) {
13147a2de9a4SMatthew Dillon 		node->tn_status |= TMPFS_NODE_MODIFIED;
1315fa4a12c4SMatthew Dillon 		vclrflags(vp, VLASTWRITETS);
1316fa4a12c4SMatthew Dillon 	}
13177a2de9a4SMatthew Dillon 
13187a2de9a4SMatthew Dillon 	TMPFS_NODE_UNLOCK(node);
13197a2de9a4SMatthew Dillon 
13207a2de9a4SMatthew Dillon 	tmpfs_itimes(vp, atime, mtime);
13217a2de9a4SMatthew Dillon 
13227a2de9a4SMatthew Dillon 	KKASSERT(vn_islocked(vp));
13237a2de9a4SMatthew Dillon 
13247a2de9a4SMatthew Dillon 	return 0;
13257a2de9a4SMatthew Dillon }
13267a2de9a4SMatthew Dillon 
13277a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
13287a2de9a4SMatthew Dillon /* Sync timestamps */
13297a2de9a4SMatthew Dillon void
13307a2de9a4SMatthew Dillon tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
13317a2de9a4SMatthew Dillon 	     const struct timespec *mod)
13327a2de9a4SMatthew Dillon {
13337a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
13347a2de9a4SMatthew Dillon 	struct timespec now;
13357a2de9a4SMatthew Dillon 
13367a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
13377a2de9a4SMatthew Dillon 
13387a2de9a4SMatthew Dillon 	if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
13394d22d8eeSMatthew Dillon 	    TMPFS_NODE_CHANGED)) == 0) {
13407a2de9a4SMatthew Dillon 		return;
13414d22d8eeSMatthew Dillon 	}
13427a2de9a4SMatthew Dillon 
13437a2de9a4SMatthew Dillon 	vfs_timestamp(&now);
13447a2de9a4SMatthew Dillon 
13457a2de9a4SMatthew Dillon 	TMPFS_NODE_LOCK(node);
13467a2de9a4SMatthew Dillon 	if (node->tn_status & TMPFS_NODE_ACCESSED) {
13477a2de9a4SMatthew Dillon 		if (acc == NULL)
13487a2de9a4SMatthew Dillon 			 acc = &now;
13497a2de9a4SMatthew Dillon 		node->tn_atime = acc->tv_sec;
13507a2de9a4SMatthew Dillon 		node->tn_atimensec = acc->tv_nsec;
13517a2de9a4SMatthew Dillon 	}
13527a2de9a4SMatthew Dillon 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
13537a2de9a4SMatthew Dillon 		if (mod == NULL)
13547a2de9a4SMatthew Dillon 			mod = &now;
13557a2de9a4SMatthew Dillon 		node->tn_mtime = mod->tv_sec;
13567a2de9a4SMatthew Dillon 		node->tn_mtimensec = mod->tv_nsec;
13577a2de9a4SMatthew Dillon 	}
13587a2de9a4SMatthew Dillon 	if (node->tn_status & TMPFS_NODE_CHANGED) {
13597a2de9a4SMatthew Dillon 		node->tn_ctime = now.tv_sec;
13607a2de9a4SMatthew Dillon 		node->tn_ctimensec = now.tv_nsec;
13617a2de9a4SMatthew Dillon 	}
1362fa4a12c4SMatthew Dillon 
1363fa4a12c4SMatthew Dillon 	node->tn_status &= ~(TMPFS_NODE_ACCESSED |
1364fa4a12c4SMatthew Dillon 			     TMPFS_NODE_MODIFIED |
1365fa4a12c4SMatthew Dillon 			     TMPFS_NODE_CHANGED);
13667a2de9a4SMatthew Dillon 	TMPFS_NODE_UNLOCK(node);
13677a2de9a4SMatthew Dillon }
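tmpfs defers timestamp maintenance: operations merely set the ACCESSED/MODIFIED/CHANGED status bits, and tmpfs_itimes() later folds whichever bits are pending into the stored times and clears them. A userland sketch of the same pattern, with made-up sk_* names and time(NULL) standing in for vfs_timestamp():

#include <stdio.h>
#include <time.h>

#define SK_ACCESSED	0x01
#define SK_MODIFIED	0x02
#define SK_CHANGED	0x04

struct sk_node {
	int	status;
	time_t	atime, mtime, ctime;
};

/* Fold pending status bits into the stored timestamps, then clear them. */
static void
sk_itimes(struct sk_node *n)
{
	time_t now;

	if ((n->status & (SK_ACCESSED | SK_MODIFIED | SK_CHANGED)) == 0)
		return;
	now = time(NULL);
	if (n->status & SK_ACCESSED)
		n->atime = now;
	if (n->status & SK_MODIFIED)
		n->mtime = now;
	if (n->status & SK_CHANGED)
		n->ctime = now;
	n->status &= ~(SK_ACCESSED | SK_MODIFIED | SK_CHANGED);
}

int
main(void)
{
	struct sk_node n = { SK_MODIFIED | SK_CHANGED, 0, 0, 0 };

	sk_itimes(&n);
	printf("atime=%ld mtime=%ld ctime=%ld\n",
	       (long)n.atime, (long)n.mtime, (long)n.ctime);
	return 0;
}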
13687a2de9a4SMatthew Dillon 
13697a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
13707a2de9a4SMatthew Dillon 
13717a2de9a4SMatthew Dillon void
13727a2de9a4SMatthew Dillon tmpfs_update(struct vnode *vp)
13737a2de9a4SMatthew Dillon {
13747a2de9a4SMatthew Dillon 	tmpfs_itimes(vp, NULL, NULL);
13757a2de9a4SMatthew Dillon }
13767a2de9a4SMatthew Dillon 
13777a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
13787a2de9a4SMatthew Dillon 
1379b37a7c00SMatthew Dillon /*
1380b37a7c00SMatthew Dillon  * Caller must hold an exclusive node lock.
1381b37a7c00SMatthew Dillon  */
13827a2de9a4SMatthew Dillon int
13837a2de9a4SMatthew Dillon tmpfs_truncate(struct vnode *vp, off_t length)
13847a2de9a4SMatthew Dillon {
13857a2de9a4SMatthew Dillon 	int error;
13867a2de9a4SMatthew Dillon 	struct tmpfs_node *node;
13877a2de9a4SMatthew Dillon 
13887a2de9a4SMatthew Dillon 	node = VP_TO_TMPFS_NODE(vp);
13897a2de9a4SMatthew Dillon 
13907a2de9a4SMatthew Dillon 	if (length < 0) {
13917a2de9a4SMatthew Dillon 		error = EINVAL;
13927a2de9a4SMatthew Dillon 		goto out;
13937a2de9a4SMatthew Dillon 	}
13947a2de9a4SMatthew Dillon 
13957a2de9a4SMatthew Dillon 	if (node->tn_size == length) {
13967a2de9a4SMatthew Dillon 		error = 0;
13977a2de9a4SMatthew Dillon 		goto out;
13987a2de9a4SMatthew Dillon 	}
13997a2de9a4SMatthew Dillon 
14007a2de9a4SMatthew Dillon 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
14017a2de9a4SMatthew Dillon 		return (EFBIG);
14027a2de9a4SMatthew Dillon 
14037a2de9a4SMatthew Dillon 
14047a2de9a4SMatthew Dillon 	error = tmpfs_reg_resize(vp, length, 1);
14057a2de9a4SMatthew Dillon 
1406b37a7c00SMatthew Dillon 	if (error == 0)
14077a2de9a4SMatthew Dillon 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
14087a2de9a4SMatthew Dillon 
14097a2de9a4SMatthew Dillon out:
14107a2de9a4SMatthew Dillon 	tmpfs_update(vp);
14117a2de9a4SMatthew Dillon 
14127a2de9a4SMatthew Dillon 	return error;
14137a2de9a4SMatthew Dillon }
14147a2de9a4SMatthew Dillon 
14157a2de9a4SMatthew Dillon /* --------------------------------------------------------------------- */
14167a2de9a4SMatthew Dillon 
14177a2de9a4SMatthew Dillon static ino_t
1418f7db522fSVenkatesh Srinivas tmpfs_fetch_ino(struct tmpfs_mount *tmp)
14197a2de9a4SMatthew Dillon {
14207a2de9a4SMatthew Dillon 	ino_t ret;
14217a2de9a4SMatthew Dillon 
14225af112abSMatthew Dillon 	ret = atomic_fetchadd_64(&tmp->tm_ino, 1);
14237a2de9a4SMatthew Dillon 
1424f7db522fSVenkatesh Srinivas 	return (ret);
14257a2de9a4SMatthew Dillon }
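tmpfs_fetch_ino() hands out inode numbers with a single lock-free atomic fetch-and-add on the per-mount counter. A rough equivalent using C11 atomics (the sk_* names and the starting value are illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t sk_ino = 2;	/* hypothetical starting inode number */

/* Like atomic_fetchadd_64(): bump the counter, return the old value. */
static uint64_t
sk_fetch_ino(void)
{
	return atomic_fetch_add(&sk_ino, 1);
}

int
main(void)
{
	printf("%llu\n", (unsigned long long)sk_fetch_ino());	/* 2 */
	printf("%llu\n", (unsigned long long)sk_fetch_ino());	/* 3 */
	printf("%llu\n", (unsigned long long)sk_fetch_ino());	/* 4 */
	return 0;
}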
142629ca4fd6SJohannes Hofmann 
142729ca4fd6SJohannes Hofmann static int
142829ca4fd6SJohannes Hofmann tmpfs_dirtree_compare(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
142929ca4fd6SJohannes Hofmann {
143029ca4fd6SJohannes Hofmann 	if (a->td_namelen > b->td_namelen)
143129ca4fd6SJohannes Hofmann 		return 1;
143229ca4fd6SJohannes Hofmann 	else if (a->td_namelen < b->td_namelen)
143329ca4fd6SJohannes Hofmann 		return -1;
143429ca4fd6SJohannes Hofmann 	else
143529ca4fd6SJohannes Hofmann 		return strncmp(a->td_name, b->td_name, a->td_namelen);
143629ca4fd6SJohannes Hofmann }
1437f5f22af6SMatthew Dillon 
1438f5f22af6SMatthew Dillon static int
1439f5f22af6SMatthew Dillon tmpfs_dirtree_compare_cookie(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
1440f5f22af6SMatthew Dillon {
1441f5f22af6SMatthew Dillon 	if (a < b)
1442f5f22af6SMatthew Dillon 		return(-1);
1443f5f22af6SMatthew Dillon 	if (a > b)
1444f5f22af6SMatthew Dillon 		return(1);
1445f5f22af6SMatthew Dillon 	return 0;
1446f5f22af6SMatthew Dillon }
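The directory tree comparator orders entries by name length first and only compares bytes when the lengths match, which keeps most comparisons to a pair of integer tests. A standalone sketch of that ordering with a hypothetical sk_dirent type:

#include <stdio.h>
#include <string.h>

struct sk_dirent {
	size_t		namelen;
	const char	*name;
};

/* Same ordering rule as tmpfs_dirtree_compare(): shorter names sort
 * first; only equal-length names fall back to a byte comparison. */
static int
sk_dirtree_compare(const struct sk_dirent *a, const struct sk_dirent *b)
{
	if (a->namelen > b->namelen)
		return 1;
	if (a->namelen < b->namelen)
		return -1;
	return strncmp(a->name, b->name, a->namelen);
}

int
main(void)
{
	struct sk_dirent x = { 2, "zz" };
	struct sk_dirent y = { 3, "aaa" };

	/* "zz" sorts before "aaa" because it is shorter. */
	printf("%d\n", sk_dirtree_compare(&x, &y));	/* negative */
	return 0;
}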
14474c154053SMatthew Dillon 
14484c154053SMatthew Dillon /*
14490d3f37f7SMatthew Dillon  * Lock for rename.  The namecache entries for the related terminal files
14500d3f37f7SMatthew Dillon  * are already locked but the directories are not.  A directory lock order
14510d3f37f7SMatthew Dillon  * reversal is possible so use a deterministic order.
14520d3f37f7SMatthew Dillon  *
14530d3f37f7SMatthew Dillon  * Generally, order the path parent-to-child or use a simple pointer comparison.
14540d3f37f7SMatthew Dillon  * Probably not perfect but it should catch most of the cases.
14550d3f37f7SMatthew Dillon  *
14560d3f37f7SMatthew Dillon  * Underlying files must be locked after the related directory.
14574c154053SMatthew Dillon  */
14584c154053SMatthew Dillon void
14594c154053SMatthew Dillon tmpfs_lock4(struct tmpfs_node *node1, struct tmpfs_node *node2,
14604c154053SMatthew Dillon 	    struct tmpfs_node *node3, struct tmpfs_node *node4)
14614c154053SMatthew Dillon {
14620d3f37f7SMatthew Dillon 	if (node1->tn_dir.tn_parent != node2 &&
14630d3f37f7SMatthew Dillon 	    (node1 < node2 || node2->tn_dir.tn_parent == node1)) {
14644c154053SMatthew Dillon 		TMPFS_NODE_LOCK(node1);		/* fdir */
14654c154053SMatthew Dillon 		TMPFS_NODE_LOCK(node3);		/* ffile */
14664c154053SMatthew Dillon 		TMPFS_NODE_LOCK(node2);		/* tdir */
14674c154053SMatthew Dillon 		if (node4)
14684c154053SMatthew Dillon 			TMPFS_NODE_LOCK(node4);	/* tfile */
14690d3f37f7SMatthew Dillon 	} else {
14700d3f37f7SMatthew Dillon 		TMPFS_NODE_LOCK(node2);		/* tdir */
14710d3f37f7SMatthew Dillon 		if (node4)
14720d3f37f7SMatthew Dillon 			TMPFS_NODE_LOCK(node4);	/* tfile */
14730d3f37f7SMatthew Dillon 		TMPFS_NODE_LOCK(node1);		/* fdir */
14740d3f37f7SMatthew Dillon 		TMPFS_NODE_LOCK(node3);		/* ffile */
14750d3f37f7SMatthew Dillon 	}
14764c154053SMatthew Dillon }
14774c154053SMatthew Dillon 
14784c154053SMatthew Dillon void
14794c154053SMatthew Dillon tmpfs_unlock4(struct tmpfs_node *node1, struct tmpfs_node *node2,
14804c154053SMatthew Dillon 	      struct tmpfs_node *node3, struct tmpfs_node *node4)
14814c154053SMatthew Dillon {
14824c154053SMatthew Dillon 	if (node4)
14834c154053SMatthew Dillon 		TMPFS_NODE_UNLOCK(node4);
14844d22d8eeSMatthew Dillon 	TMPFS_NODE_UNLOCK(node2);
14854d22d8eeSMatthew Dillon 	TMPFS_NODE_UNLOCK(node3);
14864d22d8eeSMatthew Dillon 	TMPFS_NODE_UNLOCK(node1);
14874c154053SMatthew Dillon }
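The rename path avoids deadlock by acquiring the directory locks in a deterministic order whenever neither directory is known to be the other's parent. A stripped-down userland sketch of that idea for just two locks, ordering by address with pthread mutexes (sk_* names are illustrative; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct sk_node {
	pthread_mutex_t lk;
};

/* Two concurrent callers locking the same pair always agree on the
 * order (lower address first), so they cannot deadlock each other. */
static void
sk_lock_pair(struct sk_node *a, struct sk_node *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lk);
		pthread_mutex_lock(&b->lk);
	} else {
		pthread_mutex_lock(&b->lk);
		pthread_mutex_lock(&a->lk);
	}
}

static void
sk_unlock_pair(struct sk_node *a, struct sk_node *b)
{
	pthread_mutex_unlock(&a->lk);
	pthread_mutex_unlock(&b->lk);
}

int
main(void)
{
	struct sk_node n1 = { PTHREAD_MUTEX_INITIALIZER };
	struct sk_node n2 = { PTHREAD_MUTEX_INITIALIZER };

	sk_lock_pair(&n1, &n2);
	printf("both locked in a deterministic order\n");
	sk_unlock_pair(&n1, &n2);
	return 0;
}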
1488