xref: /dflybsd-src/sys/vfs/tmpfs/tmpfs_vfsops.c (revision 4041d91945fc1e4ba1bb1065fd5f9915983f8605)
/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses the kernel's virtual memory sub-system
 * to store file data and metadata in an efficient way.  This means that it
 * does not follow the structure of an on-disk file system because it simply
 * does not need to.  Instead, it uses memory-specific data structures and
 * algorithms to automatically allocate and release resources.
 */
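
/*
 * Illustration (not compiled): the mount arguments consumed below are
 * normally prepared by a userland mounter.  A minimal sketch, assuming
 * only the ta_* fields that tmpfs_mount() actually reads (see
 * tmpfs_args.h for the full layout); the size and mode values are
 * arbitrary examples:
 *
 *	struct tmpfs_args args;
 *
 *	bzero(&args, sizeof(args));
 *	args.ta_size_max = 64 * 1024 * 1024;	(64MB data limit)
 *	args.ta_nodes_max = 0;			(0 = let the kernel choose)
 *	args.ta_root_uid = 0;
 *	args.ta_root_gid = 0;
 *	args.ta_root_mode = 01777;
 *	if (mount("tmpfs", "/tmp", 0, &args) < 0)
 *		err(1, "mount tmpfs");
 */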
#include <sys/cdefs.h>
#include <sys/conf.h>
#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <vfs/tmpfs/tmpfs.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include <vfs/tmpfs/tmpfs_args.h>

/*
 * Default permissions for the root node (0755).
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *, char *, caddr_t, struct ucred *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct vnode *, struct fid *,
			     struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *, struct ucred *);

/* --------------------------------------------------------------------- */
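
/*
 * objcache hooks for the per-mount tmpfs node cache (wired up in
 * tmpfs_mount() below).  tmpfs_node_init()/tmpfs_node_fini() wrap the
 * malloc-backed allocator and manage state that lives as long as the
 * backing object does: the node interlock and a randomized generation
 * seed.  The ctor/dtor pair resets the fields that must start out clean
 * whenever a node is handed out and marks a node VNON/DOOMED when it is
 * retired.  See objcache(9) for exactly when each hook runs.
 */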
int
tmpfs_node_ctor(void *obj, void *privdata, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = TMPFS_VNODE_WANT;
	bzero(&node->tn_spec, sizeof(node->tn_spec));

	return (1);
}

static void
tmpfs_node_dtor(void *obj, void *privdata)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	node->tn_type = VNON;
	node->tn_vpstate = TMPFS_VNODE_DOOMED;
}

static void *
tmpfs_node_init(void *args, int flags)
{
	struct tmpfs_node *node;

	node = (struct tmpfs_node *)objcache_malloc_alloc(args, flags);
	node->tn_id = 0;

	lockinit(&node->tn_interlock, "tmpfs node interlock", 0, LK_CANRECURSE);
	node->tn_gen = karc4random();

	return node;
}

static void
tmpfs_node_fini(void *obj, void *args)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	lockuninit(&node->tn_interlock);
	objcache_malloc_free(obj, args);
}

static int
tmpfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct tmpfs_args args;
	vm_pindex_t pages;
	vm_pindex_t pages_limit;
	ino_t nodes;
	u_int64_t maxfsize;
	int error;
	/* Size counters. */
	ino_t	nodes_max;
	off_t	size_max;
	size_t	maxfsize_max;
	size_t	size;

	/* Root node attributes. */
	uid_t	root_uid = cred->cr_uid;
	gid_t	root_gid = cred->cr_gid;
	mode_t	root_mode = (VREAD | VWRITE);

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * XXX: There is no support yet to update file system
		 * settings.  Should be added.
		 */
		return EOPNOTSUPP;
	}

	/*
	 * mount info
	 */
	bzero(&args, sizeof(args));
	size_max = 0;
	nodes_max = 0;
	maxfsize_max = 0;

	if (path) {
		if (data) {
			error = copyin(data, &args, sizeof(args));
			if (error)
				return (error);
		}
		size_max = args.ta_size_max;
		nodes_max = args.ta_nodes_max;
		maxfsize_max = args.ta_maxfsize_max;
		root_uid = args.ta_root_uid;
		root_gid = args.ta_root_gid;
		root_mode = args.ta_root_mode;
	}

	/*
	 * For mounts by a non-root user, restrict the root node's
	 * permissions: read access only, plus write access if the
	 * mount itself is read/write.
	 */
	if (cred->cr_uid != 0) {
		root_mode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			root_mode |= VWRITE;
	}

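	/*
	 * Derive the mount's resource limits.  The hard ceiling is the
	 * configured swap space plus half of physical memory (both in
	 * pages).  With no explicit size the data limit defaults to half
	 * of that ceiling.  With no explicit node limit we allow three
	 * nodes plus roughly one per KB of data space; an explicit node
	 * limit is clamped to the range [3, pages].  The per-file size
	 * limit is the byte equivalent of the page ceiling unless a
	 * smaller maximum was requested.
	 */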
	pages_limit = vm_swap_max + vmstats.v_page_count / 2;

	if (size_max == 0)
		pages = pages_limit / 2;
	else if (size_max < PAGE_SIZE)
		pages = 1;
	else if (OFF_TO_IDX(size_max) > pages_limit)
		pages = pages_limit;
	else
		pages = OFF_TO_IDX(size_max);

	if (nodes_max == 0)
		nodes = 3 + pages * PAGE_SIZE / 1024;
	else if (nodes_max < 3)
		nodes = 3;
	else if (nodes_max > pages)
		nodes = pages;
	else
		nodes = nodes_max;

	maxfsize = IDX_TO_OFF(pages_limit);
	if (maxfsize_max != 0 && maxfsize > maxfsize_max)
		maxfsize = maxfsize_max;

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmalloc(sizeof(*tmp), M_TMPFSMNT, M_WAITOK | M_ZERO);

	lockinit(&tmp->allnode_lock, "tmpfs allnode lock", 0, LK_CANRECURSE);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_maxfilesize = maxfsize;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;

	kmalloc_create(&tmp->tm_node_zone, "tmpfs node");
	kmalloc_create(&tmp->tm_dirent_zone, "tmpfs dirent");

	kmalloc_raise_limit(tmp->tm_node_zone, sizeof(struct tmpfs_node) *
			    tmp->tm_nodes_max);

	tmp->tm_node_zone_malloc_args.objsize = sizeof(struct tmpfs_node);
	tmp->tm_node_zone_malloc_args.mtype = tmp->tm_node_zone;

	tmp->tm_dirent_zone_malloc_args.objsize = sizeof(struct tmpfs_dirent);
	tmp->tm_dirent_zone_malloc_args.mtype = tmp->tm_dirent_zone;

	tmp->tm_dirent_pool = objcache_create("tmpfs dirent cache",
	    0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free,
	    &tmp->tm_dirent_zone_malloc_args);
	tmp->tm_node_pool = objcache_create("tmpfs node cache",
	    0, 0,
	    tmpfs_node_ctor, tmpfs_node_dtor, NULL,
	    tmpfs_node_init, tmpfs_node_fini,
	    &tmp->tm_node_zone_malloc_args);

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, root_uid, root_gid,
				 root_mode & ALLPERMS, NULL, NULL,
				 VNOVAL, VNOVAL, &root);
	if (error != 0 || root == NULL) {
		objcache_destroy(tmp->tm_node_pool);
		objcache_destroy(tmp->tm_dirent_pool);
		kfree(tmp, M_TMPFSMNT);
		return error;
	}

	/*
	 * We are backed by swap, so set the SF_NOCACHE chflags flag on
	 * the root so we don't trip over swapcache.
	 */
	root->tn_flags = SF_NOCACHE;

	KASSERT(root->tn_id >= 0,
		("tmpfs root with invalid ino: %d", (int)root->tn_id));
	tmp->tm_root = root;

	mp->mnt_flag |= MNT_LOCAL;
#if 0
	mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_WR_MPSAFE | MNTK_GA_MPSAFE |
			     MNTK_IN_MPSAFE | MNTK_SG_MPSAFE;
#endif
	mp->mnt_kern_flag |= MNTK_NOMSYNC;
	mp->mnt_data = (qaddr_t)tmp;
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &tmpfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &tmpfs_fifo_vops, &mp->mnt_vn_fifo_ops);

	copystr("tmpfs", mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	tmpfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	int found;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	tmp = VFS_TO_TMPFS(mp);

	/*
	 * Finalize all pending I/O.  In the case of tmpfs we want
	 * to throw all the data away so clean out the buffer cache
	 * and vm objects before calling vflush().
	 */
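	/*
	 * Note: the transient tn_links bump around each node below (and
	 * in the teardown passes further on) appears intended to keep the
	 * node from being reclaimed while we hold its vnode and truncate
	 * it.
	 */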
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_type == VREG && node->tn_vnode) {
			++node->tn_links;
			TMPFS_NODE_LOCK(node);
			vx_get(node->tn_vnode);
			tmpfs_truncate(node->tn_vnode, 0);
			vx_put(node->tn_vnode);
			TMPFS_NODE_UNLOCK(node);
			--node->tn_links;
		}
	}
	error = vflush(mp, 0, flags);
	if (error != 0)
		return error;

	/*
	 * First pass get rid of all the directory entries and
	 * vnode associations.  The directory structure will
	 * remain via the extra link count representing tn_dir.tn_parent.
	 *
	 * No vnodes should remain after the vflush above.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		++node->tn_links;
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			while (!TAILQ_EMPTY(&node->tn_dir.tn_dirhead)) {
				de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
				tmpfs_dir_detach(node, de);
				tmpfs_free_dirent(tmp, de);
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}
		KKASSERT(node->tn_vnode == NULL);
#if 0
		vp = node->tn_vnode;
		if (vp != NULL) {
			tmpfs_free_vp(vp);
			vrecycle(vp);
			node->tn_vnode = NULL;
		}
#endif
		TMPFS_NODE_UNLOCK(node);
		--node->tn_links;
	}

	/*
	 * Now get rid of all nodes.  We can remove any node with a
	 * link count of 0 or any directory node with a link count of
	 * 1.  The parents will not be destroyed until all their children
	 * have been destroyed.
	 *
	 * Recursion in tmpfs_free_node() can further modify the list so
	 * we cannot use a next pointer here.
	 *
	 * The root node will be destroyed by this loop (it will be last).
	 */
	while (!LIST_EMPTY(&tmp->tm_nodes_used)) {
		found = 0;
		LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
			if (node->tn_links == 0 ||
			    (node->tn_links == 1 && node->tn_type == VDIR)) {
				TMPFS_NODE_LOCK(node);
				tmpfs_free_node(tmp, node);
				/* eats lock */
				found = 1;
				break;
			}
		}
		if (found == 0) {
			kprintf("tmpfs: Cannot free entire node tree!\n");
			break;
		}
	}

	KKASSERT(tmp->tm_root == NULL);

	objcache_destroy(tmp->tm_dirent_pool);
	objcache_destroy(tmp->tm_node_pool);

	kmalloc_destroy(&tmp->tm_dirent_zone);
	kmalloc_destroy(&tmp->tm_node_zone);

	tmp->tm_node_zone = tmp->tm_dirent_zone = NULL;

	lockuninit(&tmp->allnode_lock);
	KKASSERT(tmp->tm_pages_used == 0);
	KKASSERT(tmp->tm_nodes_inuse == 0);

	/* Throw away the tmpfs_mount structure. */
	kfree(tmp, M_TMPFSMNT);
	mp->mnt_data = NULL;

	mp->mnt_flag &= ~MNT_LOCAL;
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_root(struct mount *mp, struct vnode **vpp)
{
	struct tmpfs_mount *tmp;
	int error;

	tmp = VFS_TO_TMPFS(mp);
	if (tmp->tm_root == NULL) {
		kprintf("tmpfs_root: called without root node %p\n", mp);
		print_backtrace(-1);
		*vpp = NULL;
		error = EINVAL;
	} else {
		error = tmpfs_alloc_vp(mp, tmp->tm_root, LK_EXCLUSIVE, vpp);
		if (error == 0) {
			(*vpp)->v_flag |= VROOT;
			(*vpp)->v_type = VDIR;
		}
	}
	return error;
}

/* --------------------------------------------------------------------- */

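/*
 * Translate an NFS-style file handle into a vnode: scan the in-use node
 * list for a node whose id and generation number match the handle.  The
 * generation number (seeded randomly and bumped on reuse by the objcache
 * ctor above) rejects stale handles that refer to a recycled node.
 */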
static int
tmpfs_fhtovp(struct mount *mp, struct vnode *rootvp, struct fid *fhp,
	     struct vnode **vpp)
{
	boolean_t found;
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = FALSE;

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			found = TRUE;
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (found)
		return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));

	return (EINVAL);
}

/* --------------------------------------------------------------------- */

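/*
 * Report file system statistics.  Block counts are expressed in
 * PAGE_SIZE units and come straight from the mount's page limit and
 * usage counters; file counts come from the node limit and the number
 * of nodes currently in use.
 */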
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	fsfilcnt_t freenodes;
	struct tmpfs_mount *tmp;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	sbp->f_blocks = tmp->tm_pages_max;
	sbp->f_bavail = tmp->tm_pages_max - tmp->tm_pages_used;
	sbp->f_bfree = sbp->f_bavail;

	freenodes = tmp->tm_nodes_max - tmp->tm_nodes_inuse;

	sbp->f_files = freenodes + tmp->tm_nodes_inuse;
	sbp->f_ffree = freenodes;
	sbp->f_owner = tmp->tm_root->tn_uid;

	return 0;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */

static struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			vfs_stdsync
};

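/*
 * VFS_SET() declares the tmpfs module and registers tmpfs_vfsops under
 * the file system name "tmpfs"; operations left unset above are expected
 * to be filled in with the VFS layer's defaults at registration time.
 */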
VFS_SET(tmpfs_vfsops, tmpfs, 0);