xref: /netbsd-src/sys/fs/tmpfs/tmpfs_vfsops.c (revision 267197ec1eebfcb9810ea27a89625b6ddf68e3e7)
/*	$NetBSD: tmpfs_vfsops.c,v 1.38 2008/02/06 11:22:12 jmmv Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
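
/*
 * Usage sketch (illustrative only, not part of the original source):
 * assuming the standard mount_tmpfs(8) userland utility, a memory file
 * system limited to 32 megabytes and one million nodes could be mounted
 * with something like:
 *
 *	mount_tmpfs -s 32m -n 1000000 tmpfs /tmp
 *
 * The size and node limits correspond to the ta_size_max and
 * ta_nodes_max fields of struct tmpfs_args handled by tmpfs_mount()
 * below; see mount_tmpfs(8) for the authoritative option syntax.
 */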

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.38 2008/02/06 11:22:12 jmmv Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs.h>

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *, const char *, void *, size_t *);
static int	tmpfs_start(struct mount *, int);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, struct vnode **);
static int	tmpfs_vget(struct mount *, ino_t, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, struct vnode **);
static int	tmpfs_vptofh(struct vnode *, struct fid *, size_t *);
static int	tmpfs_statvfs(struct mount *, struct statvfs *);
static int	tmpfs_sync(struct mount *, int, kauth_cred_t);
static void	tmpfs_init(void);
static void	tmpfs_done(void);
static int	tmpfs_snapshot(struct mount *, struct vnode *,
		    struct timespec *);

/* --------------------------------------------------------------------- */

static int
tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	int error;
	ino_t nodes;
	size_t pages;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct tmpfs_args *args = data;

	if (*data_len < sizeof *args)
		return EINVAL;

	/* Handle retrieval of mount point arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (mp->mnt_data == NULL)
			return EIO;
		tmp = VFS_TO_TMPFS(mp);

		args->ta_version = TMPFS_ARGS_VERSION;
		args->ta_nodes_max = tmp->tm_nodes_max;
		args->ta_size_max = tmp->tm_pages_max * PAGE_SIZE;

		root = tmp->tm_root;
		args->ta_root_uid = root->tn_uid;
		args->ta_root_gid = root->tn_gid;
		args->ta_root_mode = root->tn_mode;

		*data_len = sizeof *args;
		return 0;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/* XXX: There is no support yet to update file system
		 * settings.  Should be added. */

		return EOPNOTSUPP;
	}

	if (args->ta_version != TMPFS_ARGS_VERSION)
		return EINVAL;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_info(true) < TMPFS_PAGES_RESERVED)
		return EINVAL;

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (args->ta_size_max < PAGE_SIZE || args->ta_size_max >= SIZE_MAX)
		pages = SIZE_MAX;
	else
		pages = args->ta_size_max / PAGE_SIZE +
		    (args->ta_size_max % PAGE_SIZE == 0 ? 0 : 1);
	if (pages > INT_MAX)
		pages = INT_MAX;
	KASSERT(pages > 0);
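	/*
	 * Worked example of the rounding above (illustrative only, assuming
	 * a 4 KiB PAGE_SIZE): a ta_size_max of 10 MiB yields exactly 2560
	 * pages, while 10 MiB plus one byte rounds up to 2561 pages.
	 */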

	if (args->ta_nodes_max <= 3)
		nodes = 3 + pages * PAGE_SIZE / 1024;
	else
		nodes = args->ta_nodes_max;
	if (nodes > INT_MAX)
		nodes = INT_MAX;
	KASSERT(nodes >= 3);
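	/*
	 * Illustrative default (same 4 KiB page assumption as above): a
	 * 10 MiB mount that does not set ta_nodes_max gets
	 * 3 + 2560 * 4096 / 1024 = 10243 nodes, i.e. roughly one node per
	 * kilobyte of allowed storage.
	 */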

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmem_alloc(sizeof(struct tmpfs_mount), KM_SLEEP);
	if (tmp == NULL)
		return ENOMEM;

	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_cnt = 0;
	LIST_INIT(&tmp->tm_nodes);

	mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	tmpfs_pool_init(&tmp->tm_dirent_pool, sizeof(struct tmpfs_dirent),
	    "dirent", tmp);
	tmpfs_pool_init(&tmp->tm_node_pool, sizeof(struct tmpfs_node),
	    "node", tmp);
	tmpfs_str_pool_init(&tmp->tm_str_pool, tmp);

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, args->ta_root_uid,
	    args->ta_root_gid, args->ta_root_mode & ALLPERMS, NULL, NULL,
	    VNOVAL, &root);
	KASSERT(error == 0 && root != NULL);
	root->tn_links++;
	tmp->tm_root = root;

	mp->mnt_data = tmp;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE;
	vfs_getnewfsid(mp);

	return set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, l);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_start(struct mount *mp, int flags)
{

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, NULL, flags);
	if (error != 0)
		return error;

	tmp = VFS_TO_TMPFS(mp);

	/* Free all associated data.  The loop iterates over the linked list
	 * we have containing all used nodes.  For each of them that is
	 * a directory, we free all its directory entries.  Note that after
	 * freeing a node, it will automatically go to the available list,
	 * so we will later have to iterate over it to release its items. */
	node = LIST_FIRST(&tmp->tm_nodes);
	while (node != NULL) {
		struct tmpfs_node *next;

		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
			while (de != NULL) {
				struct tmpfs_dirent *nde;

				nde = TAILQ_NEXT(de, td_entries);
				tmpfs_free_dirent(tmp, de, false);
				de = nde;
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}

		next = LIST_NEXT(node, tn_entries);
		tmpfs_free_node(tmp, node);
		node = next;
	}

	tmpfs_pool_destroy(&tmp->tm_dirent_pool);
	tmpfs_pool_destroy(&tmp->tm_node_pool);
	tmpfs_str_pool_destroy(&tmp->tm_str_pool);

	KASSERT(tmp->tm_pages_used == 0);

	/* Throw away the tmpfs_mount structure. */
	mutex_destroy(&tmp->tm_lock);
	kmem_free(tmp, sizeof(*tmp));
	mp->mnt_data = NULL;

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_root(struct mount *mp, struct vnode **vpp)
{

	return tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, vpp);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_vget(struct mount *mp, ino_t ino,
    struct vnode **vpp)
{

	printf("tmpfs_vget called; need for it unknown yet\n");
	return EOPNOTSUPP;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	bool found;
	struct tmpfs_fid tfh;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	if (fhp->fid_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	memcpy(&tfh, fhp, sizeof(struct tmpfs_fid));

	if (tfh.tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = false;
	mutex_enter(&tmp->tm_lock);
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		if (node->tn_id == tfh.tf_id &&
		    node->tn_gen == tfh.tf_gen) {
			found = true;
			break;
		}
	}
	mutex_exit(&tmp->tm_lock);

	/* XXXAD nothing to prevent 'node' from being removed. */
	return found ? tmpfs_alloc_vp(mp, node, vpp) : EINVAL;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	struct tmpfs_fid tfh;
	struct tmpfs_node *node;

	if (*fh_size < sizeof(struct tmpfs_fid)) {
		*fh_size = sizeof(struct tmpfs_fid);
		return E2BIG;
	}
	*fh_size = sizeof(struct tmpfs_fid);
	node = VP_TO_TMPFS_NODE(vp);

	memset(&tfh, 0, sizeof(tfh));
	tfh.tf_len = sizeof(struct tmpfs_fid);
	tfh.tf_gen = node->tn_gen;
	tfh.tf_id = node->tn_id;
	memcpy(fhp, &tfh, sizeof(tfh));

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	fsfilcnt_t freenodes;
	struct tmpfs_mount *tmp;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;

	sbp->f_blocks = TMPFS_PAGES_MAX(tmp);
	sbp->f_bavail = sbp->f_bfree = TMPFS_PAGES_AVAIL(tmp);
	sbp->f_bresvd = 0;

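	/* The free node count is bounded both by the administrative limit
	 * (tm_nodes_max) and by how many struct tmpfs_node objects would
	 * still fit in the pages this mount is still allowed to use. */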
	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
	    TMPFS_PAGES_AVAIL(tmp) * PAGE_SIZE / sizeof(struct tmpfs_node));

	sbp->f_files = tmp->tm_nodes_cnt + freenodes;
	sbp->f_favail = sbp->f_ffree = freenodes;
	sbp->f_fresvd = 0;

	copy_statvfs_info(sbp, mp);

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED0 */
static int
tmpfs_sync(struct mount *mp, int waitfor,
    kauth_cred_t uc)
{

	return 0;
}

/* --------------------------------------------------------------------- */

static void
tmpfs_init(void)
{

}

/* --------------------------------------------------------------------- */

static void
tmpfs_done(void)
{

}

/* --------------------------------------------------------------------- */

static int
tmpfs_snapshot(struct mount *mp, struct vnode *vp,
    struct timespec *ctime)
{

	return EOPNOTSUPP;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */

extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
	&tmpfs_fifoop_opv_desc,
	&tmpfs_specop_opv_desc,
	&tmpfs_vnodeop_opv_desc,
	NULL,
};

struct vfsops tmpfs_vfsops = {
	MOUNT_TMPFS,			/* vfs_name */
	sizeof (struct tmpfs_args),
	tmpfs_mount,			/* vfs_mount */
	tmpfs_start,			/* vfs_start */
	tmpfs_unmount,			/* vfs_unmount */
	tmpfs_root,			/* vfs_root */
	(void *)eopnotsupp,		/* vfs_quotactl */
	tmpfs_statvfs,			/* vfs_statvfs */
	tmpfs_sync,			/* vfs_sync */
	tmpfs_vget,			/* vfs_vget */
	tmpfs_fhtovp,			/* vfs_fhtovp */
	tmpfs_vptofh,			/* vfs_vptofh */
	tmpfs_init,			/* vfs_init */
	NULL,				/* vfs_reinit */
	tmpfs_done,			/* vfs_done */
	NULL,				/* vfs_mountroot */
	tmpfs_snapshot,			/* vfs_snapshot */
	vfs_stdextattrctl,		/* vfs_extattrctl */
	(void *)eopnotsupp,		/* vfs_suspendctl */
	genfs_renamelock_enter,
	genfs_renamelock_exit,
	tmpfs_vnodeopv_descs,
	0,				/* vfs_refcount */
	{ NULL, NULL },
};
VFS_ATTACH(tmpfs_vfsops);