xref: /netbsd-src/sys/rump/librump/rumpvfs/rump_vfs.c (revision 4391d5e9d4f291db41e3b3ba26a01b5e51364aae)
/*	$NetBSD: rump_vfs.c,v 1.77 2013/06/10 19:48:22 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.77 2013/06/10 19:48:22 pooka Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/filedesc.h>
#include <sys/fstrans.h>
#include <sys/lockf.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>

#include <rump/rump.h>
#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

extern struct cwdinfo cwdi0;
const char *rootfstype = ROOT_FSTYPE_ANY;

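/*
 * Per-process VFS hooks, registered below as rump_proc_vfs_init
 * and rump_proc_vfs_release: give a new process its own cwdinfo
 * and release it when the process disappears.
 */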
static void
pvfs_init(struct proc *p)
{

	p->p_cwdi = cwdinit();
}

static void
pvfs_rele(struct proc *p)
{

	cwdfree(p->p_cwdi);
}

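/*
 * Shutdown hook, registered below as rump_vfs_fini: sync and
 * unmount file systems via vfs_shutdown().
 */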
static void
fini(void)
{

	vfs_shutdown();
}

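/*
 * Memory pressure hook, registered below as rump_vfs_drainbufs:
 * trim the buffer cache with buf_drain().
 */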
static void
drainbufs(int npages)
{

	mutex_enter(&bufcache_lock);
	buf_drain(npages);
	mutex_exit(&bufcache_lock);
}

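/*
 * Constructor for the VFS faction.  It hooks up the indirect
 * interfaces used by the rump kernel base, sizes the vnode and
 * buffer caches, initializes the core VFS subsystems, mounts the
 * rumpfs root file system, creates /dev, and starts the syncer
 * thread if threads are available.
 */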
RUMP_COMPONENT(RUMP__FACTION_VFS)
{
	extern struct vfsops rumpfs_vfsops;
	char buf[64];
	int rv, i;

	/* initialize indirect interfaces */
	rump_vfs_fini = fini;
	rump_vfs_drainbufs = drainbufs;

	if (rumpuser_getparam("RUMP_NVNODES", buf, sizeof(buf)) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<10;
	}

	rumpblk_init();

	for (i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		cache_cpu_init(ci);
	}

	/* make number of bufpages 5% of total memory limit */
	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		extern u_int bufpages;
		bufpages = rump_physmemlimit / (20 * PAGE_SIZE);
	}

	vfsinit();
	bufinit();
	cwd_sys_init();
	lf_init();
	spec_init();
	fstrans_init();

	root_device = &rump_rootdev;

	/* bootstrap cwdi (rest done in vfs_mountroot()) */
	proc0.p_cwdi = &cwdi0;
	proc0.p_cwdi = cwdinit();

	vfs_attach(&rumpfs_vfsops);
	vfs_mountroot();

	/* "mtree": create /dev */
	do_sys_mkdir("/dev", 0755, UIO_SYSSPACE);

	rump_proc_vfs_init = pvfs_init;
	rump_proc_vfs_release = pvfs_rele;

	if (rump_threads) {
		if ((rv = kthread_create(PRI_IOFLUSH, KTHREAD_MPSAFE, NULL,
		    sched_sync, NULL, NULL, "ioflush")) != 0)
			panic("syncer thread create failed: %d", rv);
	} else {
		syncdelay = 0;
	}

	/*
	 * On archs where the native kernel ABI is supported, map the
	 * host module directory into rump.  This means that kernel
	 * modules from the host will be autoloaded into rump kernels.
	 */
#ifdef _RUMP_NATIVE_ABI
	{
	char *mbase;

	if (rumpuser_getparam("RUMP_MODULEBASE", buf, sizeof(buf)) == 0)
		mbase = buf;
	else
		mbase = module_base;

	if (strlen(mbase) != 0 && *mbase != '0') {
		rump_etfs_register(module_base, mbase, RUMP_ETFS_DIR_SUBDIRS);
	}
	}
#endif

	module_init_class(MODULE_CLASS_VFS);

	/*
	 * Don't build device names for a large set of devices by
	 * default.  While the pseudo-devfs is a fun experiment,
	 * creating many many device nodes may increase rump kernel
	 * bootstrap time by ~40%.  Device nodes should be created
	 * on demand in the component constructors.
	 */
#if 0
	{
	extern struct devsw_conv devsw_conv0[];
	extern int max_devsw_convs;
	rump_vfs_builddevs(devsw_conv0, max_devsw_convs);
	}
#else
	rump_vfs_builddevs(NULL, 0);
#endif

	/* attach null device and create /dev/{null,zero} */
	rump_devnull_init();

	rump_component_init(RUMP_COMPONENT_VFS);
}

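/*
 * struct rumpcn bundles a componentname with its backing pathname
 * buffer so that rump_freecn() can release both.  rump_makecn()
 * constructs one for feeding to VOP interfaces; passing
 * RUMPCN_FREECRED to rump_freecn() additionally releases the
 * credentials stored in the componentname.
 */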
struct rumpcn {
	struct componentname rcn_cn;
	char *rcn_path;
};

struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct rumpcn *rcn;
	struct componentname *cnp;

	rcn = kmem_zalloc(sizeof(*rcn), KM_SLEEP);
	cnp = &rcn->rcn_cn;

	rcn->rcn_path = PNBUF_GET();
	strlcpy(rcn->rcn_path, name, MAXPATHLEN);
	cnp->cn_nameptr = rcn->rcn_path;

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags & (MODMASK | PARAMASK);

	cnp->cn_namelen = namelen;

	cnp->cn_cred = creds;

	return cnp;
}

void
rump_freecn(struct componentname *cnp, int flags)
{
	struct rumpcn *rcn = (void *)cnp;

	if (flags & RUMPCN_FREECRED)
		rump_cred_put(cnp->cn_cred);

	PNBUF_PUT(rcn->rcn_path);
	kmem_free(rcn, sizeof(*rcn));
}

/* hey baby, what's your namei? */
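/*
 * Look up a pathname.  The parent vnode is returned only if the
 * caller asked for it (dvpp != NULL), in which case LOCKPARENT
 * must be set.  If the caller does not want the resulting vnode,
 * it is released here.  When cnpp is given, a kmem_alloc()'d copy
 * of the componentname is returned and the caller is responsible
 * for freeing it.
 */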
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct pathbuf *pb;
	struct nameidata nd;
	int rv;

	pb = pathbuf_create(namep);
	if (pb == NULL) {
		return ENOMEM;
	}
	NDINIT(&nd, op, flags, pb);
	rv = namei(&nd);
	if (rv) {
		pathbuf_destroy(pb);
		return rv;
	}

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	}
	pathbuf_destroy(pb);

	return rv;
}

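/*
 * Export basic vnode information (type, size and, for device
 * vnodes, the rdev) to a rump client.
 */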
void
rump_getvninfo(struct vnode *vp, enum rump_vtype *vtype,
	voff_t *vsize, dev_t *vdev)
{

	*vtype = (enum rump_vtype)vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

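/*
 * Iterate over the list of attached file system types: passing
 * NULL returns the first entry and iteration ends when NULL is
 * returned.
 */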
struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

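/*
 * Resolve a pathname to the mount point it resides on.
 */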
int
rump_vfs_getmp(const char *path, struct mount **mpp)
{
	struct vnode *vp;
	int rv;

	if ((rv = namei_simple_user(path, NSM_FOLLOW_TRYEMULROOT, &vp)) != 0)
		return rv;

	*mpp = vp->v_mount;
	vrele(vp);
	return 0;
}

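/*
 * Helpers for building a struct vattr outside the kernel namespace.
 * rump_vattr_init() returns attributes with all fields marked
 * "not set" (vattr_null()); the setters fill in the commonly needed
 * fields and rump_vattr_free() releases the memory.
 *
 * Minimal usage sketch (the operation consuming vap is hypothetical):
 *
 *	struct vattr *vap = rump_vattr_init();
 *	rump_vattr_settype(vap, RUMP_VCHR);
 *	rump_vattr_setmode(vap, 0600);
 *	rump_vattr_setrdev(vap, makedev(0, 0));
 *	... pass vap to the desired VOP ...
 *	rump_vattr_free(vap);
 */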
struct vattr*
rump_vattr_init(void)
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum rump_vtype vt)
{

	vap->va_type = (enum vtype)vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

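/*
 * Vnode reference and interlock helpers for rump clients: take and
 * drop references, query the current use count and enter the vnode
 * interlock.
 */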
void
rump_vp_incref(struct vnode *vp)
{

	vref(vp);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(vp->v_interlock);
}

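/*
 * Thin wrappers around the VFS_* operations (unmount, root, statvfs,
 * sync, fhtovp, vptofh, extattrctl) for callers outside the kernel
 * namespace.  rump_vfs_root() additionally unlocks the returned root
 * vnode unless a locked vnode was requested.
 */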
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

int
rump_vfs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
	int attrnamespace, const char *attrname)
{

	return VFS_EXTATTRCTL(mp, cmd, vp, attrnamespace, attrname);
}

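/*
 * Wait for pending buffer I/O to complete and report any buffers
 * that remain unsynced.  The mount point argument is unused.
 */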
/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

/*
 * Dump info about mount point.  No locking.
 */
void
rump_vfs_mount_print(const char *path, int full)
{
#ifdef DEBUGPRINT
	struct vnode *mvp;
	struct vnode *vp;
	int error;

	rumpuser_dprintf("\n==== dumping mountpoint at ``%s'' ====\n\n", path);
	if ((error = namei_simple_user(path, NSM_FOLLOW_NOEMULROOT, &mvp))!=0) {
		rumpuser_dprintf("==== lookup error %d ====\n\n", error);
		return;
	}
	vfs_mount_print(mvp->v_mount, full, (void *)rumpuser_dprintf);
	if (full) {
		rumpuser_dprintf("\n== dumping vnodes ==\n\n");
		TAILQ_FOREACH(vp, &mvp->v_mount->mnt_vnodelist, v_mntvnodes) {
			vfs_vnode_print(vp, full, (void *)rumpuser_dprintf);
		}
	}
	vrele(mvp);
	rumpuser_dprintf("\n==== done ====\n\n");
#else
	rumpuser_dprintf("mount dump not supported without DEBUGPRINT\n");
#endif
}

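/*
 * I/O completion callback for asynchronous block I/O performed via
 * the host (used e.g. by the rump block device driver): record the
 * residual count and error status and complete the buffer with
 * biodone().
 */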
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	biodone(bp);
}