/*	$NetBSD: rump_vfs.c,v 1.97 2023/09/23 18:21:12 ad Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.97 2023/09/23 18:21:12 ad Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/fstrans.h>
#include <sys/lockf.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>
#include <sys/bufq.h>

#include <miscfs/specfs/specdev.h>

#include <rump-sys/kern.h>
#include <rump-sys/vfs.h>

#include <rump/rump.h>
#include <rump/rumpuser.h>

const char *rootfstype = ROOT_FSTYPE_ANY;

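/* Assigned to rump_proc_vfs_init: set up cwd state for a process. */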
static void
pvfs_init(struct proc *p)
{

	p->p_cwdi = cwdinit();
}

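/* Assigned to rump_proc_vfs_release: free the process's cwd state. */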
static void
pvfs_rele(struct proc *p)
{

	cwdfree(p->p_cwdi);
}

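/* Assigned to rump_vfs_fini: shut down file systems and the block layer. */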
static void
fini(void)
{

	vfs_shutdown();
	rumpblk_fini();
}

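/* Assigned to rump_vfs_drainbufs: release npages of buffer cache memory. */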
static void
drainbufs(int npages)
{

	mutex_enter(&bufcache_lock);
	buf_drain(npages);
	mutex_exit(&bufcache_lock);
}

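/*
 * Constructor for the VFS faction: set vnode and buffer cache limits,
 * initialize the VFS subsystems, mount the rump root file system and
 * populate it with a minimal /dev and /tmp.
 */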
RUMP_COMPONENT(RUMP__FACTION_VFS)
{
	extern struct vfsops rumpfs_vfsops;
	char buf[64];
	char *mbase;
	int rv, i;

	/* initialize indirect interfaces */
	rump_vfs_fini = fini;
	rump_vfs_drainbufs = drainbufs;

	if (rumpuser_getparam("RUMP_NVNODES", buf, sizeof(buf)) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<10;
	}

	rumpblk_init();

	for (i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		cache_cpu_init(ci);
	}

	/* make number of bufpages 5% of total memory limit */
	if (rump_physmemlimit != RUMPMEM_UNLIMITED) {
		extern u_int bufpages;
		bufpages = rump_physmemlimit / (20 * PAGE_SIZE);
	}

	bufq_init();
	fstrans_init();
	vfsinit();
	bufinit();
	lf_init();
	spec_init();

	root_device = &rump_rootdev;

	/* bootstrap cwdi (rest done in vfs_mountroot()) */
	proc0.p_cwdi = &cwdi0;
	proc0.p_cwdi = cwdinit();

	vfs_attach(&rumpfs_vfsops);
	vfs_mountroot();

	/* "mtree": create /dev and /tmp */
	do_sys_mkdir("/dev", 0755, UIO_SYSSPACE);
	do_sys_mkdir("/tmp", 01777, UIO_SYSSPACE);
	do_sys_chmodat(curlwp, AT_FDCWD, "/tmp", 01777, 0);

	rump_proc_vfs_init = pvfs_init;
	rump_proc_vfs_release = pvfs_rele;

	if (rump_threads) {
		if ((rv = kthread_create(PRI_IOFLUSH, KTHREAD_MPSAFE, NULL,
		    sched_sync, NULL, NULL, "ioflush")) != 0)
			panic("syncer thread create failed: %d", rv);
	} else {
		syncdelay = 0;
	}

	/*
	 * On archs where the native kernel ABI is supported, map
	 * host module directory to rump.  This means that kernel
	 * modules from the host will be autoloaded to rump kernels.
	 */
	if (rump_nativeabi_p()) {
		if (rumpuser_getparam("RUMP_MODULEBASE", buf, sizeof(buf)) == 0)
			mbase = buf;
		else
			mbase = module_base;

		if (strlen(mbase) != 0 && *mbase != '0') {
			rump_etfs_register(module_base, mbase,
			    RUMP_ETFS_DIR_SUBDIRS);
		}
	}

	module_init_class(MODULE_CLASS_BUFQ);
	module_init_class(MODULE_CLASS_VFS);

	/*
	 * Don't build device names for a large set of devices by
	 * default.  While the pseudo-devfs is a fun experiment,
	 * creating many many device nodes may increase rump kernel
	 * bootstrap time by ~40%.  Device nodes should be created
	 * per-demand in the component constructors.
	 */
#if 0
	{
		extern struct devsw_conv devsw_conv0[];
		extern int max_devsw_convs;
		rump_vfs_builddevs(devsw_conv0, max_devsw_convs);
	}
#else
	rump_vfs_builddevs(NULL, 0);
#endif

	/* attach null device and create /dev/{null,zero} */
	rump_devnull_init();

	rump_component_init(RUMP_COMPONENT_VFS);
}

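/*
 * Bundle a componentname together with the pathname buffer backing
 * cn_nameptr so that both can be released in rump_freecn().
 */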
struct rumpcn {
	struct componentname rcn_cn;
	char *rcn_path;
};

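/*
 * Construct a componentname, copying the name into a freshly
 * allocated pathname buffer.
 */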
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
	struct rumpcn *rcn;
	struct componentname *cnp;

	rcn = kmem_zalloc(sizeof(*rcn), KM_SLEEP);
	cnp = &rcn->rcn_cn;

	rcn->rcn_path = PNBUF_GET();
	strlcpy(rcn->rcn_path, name, MAXPATHLEN);
	cnp->cn_nameptr = rcn->rcn_path;

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags & (MODMASK | PARAMASK);

	cnp->cn_namelen = namelen;

	cnp->cn_cred = creds;

	return cnp;
}

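/*
 * Release a componentname obtained from rump_makecn().  If
 * RUMPCN_FREECRED is given, also drop the credential reference.
 */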
void
rump_freecn(struct componentname *cnp, int flags)
{
	struct rumpcn *rcn = (void *)cnp;

	if (flags & RUMPCN_FREECRED)
		rump_cred_put(cnp->cn_cred);

	PNBUF_PUT(rcn->rcn_path);
	kmem_free(rcn, sizeof(*rcn));
}

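/*
 * Perform a pathname lookup.  The parent vnode may be requested only
 * together with LOCKPARENT; if the caller does not ask for the result
 * vnode, any vnode found by the lookup is released again.
 */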
/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct pathbuf *pb;
	struct nameidata nd;
	int rv;

	pb = pathbuf_create(namep);
	if (pb == NULL) {
		return ENOMEM;
	}
	NDINIT(&nd, op, flags, pb);
	rv = namei(&nd);
	if (rv) {
		pathbuf_destroy(pb);
		return rv;
	}

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	}
	pathbuf_destroy(pb);

	return rv;
}

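/* Return the type, size and, for device vnodes, the device number. */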
void
rump_getvninfo(struct vnode *vp, enum rump_vtype *vtype,
    voff_t *vsize, dev_t *vdev)
{

	*vtype = (enum rump_vtype)vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

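/*
 * Iterate over the list of attached file system types: pass NULL to
 * get the first entry, or a previous result to get the next one.
 */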
struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

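/* Look up a path and return the mount point it resides on. */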
int
rump_vfs_getmp(const char *path, struct mount **mpp)
{
	struct vnode *vp;
	int rv;

	if ((rv = namei_simple_user(path, NSM_FOLLOW_TRYEMULROOT, &vp)) != 0)
		return rv;

	*mpp = vp->v_mount;
	vrele(vp);
	return 0;
}

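/*
 * Helpers for handling struct vattr: allocate one with all fields
 * unset (vattr_null()), set individual fields, and free it again.
 */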
struct vattr*
rump_vattr_init(void)
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum rump_vtype vt)
{

	vap->va_type = (enum vtype)vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

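/* Vnode reference counting and locking helpers. */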
void
rump_vp_incref(struct vnode *vp)
{

	vref(vp);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vrefcnt(vp);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(vp->v_interlock);
}

void
rump_vp_vmobjlock(struct vnode *vp, int write)
{

	rw_enter(vp->v_uobj.vmobjlock, write ? RW_WRITER : RW_READER);
}

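/* Thin wrappers around the VFS_* operations. */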
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, LK_EXCLUSIVE, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, LK_EXCLUSIVE, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

int
rump_vfs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
    int attrnamespace, const char *attrname)
{

	return VFS_EXTATTRCTL(mp, cmd, vp, attrnamespace, attrname);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = vfs_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

/*
 * Dump info about mount point.  No locking.
 */
static bool
rump_print_selector(void *cl, struct vnode *vp)
{
	int *full = cl;

	KASSERT(mutex_owned(vp->v_interlock));

	vfs_vnode_print(vp, *full, (void *)rumpuser_dprintf);
	return false;
}

void
rump_vfs_mount_print(const char *path, int full)
{
#ifdef DEBUGPRINT
	struct vnode *mvp;
	struct vnode_iterator *marker;
	int error;

	rumpuser_dprintf("\n==== dumping mountpoint at ``%s'' ====\n\n", path);
	if ((error = namei_simple_user(path, NSM_FOLLOW_NOEMULROOT, &mvp))!=0) {
		rumpuser_dprintf("==== lookup error %d ====\n\n", error);
		return;
	}
	vfs_mount_print(mvp->v_mount, full, (void *)rumpuser_dprintf);
	if (full) {
		rumpuser_dprintf("\n== dumping vnodes ==\n\n");
		vfs_vnode_iterator_init(mvp->v_mount, &marker);
		vfs_vnode_iterator_next(marker, rump_print_selector, &full);
		vfs_vnode_iterator_destroy(marker);
	}
	vrele(mvp);
	rumpuser_dprintf("\n==== done ====\n\n");
#else
	rumpuser_dprintf("mount dump not supported without DEBUGPRINT\n");
#endif
}

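/*
 * I/O completion callback: record the residual count and error
 * status and finish the buffer.
 */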
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	biodone(bp);
}