/*	$NetBSD: puffs_vfsops.c,v 1.121 2018/05/28 21:04:37 chs Exp $	*/

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.121 2018/05/28 21:04:37 chs Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <miscfs/genfs/genfs.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <lib/libkern/libkern.h>

#include <nfs/nfsproto.h> /* for fh sizes */

MODULE(MODULE_CLASS_VFS, puffs, "putter");

VFS_PROTOS(puffs_vfsop);

static struct putter_ops puffs_putter = {
	.pop_getout	= puffs_msgif_getout,
	.pop_releaseout	= puffs_msgif_releaseout,
	.pop_waitcount	= puffs_msgif_waitcount,
	.pop_dispatch	= puffs_msgif_dispatch,
	.pop_close	= puffs_msgif_close,
};

static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
	.gop_putrange = genfs_gop_putrange,
};

/*
 * Try to ensure data structures used by the puffs protocol
 * do not unexpectedly change.
 */
#if defined(__i386__) && defined(__ELF__)
CTASSERT(sizeof(struct puffs_kargs) == 3928);
CTASSERT(sizeof(struct vattr) == 136);
CTASSERT(sizeof(struct puffs_req) == 44);
#endif
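/*
 * The numbers above are for the 32-bit i386 ELF ABI only; a CTASSERT()
 * failure simply breaks the build, which is the point: growing e.g.
 * struct vattr or struct puffs_req would change the kernel<->server
 * message layout and should not happen unnoticed.
 */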

int
puffs_vfsop_mount(struct mount *mp, const char *path, void *data,
	size_t *data_len)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_kargs *args;
	char fstype[_VFS_NAMELEN];
	char *p;
	int error = 0, i;
	pid_t mntpid = curlwp->l_proc->p_pid;

	if (data == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		*(struct puffs_kargs *)data = pmp->pmp_args;
		*data_len = sizeof *args;
		return 0;
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	args = (struct puffs_kargs *)data;

	if (args->pa_vers != PUFFSVERSION) {
		printf("puffs_mount: development version mismatch: "
		    "kernel %d, lib %d\n", PUFFSVERSION, args->pa_vers);
		error = EINVAL;
		goto out;
	}

	if ((args->pa_flags & ~PUFFS_KFLAG_MASK) != 0) {
		printf("puffs_mount: invalid KFLAGs 0x%x\n", args->pa_flags);
		error = EINVAL;
		goto out;
	}
	if ((args->pa_fhflags & ~PUFFS_FHFLAG_MASK) != 0) {
		printf("puffs_mount: invalid FHFLAGs 0x%x\n", args->pa_fhflags);
		error = EINVAL;
		goto out;
	}

	for (i = 0; i < __arraycount(args->pa_spare); i++) {
		if (args->pa_spare[i] != 0) {
			printf("puffs_mount: pa_spare[%d] = 0x%x\n",
			    i, args->pa_spare[i]);
			error = EINVAL;
			goto out;
		}
	}

	/* use dummy value for passthrough */
	if (args->pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		args->pa_fhsize = sizeof(struct fid);

	/* sanitize file handle length */
	if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
		printf("puffs_mount: handle size %zu too large\n",
		    args->pa_fhsize);
		error = EINVAL;
		goto out;
	}
	/* sanity check file handle max sizes */
	if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
		size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
			if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv2 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V2FH));
				error = EINVAL;
				goto out;
			}
		}

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
			if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv3 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
				error = EINVAL;
				goto out;
			}
		}
	}
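
	/*
	 * A note on the conversions above: pa_fhsize is the size of the
	 * server's opaque handle data, while the kernel deals in struct
	 * fid, so PUFFS_TOFHSIZE()/PUFFS_FROMFHSIZE() presumably just add
	 * or subtract the fid header when switching between the two views.
	 * E.g. an NFSv2 export can carry at most PUFFS_FROMFHSIZE(NFSX_V2FH)
	 * bytes of server handle data.
	 */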

	/* don't allow non-printing characters (like my sweet umlauts.. snif) */
	args->pa_typename[sizeof(args->pa_typename)-1] = '\0';
	for (p = args->pa_typename; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	args->pa_mntfromname[sizeof(args->pa_mntfromname)-1] = '\0';
	for (p = args->pa_mntfromname; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	/* build real name */
	(void)strlcpy(fstype, PUFFS_TYPEPREFIX, sizeof(fstype));
	(void)strlcat(fstype, args->pa_typename, sizeof(fstype));

	/* inform user server whether it got the max request size it wanted */
	if (args->pa_maxmsglen == 0 || args->pa_maxmsglen > PUFFS_MSG_MAXSIZE)
		args->pa_maxmsglen = PUFFS_MSG_MAXSIZE;
	else if (args->pa_maxmsglen < 2*PUFFS_MSGSTRUCT_MAX)
		args->pa_maxmsglen = 2*PUFFS_MSGSTRUCT_MAX;
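	/*
	 * That is: 0 (or anything oversized) means "give me the maximum",
	 * i.e. PUFFS_MSG_MAXSIZE, and anything below 2*PUFFS_MSGSTRUCT_MAX
	 * is raised to that floor, presumably so that even the largest
	 * fixed-size message still fits with some room for data.  The
	 * clamped value is presumably what the server ends up seeing in
	 * pa_maxmsglen, which is how it learns what it actually got.
	 */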

	(void)strlcpy(args->pa_typename, fstype, sizeof(args->pa_typename));

	error = set_statvfs_info(path, UIO_USERSPACE, args->pa_mntfromname,
	    UIO_SYSSPACE, fstype, mp, curlwp);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;
	mp->mnt_stat.f_namemax = args->pa_svfsb.f_namemax;

	/*
	 * We can't handle the VFS_STATVFS() mount_domount() does
	 * after VFS_MOUNT() because we'd deadlock, so handle it
	 * here already.
	 */
	copy_statvfs_info(&args->pa_svfsb, mp);
	(void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));

	KASSERT(curlwp != uvm.pagedaemon_lwp);
	pmp = kmem_zalloc(sizeof(struct puffs_mount), KM_SLEEP);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;

#if 0
	/*
	 * XXX: puffs code is MPSAFE.  However, VFS really isn't.
	 * Currently, there is nothing which protects an inode from
	 * reclaim while there are threads inside the file system.
	 * This means that in the event of a server crash, an MPSAFE
	 * mount is likely to end up accessing invalid memory.  For the
	 * non-mpsafe case, the kernel lock, general structure of
	 * puffs and pmp_refcount protect the threads during escape.
	 *
	 * Fixing this will require:
	 *  a) fixing vfs
	 * OR
	 *  b) adding a small sleep to puffs_msgif_close() between
	 *     userdead() and dounmount().
	 *     (well, this isn't really a fix, but would solve
	 *     99.999% of the race conditions).
	 *
	 * Also, in the event of "b", unmount -f should be used,
	 * like with any other file system, sparingly and only when
	 * it is "known" to be safe.
	 */
	mp->mnt_iflags |= IMNT_MPSAFE;
#endif

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_mp = mp;
	pmp->pmp_msg_maxsize = args->pa_maxmsglen;
	pmp->pmp_args = *args;

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt.
	 */
	if ((pmp->pmp_pi
	    = putter_attach(mntpid, args->pa_fd, pmp, &puffs_putter)) == NULL) {
		error = ENOENT;
		goto out;
	}
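
	/*
	 * For reference, the userspace side that gets us this far looks
	 * roughly like the following (normally buried inside libpuffs, so
	 * this is only a sketch and the details vary):
	 *
	 *	fd = open("/dev/putter", O_RDWR);
	 *	memset(&kargs, 0, sizeof(kargs));
	 *	kargs.pa_vers = PUFFSVERSION;
	 *	kargs.pa_fd = fd;
	 *	... fill in pa_typename, pa_root_cookie, pa_maxmsglen ...
	 *	mount(MOUNT_PUFFS, mntpath, mntflags, &kargs, sizeof(kargs));
	 *
	 * putter_attach() above pairs that pid/fd with this mount, and all
	 * further kernel<->server traffic flows over the putter descriptor.
	 */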

	/* XXX: check parameters */
	pmp->pmp_root_cookie = args->pa_root_cookie;
	switch (args->pa_root_vtype) {
	case VNON: case VREG: case VDIR: case VBLK:
	case VCHR: case VLNK: case VSOCK: case VFIFO:
		break;
	default:
		error = EINVAL;
		goto out;
	}
	pmp->pmp_root_vtype = args->pa_root_vtype;

	if (args->pa_root_vsize < 0) {
		error = EINVAL;
		goto out;
	}
	pmp->pmp_root_vsize = args->pa_root_vsize;

	pmp->pmp_root_rdev = args->pa_root_rdev;
	pmp->pmp_docompat = args->pa_time32;

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pmp->pmp_sopmtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_msg_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_refcount_cv, "puffsref");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_sopcv, "puffsop");
	TAILQ_INIT(&pmp->pmp_msg_touser);
	TAILQ_INIT(&pmp->pmp_msg_replywait);
	TAILQ_INIT(&pmp->pmp_sopfastreqs);
	TAILQ_INIT(&pmp->pmp_sopnodereqs);

	if ((error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    puffs_sop_thread, pmp, NULL, "puffsop")) != 0)
		goto out;
	pmp->pmp_sopthrcount = 1;
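
	/*
	 * puffs_sop_thread (in puffs_msgif.c) drains pmp_sopfastreqs and
	 * pmp_sopnodereqs: asynchronous work, such as flush and node
	 * expiry requests, queued from contexts that cannot block on the
	 * server themselves.  It exits once it sees the
	 * PUFFS_SOPREQSYS_EXIT request queued by unmount below.
	 */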

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pi)
		putter_detach(pmp->pmp_pi);
	if (error && pmp)
		kmem_free(pmp, sizeof(struct puffs_mount));
	return error;
}

int
puffs_vfsop_start(struct mount *mp, int flags)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);

	KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
	pmp->pmp_status = PUFFSTAT_RUNNING;

	return 0;
}

int
puffs_vfsop_unmount(struct mount *mp, int mntflags)
{
	PUFFS_MSG_VARS(vfs, unmount);
	struct puffs_mount *pmp;
	int error, force;

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		PUFFS_MSG_ALLOC(vfs, unmount);
		puffs_msg_setinfo(park_unmount,
		    PUFFSOP_VFS, PUFFS_VFS_UNMOUNT, NULL);
		unmount_msg->pvfsr_flags = mntflags;

		PUFFS_MSG_ENQUEUEWAIT(pmp, park_unmount, error);
		PUFFS_MSG_RELEASE(unmount);

		error = checkerr(pmp, error, __func__);
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		struct puffs_sopreq *psopr;

		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		putter_detach(pmp->pmp_pi);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser.  In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/*
		 * Release kernel thread now that there is nothing
		 * it would be wanting to lock.
		 */
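		/*
		 * The handshake: queue a PUFFS_SOPREQSYS_EXIT request,
		 * poke the thread, and wait for pmp_sopthrcount to drop
		 * to zero.  If the thread has already exited on its own
		 * (sopthrcount == 0), the request is simply freed again.
		 */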
		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_sopreq = PUFFS_SOPREQSYS_EXIT;
		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			mutex_enter(&pmp->pmp_sopmtx);
			KASSERT(pmp->pmp_sopthrcount == 0);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
		}
		while (pmp->pmp_sopthrcount > 0)
			cv_wait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx);
		mutex_exit(&pmp->pmp_sopmtx);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_msg_waiter_cv);
		cv_destroy(&pmp->pmp_sopcv);
		mutex_destroy(&pmp->pmp_lock);
		mutex_destroy(&pmp->pmp_sopmtx);

		kmem_free(pmp, sizeof(struct puffs_mount));
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_vfsop_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int rv;

	rv = puffs_cookie2vnode(pmp, pmp->pmp_root_cookie, vpp);
	KASSERT(rv != PUFFS_NOSUCHCOOKIE);
	if (rv != 0)
		return rv;
	rv = vn_lock(*vpp, LK_EXCLUSIVE);
	if (rv != 0) {
		vrele(*vpp);
		*vpp = NULL;
		return rv;
	}
	return 0;
}

int
puffs_vfsop_statvfs(struct mount *mp, struct statvfs *sbp)
{
	PUFFS_MSG_VARS(vfs, statvfs);
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs.  So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (__predict_false(pmp->pmp_status == PUFFSTAT_MOUNTING))
		return EINPROGRESS;

	PUFFS_MSG_ALLOC(vfs, statvfs);
	puffs_msg_setinfo(park_statvfs, PUFFSOP_VFS, PUFFS_VFS_STATVFS, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_statvfs, error);
	error = checkerr(pmp, error, __func__);
	statvfs_msg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_msg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_msg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	PUFFS_MSG_RELEASE(statvfs);
	return error;
}

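/*
 * Selector for the vnode iterator below: pick regular files which have
 * either dirty buffers or dirty pages.  The iterator calls this with
 * v_interlock held, hence the KASSERT.
 */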
static bool
pageflush_selector(void *cl, struct vnode *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	return vp->v_type == VREG &&
	    !(LIST_EMPTY(&vp->v_dirtyblkhd) && UVM_OBJ_IS_CLEAN(&vp->v_uobj));
}

static int
pageflush(struct mount *mp, kauth_cred_t cred, int waitfor)
{
	struct puffs_node *pn;
	struct vnode *vp;
	struct vnode_iterator *marker;
	int error, rv, fsyncwait;

	error = 0;
	fsyncwait = (waitfor == MNT_WAIT) ? FSYNC_WAIT : 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, pageflush_selector,
	    NULL)))
	{
		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation of why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			vrele(vp);
			continue;
		}
		pn = VPTOPP(vp);
		/* hmm.. is the FAF (fire-and-forget) thing entirely sensible? */
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat |= PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, fsyncwait, 0, 0);
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat &= ~PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	return error;
}

int
puffs_vfsop_sync(struct mount *mp, int waitfor, struct kauth_cred *cred)
{
	PUFFS_MSG_VARS(vfs, sync);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int error, rv;

	error = pageflush(mp, cred, waitfor);

	/* sync fs */
	PUFFS_MSG_ALLOC(vfs, sync);
	sync_msg->pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_msg->pvfsr_cred, cred);
	puffs_msg_setinfo(park_sync, PUFFSOP_VFS, PUFFS_VFS_SYNC, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_sync, rv);
	rv = checkerr(pmp, rv, __func__);
	if (rv)
		error = rv;

	PUFFS_MSG_RELEASE(sync);
	return error;
}

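/*
 * NFS-export style file handle -> vnode.  In PASSTHROUGH mode the whole
 * struct fid is shipped to the server as-is; otherwise only the fid_data
 * payload travels, and its length is checked against what the server
 * advertised at mount time.
 */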
int
puffs_vfsop_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	PUFFS_MSG_VARS(vfs, fhtonode);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	void *fhdata;
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
		fhlen = fhp->fid_len;
		fhdata = fhp;
	} else {
		fhlen = PUFFS_FROMFHSIZE(fhp->fid_len);
		fhdata = fhp->fid_data;

		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
			if (pmp->pmp_args.pa_fhsize < fhlen)
				return EINVAL;
		} else {
			if (pmp->pmp_args.pa_fhsize != fhlen)
				return EINVAL;
		}
	}

	argsize = sizeof(struct puffs_vfsmsg_fhtonode) + fhlen;
	puffs_msgmem_alloc(argsize, &park_fhtonode, (void *)&fhtonode_msg, 1);
	fhtonode_msg->pvfsr_dsize = fhlen;
	memcpy(fhtonode_msg->pvfsr_data, fhdata, fhlen);
	puffs_msg_setinfo(park_fhtonode, PUFFSOP_VFS, PUFFS_VFS_FHTOVP, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_fhtonode, error);
	error = checkerr(pmp, error, __func__);
	if (error)
		goto out;

	error = puffs_getvnode(mp, fhtonode_msg->pvfsr_fhcookie,
	    fhtonode_msg->pvfsr_vtype, fhtonode_msg->pvfsr_size,
	    fhtonode_msg->pvfsr_rdev, &vp);
	if (error)
		goto out;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	*vpp = vp;
 out:
	puffs_msgmem_release(park_fhtonode);
	return error;
}

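/*
 * vnode -> file handle.  This follows the usual vptofh contract: if the
 * caller's buffer (*fh_size) is too small, return E2BIG and write the
 * required size to *fh_size so the caller can retry with a larger buffer.
 */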
int
puffs_vfsop_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	PUFFS_MSG_VARS(vfs, nodetofh);
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	/* if file handles have a static length, we can test it immediately */
	if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
	    && ((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) == 0)
	    && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
		return E2BIG;
	}

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = *fh_size;
	else
		fhlen = PUFFS_FROMFHSIZE(*fh_size);

	argsize = sizeof(struct puffs_vfsmsg_nodetofh) + fhlen;
	puffs_msgmem_alloc(argsize, &park_nodetofh, (void *)&nodetofh_msg, 1);
	nodetofh_msg->pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_msg->pvfsr_dsize = fhlen;
	puffs_msg_setinfo(park_nodetofh, PUFFSOP_VFS, PUFFS_VFS_VPTOFH, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_nodetofh, error);
	error = checkerr(pmp, error, __func__);

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = nodetofh_msg->pvfsr_dsize;
	else if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC)
		fhlen = PUFFS_TOFHSIZE(nodetofh_msg->pvfsr_dsize);
	else
		fhlen = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);

	if (error) {
		if (error == E2BIG)
			*fh_size = fhlen;
		goto out;
	}

	if (fhlen > FHANDLE_SIZE_MAX) {
		puffs_senderr(pmp, PUFFS_ERR_VPTOFH, E2BIG,
		    "file handle too big", VPTOPNC(vp));
		error = EPROTO;
		goto out;
	}

	if (*fh_size < fhlen) {
		*fh_size = fhlen;
		error = E2BIG;
		goto out;
	}
	*fh_size = fhlen;

	if (fhp) {
		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
			memcpy(fhp, nodetofh_msg->pvfsr_data, fhlen);
		} else {
			fhp->fid_len = *fh_size;
			memcpy(fhp->fid_data, nodetofh_msg->pvfsr_data,
			    nodetofh_msg->pvfsr_dsize);
		}
	}

 out:
	puffs_msgmem_release(park_nodetofh);
	return error;
}

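/*
 * Called through vcache_get() with the userspace cookie as the key.
 * The vnode comes back with type VNON; the callers (puffs_getvnode()
 * and friends) are expected to fill in the real type and size.
 */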
int
puffs_vfsop_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	KASSERT(key_len == sizeof(puffs_cookie_t));

	pmp = MPTOPUFFSMP(mp);

	/* Allocate and initialize the pnode. */
	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	memset(pnode, 0, sizeof(struct puffs_node));

	pnode->pn_vp = vp;
	memcpy(&pnode->pn_cookie, key, key_len);
	pnode->pn_refcount = 1;
	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pnode->pn_sizemtx, MUTEX_DEFAULT, IPL_NONE);
	selinit(&pnode->pn_sel);
	vp->v_tag = VT_PUFFS;
	vp->v_type = VNON;
	vp->v_op = puffs_vnodeop_p;
	if (pnode->pn_cookie == pmp->pmp_root_cookie)
		vp->v_vflag |= VV_ROOT;
	vp->v_data = pnode;

	genfs_node_init(vp, &puffs_genfsops);
	uvm_vnp_setsize(vp, 0);

	*new_key = &pnode->pn_cookie;
	return 0;
}

void
puffs_vfsop_init(void)
{

	/* some checks depend on this */
	KASSERT(VNOVAL == VSIZENOTSET);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&puffs_vapool, sizeof(struct vattr), 0, 0, 0,
	    "puffvapl", &pool_allocator_nointr, IPL_NONE);
	puffs_msgif_init();
}

void
puffs_vfsop_done(void)
{

	puffs_msgif_destroy();
	pool_destroy(&puffs_pnpool);
	pool_destroy(&puffs_vapool);
}

int
puffs_vfsop_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

int
puffs_vfsop_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
	int attrnamespace, const char *attrname)
{
	PUFFS_MSG_VARS(vfs, extattrctl);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_node *pnp;
	puffs_cookie_t pnc;
	int error, flags;

	if (vp) {
		/* doesn't make sense for puffs servers */
		if (vp->v_mount != mp)
			return EXDEV;
		pnp = vp->v_data;
		pnc = pnp->pn_cookie;
		flags = PUFFS_EXTATTRCTL_HASNODE;
	} else {
		pnp = pnc = NULL;
		flags = 0;
	}

	PUFFS_MSG_ALLOC(vfs, extattrctl);
	extattrctl_msg->pvfsr_cmd = cmd;
	extattrctl_msg->pvfsr_attrnamespace = attrnamespace;
	extattrctl_msg->pvfsr_flags = flags;
	if (attrname) {
		strlcpy(extattrctl_msg->pvfsr_attrname, attrname,
		    sizeof(extattrctl_msg->pvfsr_attrname));
		extattrctl_msg->pvfsr_flags |= PUFFS_EXTATTRCTL_HASATTRNAME;
	}
	puffs_msg_setinfo(park_extattrctl,
	    PUFFSOP_VFS, PUFFS_VFS_EXTATTRCTL, pnc);

	puffs_msg_enqueue(pmp, park_extattrctl);
	if (vp) {
		mutex_enter(&pnp->pn_mtx);
		puffs_referencenode(pnp);
		mutex_exit(&pnp->pn_mtx);
		VOP_UNLOCK(vp);
	}
	error = puffs_msg_wait2(pmp, park_extattrctl, pnp, NULL);
	PUFFS_MSG_RELEASE(extattrctl);
	if (vp) {
		puffs_releasenode(pnp);
	}

	return checkerr(pmp, error, __func__);
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	.vfs_name = MOUNT_PUFFS,
	.vfs_min_mount_data = sizeof (struct puffs_kargs),
	.vfs_mount = puffs_vfsop_mount,
	.vfs_start = puffs_vfsop_start,
	.vfs_unmount = puffs_vfsop_unmount,
	.vfs_root = puffs_vfsop_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = puffs_vfsop_statvfs,
	.vfs_sync = puffs_vfsop_sync,
	.vfs_vget = (void *)eopnotsupp,
	.vfs_loadvnode = puffs_vfsop_loadvnode,
	.vfs_fhtovp = puffs_vfsop_fhtovp,
	.vfs_vptofh = puffs_vfsop_vptofh,
	.vfs_init = puffs_vfsop_init,
	.vfs_done = puffs_vfsop_done,
	.vfs_snapshot = puffs_vfsop_snapshot,
	.vfs_extattrctl = puffs_vfsop_extattrctl,
	.vfs_suspendctl = genfs_suspendctl,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = puffs_vnodeopv_descs
};

static int
puffs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&puffs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&puffs_vfsops);
	default:
		return ENOTTY;
	}
}