/*	$NetBSD: puffs_vfsops.c,v 1.107 2013/01/16 21:10:14 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.107 2013/01/16 21:10:14 pooka Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <miscfs/genfs/genfs.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <lib/libkern/libkern.h>

#include <nfs/nfsproto.h> /* for fh sizes */

MODULE(MODULE_CLASS_VFS, puffs, "putter");

VFS_PROTOS(puffs_vfsop);

#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;

#define BUCKETALLOC(a) (sizeof(struct puffs_pnode_hashlist *) * (a))

static struct putter_ops puffs_putter = {
	.pop_getout	= puffs_msgif_getout,
	.pop_releaseout	= puffs_msgif_releaseout,
	.pop_waitcount	= puffs_msgif_waitcount,
	.pop_dispatch	= puffs_msgif_dispatch,
	.pop_close	= puffs_msgif_close,
};

/*
 * Try to ensure data structures used by the puffs protocol
 * do not unexpectedly change.
 */
#if defined(__i386__) && defined(__ELF__)
CTASSERT(sizeof(struct puffs_kargs) == 3928);
CTASSERT(sizeof(struct vattr) == 136);
CTASSERT(sizeof(struct puffs_req) == 44);
#endif

int
puffs_vfsop_mount(struct mount *mp, const char *path, void *data,
	size_t *data_len)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_kargs *args;
	char fstype[_VFS_NAMELEN];
	char *p;
	int error = 0, i;
	pid_t mntpid = curlwp->l_proc->p_pid;

	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		*(struct puffs_kargs *)data = pmp->pmp_args;
		*data_len = sizeof *args;
		return 0;
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * We need the file system name
	 */
	if (!data)
		return EINVAL;

	args = (struct puffs_kargs *)data;

	if (args->pa_vers != PUFFSVERSION) {
		printf("puffs_mount: development version mismatch: "
		    "kernel %d, lib %d\n", PUFFSVERSION, args->pa_vers);
		error = EINVAL;
		goto out;
	}

	if ((args->pa_flags & ~PUFFS_KFLAG_MASK) != 0) {
		printf("puffs_mount: invalid KFLAGs 0x%x\n", args->pa_flags);
		error = EINVAL;
		goto out;
	}
	if ((args->pa_fhflags & ~PUFFS_FHFLAG_MASK) != 0) {
		printf("puffs_mount: invalid FHFLAGs 0x%x\n", args->pa_fhflags);
		error = EINVAL;
		goto out;
	}

	for (i = 0; i < __arraycount(args->pa_spare); i++) {
		if (args->pa_spare[i] != 0) {
			printf("puffs_mount: pa_spare[%d] = 0x%x\n",
			    i, args->pa_spare[i]);
			error = EINVAL;
			goto out;
		}
	}

	/* use dummy value for passthrough */
	if (args->pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		args->pa_fhsize = sizeof(struct fid);

	/* sanitize file handle length */
	if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
		printf("puffs_mount: handle size %zu too large\n",
		    args->pa_fhsize);
		error = EINVAL;
		goto out;
	}
	/* sanity check file handle max sizes */
	if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
		size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
			if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv2 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V2FH));
				error = EINVAL;
				goto out;
			}
		}

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
			if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv3 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
				error = EINVAL;
				goto out;
			}
		}
	}

	/* don't allow non-printing characters (like my sweet umlauts.. snif) */
	args->pa_typename[sizeof(args->pa_typename)-1] = '\0';
	for (p = args->pa_typename; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	args->pa_mntfromname[sizeof(args->pa_mntfromname)-1] = '\0';
	for (p = args->pa_mntfromname; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	/* build real name */
	(void)strlcpy(fstype, PUFFS_TYPEPREFIX, sizeof(fstype));
	(void)strlcat(fstype, args->pa_typename, sizeof(fstype));

	/* inform user server if it got the max request size it wanted */
	if (args->pa_maxmsglen == 0 || args->pa_maxmsglen > PUFFS_MSG_MAXSIZE)
		args->pa_maxmsglen = PUFFS_MSG_MAXSIZE;
	else if (args->pa_maxmsglen < 2*PUFFS_MSGSTRUCT_MAX)
		args->pa_maxmsglen = 2*PUFFS_MSGSTRUCT_MAX;

	(void)strlcpy(args->pa_typename, fstype, sizeof(args->pa_typename));

	if (args->pa_nhashbuckets == 0)
		args->pa_nhashbuckets = puffs_pnodebuckets_default;
	if (args->pa_nhashbuckets < 1)
		args->pa_nhashbuckets = 1;
	if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
		args->pa_nhashbuckets = puffs_maxpnodebuckets;
		printf("puffs_mount: using %d hash buckets. "
		    "adjust puffs_maxpnodebuckets for more\n",
		    puffs_maxpnodebuckets);
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->pa_mntfromname,
	    UIO_SYSSPACE, fstype, mp, curlwp);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;
	mp->mnt_stat.f_namemax = args->pa_svfsb.f_namemax;

	/*
	 * We can't handle the VFS_STATVFS() mount_domount() does
	 * after VFS_MOUNT() because we'd deadlock, so handle it
	 * here already.
	 */
	copy_statvfs_info(&args->pa_svfsb, mp);
	(void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));

	KASSERT(curlwp != uvm.pagedaemon_lwp);
	pmp = kmem_zalloc(sizeof(struct puffs_mount), KM_SLEEP);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;

#if 0
	/*
	 * XXX: puffs code is MPSAFE.  However, VFS really isn't.
	 * Currently, there is nothing which protects an inode from
	 * reclaim while there are threads inside the file system.
	 * This means that in the event of a server crash, an MPSAFE
	 * mount is likely to end up accessing invalid memory.  For the
	 * non-mpsafe case, the kernel lock, general structure of
	 * puffs and pmp_refcount protect the threads during escape.
	 *
	 * Fixing this will require:
	 *   a) fixing vfs
	 * OR
	 *   b) adding a small sleep to puffs_msgif_close() between
	 *      userdead() and dounmount().
	 *      (well, this isn't really a fix, but would solve
	 *      99.999% of the race conditions).
	 *
	 * Also, in the event of "b", unmount -f should be used,
	 * like with any other file system, sparingly and only when
	 * it is "known" to be safe.
	 */
	mp->mnt_iflags |= IMNT_MPSAFE;
#endif

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_mp = mp;
	pmp->pmp_msg_maxsize = args->pa_maxmsglen;
	pmp->pmp_args = *args;

	pmp->pmp_npnodehash = args->pa_nhashbuckets;
	pmp->pmp_pnodehash = kmem_alloc(BUCKETALLOC(pmp->pmp_npnodehash),
	    KM_SLEEP);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);
	LIST_INIT(&pmp->pmp_newcookie);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if ((pmp->pmp_pi
	    = putter_attach(mntpid, args->pa_fd, pmp, &puffs_putter)) == NULL) {
		error = ENOENT;
		goto out;
	}

	/* XXX: check parameters */
	pmp->pmp_root_cookie = args->pa_root_cookie;
	pmp->pmp_root_vtype = args->pa_root_vtype;
	pmp->pmp_root_vsize = args->pa_root_vsize;
	pmp->pmp_root_rdev = args->pa_root_rdev;
	pmp->pmp_docompat = args->pa_time32;

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pmp->pmp_sopmtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_msg_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_refcount_cv, "puffsref");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_sopcv, "puffsop");
	TAILQ_INIT(&pmp->pmp_msg_touser);
	TAILQ_INIT(&pmp->pmp_msg_replywait);
	TAILQ_INIT(&pmp->pmp_sopfastreqs);
	TAILQ_INIT(&pmp->pmp_sopnodereqs);

	if ((error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    puffs_sop_thread, pmp, NULL, "puffsop")) != 0)
		goto out;
	pmp->pmp_sopthrcount = 1;

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pi)
		putter_detach(pmp->pmp_pi);
	if (error && pmp && pmp->pmp_pnodehash)
		kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
	if (error && pmp)
		kmem_free(pmp, sizeof(struct puffs_mount));
	return error;
}

int
puffs_vfsop_start(struct mount *mp, int flags)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);

	KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
	pmp->pmp_status = PUFFSTAT_RUNNING;

	return 0;
}

int
puffs_vfsop_unmount(struct mount *mp, int mntflags)
{
	PUFFS_MSG_VARS(vfs, unmount);
	struct puffs_mount *pmp;
	int error, force;

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		PUFFS_MSG_ALLOC(vfs, unmount);
		puffs_msg_setinfo(park_unmount,
		    PUFFSOP_VFS, PUFFS_VFS_UNMOUNT, NULL);
		unmount_msg->pvfsr_flags = mntflags;

		PUFFS_MSG_ENQUEUEWAIT(pmp, park_unmount, error);
		PUFFS_MSG_RELEASE(unmount);

		error = checkerr(pmp, error, __func__);
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		struct puffs_sopreq *psopr;

		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		putter_detach(pmp->pmp_pi);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser.  In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/*
		 * Release kernel thread now that there is nothing
		 * it would be wanting to lock.
		 */
		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_sopreq = PUFFS_SOPREQSYS_EXIT;
		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			mutex_enter(&pmp->pmp_sopmtx);
			KASSERT(pmp->pmp_sopthrcount == 0);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
		}
		while (pmp->pmp_sopthrcount > 0)
			cv_wait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx);
		mutex_exit(&pmp->pmp_sopmtx);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_msg_waiter_cv);
		cv_destroy(&pmp->pmp_sopcv);
		mutex_destroy(&pmp->pmp_lock);
		mutex_destroy(&pmp->pmp_sopmtx);

		kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
		kmem_free(pmp, sizeof(struct puffs_mount));
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_vfsop_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int rv;

	rv = puffs_cookie2vnode(pmp, pmp->pmp_root_cookie, 1, 1, vpp);
	KASSERT(rv != PUFFS_NOSUCHCOOKIE);
	return rv;
}

int
puffs_vfsop_statvfs(struct mount *mp, struct statvfs *sbp)
{
	PUFFS_MSG_VARS(vfs, statvfs);
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs.  So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (__predict_false(pmp->pmp_status == PUFFSTAT_MOUNTING))
		return EINPROGRESS;

	PUFFS_MSG_ALLOC(vfs, statvfs);
	puffs_msg_setinfo(park_statvfs, PUFFSOP_VFS, PUFFS_VFS_STATVFS, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_statvfs, error);
	error = checkerr(pmp, error, __func__);
	statvfs_msg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_msg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_msg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	PUFFS_MSG_RELEASE(statvfs);
	return error;
}

static int
pageflush(struct mount *mp, kauth_cred_t cred, int waitfor)
{
	struct puffs_node *pn;
	struct vnode *vp, *mvp;
	int error, rv, fsyncwait;

	error = 0;
	fsyncwait = (waitfor == MNT_WAIT) ? FSYNC_WAIT : 0;

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	mutex_enter(&mntvnode_lock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;

		mutex_enter(vp->v_interlock);
		pn = VPTOPP(vp);
		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			mutex_exit(vp->v_interlock);
			continue;
		}

		mutex_exit(&mntvnode_lock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (rv) {
			mutex_enter(&mntvnode_lock);
			if (rv == ENOENT) {
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}

		/* hmm.. is the FAF thing entirely sensible? */
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat |= PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, fsyncwait, 0, 0);
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat &= ~PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);

	return error;
}

int
puffs_vfsop_sync(struct mount *mp, int waitfor, struct kauth_cred *cred)
{
	PUFFS_MSG_VARS(vfs, sync);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int error, rv;

	error = pageflush(mp, cred, waitfor);

	/* sync fs */
	PUFFS_MSG_ALLOC(vfs, sync);
	sync_msg->pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_msg->pvfsr_cred, cred);
	puffs_msg_setinfo(park_sync, PUFFSOP_VFS, PUFFS_VFS_SYNC, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_sync, rv);
	rv = checkerr(pmp, rv, __func__);
	if (rv)
		error = rv;

	PUFFS_MSG_RELEASE(sync);
	return error;
}

int
puffs_vfsop_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	PUFFS_MSG_VARS(vfs, fhtonode);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	void *fhdata;
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
		fhlen = fhp->fid_len;
		fhdata = fhp;
	} else {
		fhlen = PUFFS_FROMFHSIZE(fhp->fid_len);
		fhdata = fhp->fid_data;

		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
			if (pmp->pmp_args.pa_fhsize < fhlen)
				return EINVAL;
		} else {
			if (pmp->pmp_args.pa_fhsize != fhlen)
				return EINVAL;
		}
	}

	argsize = sizeof(struct puffs_vfsmsg_fhtonode) + fhlen;
	puffs_msgmem_alloc(argsize, &park_fhtonode, (void *)&fhtonode_msg, 1);
	fhtonode_msg->pvfsr_dsize = fhlen;
	memcpy(fhtonode_msg->pvfsr_data, fhdata, fhlen);
	puffs_msg_setinfo(park_fhtonode, PUFFSOP_VFS, PUFFS_VFS_FHTOVP, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_fhtonode, error);
	error = checkerr(pmp, error, __func__);
	if (error)
		goto out;

	error = puffs_cookie2vnode(pmp, fhtonode_msg->pvfsr_fhcookie, 1,1,&vp);
	DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
	    fhtonode_msg->pvfsr_fhcookie, vp));
	if (error == PUFFS_NOSUCHCOOKIE) {
		error = puffs_getvnode(mp, fhtonode_msg->pvfsr_fhcookie,
		    fhtonode_msg->pvfsr_vtype, fhtonode_msg->pvfsr_size,
		    fhtonode_msg->pvfsr_rdev, &vp);
		if (error)
			goto out;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else if (error) {
		goto out;
	}

	*vpp = vp;
 out:
	puffs_msgmem_release(park_fhtonode);
	return error;
}

int
puffs_vfsop_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	PUFFS_MSG_VARS(vfs, nodetofh);
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	/* if file handles are static len, we can test len immediately */
	if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
	    && ((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) == 0)
	    && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
		return E2BIG;
	}

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = *fh_size;
	else
		fhlen = PUFFS_FROMFHSIZE(*fh_size);

	argsize = sizeof(struct puffs_vfsmsg_nodetofh) + fhlen;
	puffs_msgmem_alloc(argsize, &park_nodetofh, (void *)&nodetofh_msg, 1);
	nodetofh_msg->pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_msg->pvfsr_dsize = fhlen;
	puffs_msg_setinfo(park_nodetofh, PUFFSOP_VFS, PUFFS_VFS_VPTOFH, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_nodetofh, error);
	error = checkerr(pmp, error, __func__);

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = nodetofh_msg->pvfsr_dsize;
	else if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC)
		fhlen = PUFFS_TOFHSIZE(nodetofh_msg->pvfsr_dsize);
	else
		fhlen = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);

	if (error) {
		if (error == E2BIG)
			*fh_size = fhlen;
		goto out;
	}

	if (fhlen > FHANDLE_SIZE_MAX) {
		puffs_senderr(pmp, PUFFS_ERR_VPTOFH, E2BIG,
		    "file handle too big", VPTOPNC(vp));
		error = EPROTO;
		goto out;
	}

	if (*fh_size < fhlen) {
		*fh_size = fhlen;
		error = E2BIG;
		goto out;
	}
	*fh_size = fhlen;

	if (fhp) {
		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
			memcpy(fhp, nodetofh_msg->pvfsr_data, fhlen);
		} else {
			fhp->fid_len = *fh_size;
			memcpy(fhp->fid_data, nodetofh_msg->pvfsr_data,
			    nodetofh_msg->pvfsr_dsize);
		}
	}

 out:
	puffs_msgmem_release(park_nodetofh);
	return error;
}

void
puffs_vfsop_init(void)
{

	/* some checks depend on this */
	KASSERT(VNOVAL == VSIZENOTSET);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&puffs_vapool, sizeof(struct vattr), 0, 0, 0,
	    "puffvapl", &pool_allocator_nointr, IPL_NONE);
	puffs_msgif_init();
}

void
puffs_vfsop_done(void)
{

	puffs_msgif_destroy();
	pool_destroy(&puffs_pnpool);
	pool_destroy(&puffs_vapool);
}

int
puffs_vfsop_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

int
puffs_vfsop_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
	int attrnamespace, const char *attrname)
{
	PUFFS_MSG_VARS(vfs, extattrctl);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_node *pnp;
	puffs_cookie_t pnc;
	int error, flags;

	if (vp) {
		/* doesn't make sense for puffs servers */
		if (vp->v_mount != mp)
			return EXDEV;
		pnp = vp->v_data;
		pnc = pnp->pn_cookie;
		flags = PUFFS_EXTATTRCTL_HASNODE;
	} else {
		pnp = pnc = NULL;
		flags = 0;
	}

	PUFFS_MSG_ALLOC(vfs, extattrctl);
	extattrctl_msg->pvfsr_cmd = cmd;
	extattrctl_msg->pvfsr_attrnamespace = attrnamespace;
	extattrctl_msg->pvfsr_flags = flags;
	if (attrname) {
		strlcpy(extattrctl_msg->pvfsr_attrname, attrname,
		    sizeof(extattrctl_msg->pvfsr_attrname));
		extattrctl_msg->pvfsr_flags |= PUFFS_EXTATTRCTL_HASATTRNAME;
	}
	puffs_msg_setinfo(park_extattrctl,
	    PUFFSOP_VFS, PUFFS_VFS_EXTATTRCTL, pnc);

	puffs_msg_enqueue(pmp, park_extattrctl);
	if (vp) {
		mutex_enter(&pnp->pn_mtx);
		puffs_referencenode(pnp);
		mutex_exit(&pnp->pn_mtx);
		VOP_UNLOCK(vp);
	}
	error = puffs_msg_wait2(pmp, park_extattrctl, pnp, NULL);
	PUFFS_MSG_RELEASE(extattrctl);
	if (vp) {
		puffs_releasenode(pnp);
	}

	return checkerr(pmp, error, __func__);
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	sizeof (struct puffs_kargs),
	puffs_vfsop_mount,		/* mount	*/
	puffs_vfsop_start,		/* start	*/
	puffs_vfsop_unmount,		/* unmount	*/
	puffs_vfsop_root,		/* root		*/
	(void *)eopnotsupp,		/* quotactl	*/
	puffs_vfsop_statvfs,		/* statvfs	*/
	puffs_vfsop_sync,		/* sync		*/
	(void *)eopnotsupp,		/* vget		*/
	puffs_vfsop_fhtovp,		/* fhtovp	*/
	puffs_vfsop_vptofh,		/* vptofh	*/
	puffs_vfsop_init,		/* init		*/
	NULL,				/* reinit	*/
	puffs_vfsop_done,		/* done		*/
	NULL,				/* mountroot	*/
	puffs_vfsop_snapshot,		/* snapshot	*/
	puffs_vfsop_extattrctl,		/* extattrctl	*/
	(void *)eopnotsupp,		/* suspendctl	*/
	genfs_renamelock_enter,
	genfs_renamelock_exit,
	(void *)eopnotsupp,
	puffs_vnodeopv_descs,		/* vnodeops	*/
	0,				/* refcount	*/
	{ NULL, NULL }
};

static int
puffs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&puffs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&puffs_vfsops);
	default:
		return ENOTTY;
	}
}
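
/*
 * Illustrative sketch, not part of the original driver: most vfsops
 * above that need the userspace server follow the same message
 * round-trip.  Assuming a hypothetical operation "frobnicate" with a
 * matching puffs_vfsmsg_frobnicate payload and PUFFS_VFS_FROBNICATE
 * opcode (neither exists in the real protocol), the skeleton would
 * look roughly like this:
 *
 *	PUFFS_MSG_VARS(vfs, frobnicate);
 *	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
 *	int error;
 *
 *	PUFFS_MSG_ALLOC(vfs, frobnicate);
 *	frobnicate_msg->pvfsr_something = ...;	(fill request payload)
 *	puffs_msg_setinfo(park_frobnicate,
 *	    PUFFSOP_VFS, PUFFS_VFS_FROBNICATE, NULL);
 *
 *	PUFFS_MSG_ENQUEUEWAIT(pmp, park_frobnicate, error);
 *	error = checkerr(pmp, error, __func__);
 *	(copy results out of frobnicate_msg if error == 0)
 *	PUFFS_MSG_RELEASE(frobnicate);
 *	return error;
 *
 * The real operations differ in their request payloads and, in some
 * cases (e.g. extattrctl above, which uses puffs_msg_enqueue() and
 * puffs_msg_wait2() separately), in how the enqueue and wait steps
 * are split.
 */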