1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.35 2008/05/18 01:48:50 dillon Exp $
 */
/*
 * HAMMER filesystem VFS interface: mount/update/unmount, statfs, sync,
 * NFS file-handle support, plus the sysctl debug/statistics knobs.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

/*
 * Debug and statistics globals, exported read-write or read-only below
 * under vfs.hammer.* via sysctl.
 */
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;	/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_limit_dirtybufs = 100;	/* per-mount */
int hammer_bio_count;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root 	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

/*
 * One-time VFS initialization.  Currently a no-op.
 */
static int
hammer_vfs_init(struct vfsconf *conf)
{
	/*hammer_init_alist_config();*/
	return(0);
}

/*
 * Mount (or MNT_UPDATE an existing mount of) a HAMMER filesystem.
 *
 * Userland passes a hammer_mount_info via 'data' containing an array of
 * user-space volume path pointers (info.volumes / info.nvolumes) which
 * are copied in and installed one at a time.  A non-zero info.asof
 * forces a read-only as-of (historical) mount.
 *
 * On a fresh mount the in-memory hammer_mount is allocated and seeded,
 * the volumes are loaded, crash recovery (UNDO replay) is run against
 * the root volume, and the root directory vnode (obj_id 1) is located
 * to verify the filesystem is usable.
 */
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	/* sanity-check the userland-supplied volume count */
	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return (EINVAL);

	/*
	 * Internal mount data structure.  For MNT_UPDATE the structure
	 * already exists; otherwise allocate and initialize it.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * root_btree_beg/end bound the entire B-Tree key space
		 * (minimum and maximum possible keys).
		 */
		hmp->root_btree_beg.localization = HAMMER_MIN_LOCALIZATION;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = HAMMER_MAX_LOCALIZATION;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);

		/*
		 * Set default zone limits.  This value can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);

			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags = info.hflags;
	if (info.asof) {
		/* as-of (historical) mounts are forced read-only */
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vise-versa XXX
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only XXX\n");
			hmp->ronly = 1;
		} else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
			kprintf("HAMMER read-only -> read-write XXX\n");
			hmp->ronly = 0;
		}
		return(0);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes.  Each entry of info.volumes is a userland pointer
	 * to a path string; copy in the pointer, then the string, then
	 * install the volume.  Stop at the first error.
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * The recover code will load hmp->flusher_undo_start.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.  hammer_free_hmp() tears down everything
	 * built above on failure.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

/*
 * Unmount a HAMMER filesystem.  Flush vnodes first (FORCECLOSE if
 * MNT_FORCE), then tear down the internal mount structure.
 */
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called both from unmount and from the mount failure paths, so it must
 * cope with a partially constructed hammer_mount.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
	/* sync twice, then shut the flusher down */
	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload the volumes
	 */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp->zbuf, M_HAMMER);
	lockuninit(&hmp->blockmap_lock);

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);

	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
489 */ 490 int 491 hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp) 492 { 493 struct hammer_transaction trans; 494 struct hammer_mount *hmp = (void *)mp->mnt_data; 495 struct hammer_inode *ip; 496 int error; 497 498 hammer_simple_transaction(&trans, hmp); 499 500 /* 501 * Lookup the requested HAMMER inode. The structure must be 502 * left unlocked while we manipulate the related vnode to avoid 503 * a deadlock. 504 */ 505 ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error); 506 if (ip == NULL) { 507 *vpp = NULL; 508 return(error); 509 } 510 error = hammer_get_vnode(ip, vpp); 511 hammer_rel_inode(ip, 0); 512 hammer_done_transaction(&trans); 513 return (error); 514 } 515 516 /* 517 * Return the root vnode for the filesystem. 518 * 519 * HAMMER stores the root vnode in the hammer_mount structure so 520 * getting it is easy. 521 */ 522 static int 523 hammer_vfs_root(struct mount *mp, struct vnode **vpp) 524 { 525 #if 0 526 struct hammer_mount *hmp = (void *)mp->mnt_data; 527 #endif 528 int error; 529 530 error = hammer_vfs_vget(mp, 1, vpp); 531 return (error); 532 } 533 534 static int 535 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 536 { 537 struct hammer_mount *hmp = (void *)mp->mnt_data; 538 hammer_volume_t volume; 539 hammer_volume_ondisk_t ondisk; 540 int error; 541 int64_t bfree; 542 543 volume = hammer_get_root_volume(hmp, &error); 544 if (error) 545 return(error); 546 ondisk = volume->ondisk; 547 548 /* 549 * Basic stats 550 */ 551 mp->mnt_stat.f_files = ondisk->vol0_stat_inodes; 552 bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE; 553 hammer_rel_volume(volume, 0); 554 555 mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE; 556 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree; 557 if (mp->mnt_stat.f_files < 0) 558 mp->mnt_stat.f_files = 0; 559 560 *sbp = mp->mnt_stat; 561 return(0); 562 } 563 564 /* 565 * Sync the filesystem. 
Currently we have to run it twice, the second 566 * one will advance the undo start index to the end index, so if a crash 567 * occurs no undos will be run on mount. 568 * 569 * We do not sync the filesystem if we are called from a panic. If we did 570 * we might end up blowing up a sync that was already in progress. 571 */ 572 static int 573 hammer_vfs_sync(struct mount *mp, int waitfor) 574 { 575 struct hammer_mount *hmp = (void *)mp->mnt_data; 576 int error; 577 578 if (panicstr == NULL) { 579 error = hammer_sync_hmp(hmp, waitfor); 580 if (error == 0) 581 error = hammer_sync_hmp(hmp, waitfor); 582 } else { 583 error = EIO; 584 hkprintf("S"); 585 } 586 return (error); 587 } 588 589 /* 590 * Convert a vnode to a file handle. 591 */ 592 static int 593 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp) 594 { 595 hammer_inode_t ip; 596 597 KKASSERT(MAXFIDSZ >= 16); 598 ip = VTOI(vp); 599 fhp->fid_len = offsetof(struct fid, fid_data[16]); 600 fhp->fid_reserved = 0; 601 bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id)); 602 bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof)); 603 return(0); 604 } 605 606 607 /* 608 * Convert a file handle back to a vnode. 609 */ 610 static int 611 hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp) 612 { 613 struct hammer_transaction trans; 614 struct hammer_inode *ip; 615 struct hammer_inode_info info; 616 int error; 617 618 bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id)); 619 bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof)); 620 621 hammer_simple_transaction(&trans, (void *)mp->mnt_data); 622 623 /* 624 * Get/allocate the hammer_inode structure. The structure must be 625 * unlocked while we manipulate the related vnode to avoid a 626 * deadlock. 
627 */ 628 ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof, 629 0, &error); 630 if (ip == NULL) { 631 *vpp = NULL; 632 return(error); 633 } 634 error = hammer_get_vnode(ip, vpp); 635 hammer_rel_inode(ip, 0); 636 hammer_done_transaction(&trans); 637 return (error); 638 } 639 640 static int 641 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 642 int *exflagsp, struct ucred **credanonp) 643 { 644 hammer_mount_t hmp = (void *)mp->mnt_data; 645 struct netcred *np; 646 int error; 647 648 np = vfs_export_lookup(mp, &hmp->export, nam); 649 if (np) { 650 *exflagsp = np->netc_exflags; 651 *credanonp = &np->netc_anon; 652 error = 0; 653 } else { 654 error = EACCES; 655 } 656 return (error); 657 658 } 659 660 int 661 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export) 662 { 663 hammer_mount_t hmp = (void *)mp->mnt_data; 664 int error; 665 666 switch(op) { 667 case MOUNTCTL_SET_EXPORT: 668 error = vfs_export(mp, &hmp->export, export); 669 break; 670 default: 671 error = EOPNOTSUPP; 672 break; 673 } 674 return(error); 675 } 676 677