/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.32 2008/05/03 20:21:20 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_general;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_limit_dirtybufs = 100;	/* per-mount */
int hammer_bio_count;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
           &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
           &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
           &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
           &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
           &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
           &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
           &hammer_debug_recover_faults, 0, "");
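
/*
 * The remaining sysctls export the run-time tunables (CTLFLAG_RW) and the
 * read-only resource counters (CTLFLAG_RD) declared above.
 */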
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
           &hammer_limit_dirtybufs, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
           &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
           &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
           &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
           &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
           &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
           &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
           &hammer_count_dirtybufs, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
            &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
            &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	/*hammer_init_alist_config();*/
	return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return (EINVAL);

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

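		/*
		 * Initialize the root B-Tree search bounds to span the
		 * entire 64 bit obj_id and key space.
		 */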
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);

		hmp->sync_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);

		/*
		 * Set default zone limits.  These values can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);
			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags = info.hflags;
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa XXX
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only XXX\n");
			hmp->ronly = 1;
		} else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
			kprintf("HAMMER read-only -> read-write XXX\n");
			hmp->ronly = 0;
		}
		return(0);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors; set up enough of the mount point so we can look up
	 * the root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then copy it again after recovery is complete.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached, referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Clean up and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
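 *
 * Note: hammer_flusher_sync() is called twice below, mirroring
 * hammer_vfs_sync(); presumably the second pass lets the flusher retire
 * work generated by the first before the volumes are unloaded.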
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload the volumes
	 */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp->zbuf, M_HAMMER);
	lockuninit(&hmp->blockmap_lock);

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);

	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Look up the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory always uses an obj_id of 1, so this reduces to a
 * hammer_vfs_vget() of that inode; the root vnode is not currently
 * cached in the hammer_mount structure.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice: the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
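 *
 * (Mount-time UNDO processing is performed by hammer_recover(), called
 * from hammer_vfs_mount() above.)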
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	error = hammer_sync_hmp(hmp, waitfor);
	if (error == 0)
		error = hammer_sync_hmp(hmp, waitfor);
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}