/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.31 2008/05/02 01:00:42 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_general;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;	/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_limit_dirtybufs = 100;	/* per-mount */
int hammer_bio_count;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");

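/*
 * Read-only statistics counters exported for observability; from
 * userland they can be inspected with, e.g.,
 * "sysctl vfs.hammer.count_inodes".
 */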
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	/*hammer_init_alist_config();*/
	return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return (EINVAL);

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

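		/*
		 * root_btree_beg and root_btree_end bracket the full range
		 * of B-Tree keys handled by this mount.
		 */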
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);

		hmp->sync_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->objid_cache_list);

		/*
		 * Set default zone limits.  This value can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);

			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags = info.hflags;
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa XXX
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only XXX\n");
			hmp->ronly = 1;
		} else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
			kprintf("HAMMER read-only -> read-write XXX\n");
			hmp->ronly = 0;
		}
		return(0);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup(), so we have to pre-cache the blockmap
	 * and then re-copy it after recovery is complete.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

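	/*
	 * Note: hammer_flusher_sync() is intentionally run twice above,
	 * presumably for the same reason documented at hammer_vfs_sync():
	 * the second pass advances the UNDO start index so no undos need
	 * to be replayed on the next mount.
	 */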
#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload the volumes
	 */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp->zbuf, M_HAMMER);
	lockuninit(&hmp->blockmap_lock);

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);

	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory always uses an obj_id of 1, so we simply acquire
 * its vnode via hammer_vfs_vget().
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	error = hammer_sync_hmp(hmp, waitfor);
	if (error == 0)
		error = hammer_sync_hmp(hmp, waitfor);
	return (error);
}

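/*
 * NFS export support.  The file handle carries 16 bytes of opaque data:
 * the 64 bit obj_id at offset 0 and the 64 bit obj_asof transaction id
 * at offset 8, which is enough for hammer_vfs_fhtovp() to re-resolve the
 * inode, including its as-of (historical) view.
 */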
/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}

/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}