1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.30 2008/04/29 01:10:37 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

/*
 * Global debugging knobs, object counters, and tuning variables.  All of
 * these are exported via the vfs.hammer sysctl tree declared below; the
 * count_* variables are maintained by the rest of the HAMMER code.
 */
int hammer_debug_general;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;	/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_limit_dirtybufs = 100;	/* per-mount */
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


/*
 * Dispatch table wiring the HAMMER implementations into the VFS layer.
 */
static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root 	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

/*
 * One-time VFS initialization hook.  Currently a no-op (the old alist
 * configuration call has been retired).
 */
static int
hammer_vfs_init(struct vfsconf *conf)
{
	/*hammer_init_alist_config();*/
	return(0);
}

/*
 * Mount (or update) a HAMMER filesystem.
 *
 * "data" points to a userspace hammer_mount_info structure naming the
 * volumes making up the filesystem.  On a fresh mount the hammer_mount
 * structure is allocated and initialized, the volumes are installed,
 * UNDO recovery is run against the root volume, and the flusher is
 * started.  On MNT_UPDATE only the read-only/read-write flag is toggled.
 *
 * Returns 0 on success or a kernel errno; on failure the partially
 * constructed mount is torn down via hammer_free_hmp().
 */
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	/* sanity-check the userspace-supplied volume count */
	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return (EINVAL);

	/*
	 * Internal mount data structure.  Allocated and zeroed on a fresh
	 * mount, reused on MNT_UPDATE.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * root_btree_beg/end bound the entire B-Tree key space
		 * (minimum and maximum possible element keys).
		 */
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);

		hmp->sync_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->objid_cache_list);

		/*
		 * Set default zone limits.  This value can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);

			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags = info.hflags;
	/* an as-of (snapshot) mount is forced read-only */
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vise-versa XXX
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only XXX\n");
			hmp->ronly = 1;
		} else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
			kprintf("HAMMER read-only -> read-write XXX\n");
			hmp->ronly = 0;
		}
		return(0);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes.  info.volumes[] is an array of userspace string
	 * pointers; each is copied in, then the path string itself, then
	 * the volume is installed.  The pathname buffer comes from the
	 * shared namei object cache.
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recover code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.  The fsid is
	 * derived from a CRC of the two 8-byte halves of the volume fsid.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

/*
 * Unmount a HAMMER filesystem: flush all vnodes (FORCECLOSE when
 * MNT_FORCE is set), then tear down the internal mount structure,
 * which may itself issue I/O.
 */
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities. This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
	/*
	 * Flush twice before destroying the flusher.  NOTE(review): this
	 * mirrors the double sync in hammer_vfs_sync() — presumably the
	 * second pass advances the UNDO start index; confirm against the
	 * flusher implementation.
	 */
	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	/* all inodes must have been flushed and unloaded by now */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload the volumes
	 */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	/* detach from the struct mount, then free everything we allocated */
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp->zbuf, M_HAMMER);
	lockuninit(&hmp->blockmap_lock);

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);

	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 *
 * NOTE(review): the forward declaration above is "static" but this
 * definition is not; the function keeps internal linkage either way —
 * verify no out-of-file callers rely on it before changing.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	/* the root directory always has obj_id 1 */
	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

/*
 * Report filesystem statistics.  Inode and free-space figures come from
 * the root volume header; free space is converted from free big-blocks
 * to f_bsize (HAMMER_BUFSIZE) units.
 */
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats.  Copy what we need out of the ondisk structure
	 * before releasing the volume reference.
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	/* second pass runs only if the first succeeded */
	error = hammer_sync_hmp(hmp, waitfor);
	if (error == 0)
		error = hammer_sync_hmp(hmp, waitfor);
	return (error);
}

/*
 * Convert a vnode to a file handle.
 *
 * The handle packs obj_id at offset 0 and obj_asof at offset 8 of
 * fid_data; the KKASSERT guarantees the fid has room for both fields.
 * hammer_vfs_fhtovp() below performs the inverse unpacking.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.  Unpacks the obj_id/obj_asof
 * pair written by hammer_vfs_vptofh() and looks up the inode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * NFS export check: look up the client's address in this mount's export
 * list.  On a match, return the export flags and anonymous credential;
 * otherwise deny with EACCES.
 */
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);

}

/*
 * Mount-control entry point for configuring NFS exports
 * (MOUNTCTL_SET_EXPORT).  Any other op is rejected with EOPNOTSUPP.
 */
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}