/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.35 2008/05/18 01:48:50 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;               /* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;             /* global */
int hammer_limit_dirtybufs = 100;       /* per-mount */
int hammer_bio_count;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
           &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
           &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
           &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
           &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
           &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
           &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
           &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
           &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
           &hammer_debug_recover_faults, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
           &hammer_limit_dirtybufs, 0, "");

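/*
 * The debug_* and limit_* knobs above and the count_* counters below all
 * appear under the vfs.hammer sysctl tree; the former are writable
 * (CTLFLAG_RW) while the counters are read-only statistics (CTLFLAG_RD).
 * A minimal userland sketch of reading and tuning them via
 * sysctlbyname(3) -- illustrative only, not part of this module:
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              int inodes;
 *              int level = 1;
 *              size_t len = sizeof(inodes);
 *
 *              if (sysctlbyname("vfs.hammer.count_inodes",
 *                               &inodes, &len, NULL, 0) == 0)
 *                      printf("cached inodes: %d\n", inodes);
 *              sysctlbyname("vfs.hammer.debug_general",
 *                           NULL, NULL, &level, sizeof(level));
 *              return(0);
 *      }
 *
 * The same values can be inspected or set with sysctl(8), e.g.
 * "sysctl vfs.hammer.debug_btree=1".
 */
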
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
           &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
           &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
           &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
           &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
           &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
           &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
           &hammer_count_dirtybufs, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
           &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
           &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void     hammer_free_hmp(struct mount *mp);

static int      hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
                                struct ucred *cred);
static int      hammer_vfs_unmount(struct mount *mp, int mntflags);
static int      hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int      hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
                                struct ucred *cred);
static int      hammer_vfs_sync(struct mount *mp, int waitfor);
static int      hammer_vfs_vget(struct mount *mp, ino_t ino,
                                struct vnode **vpp);
static int      hammer_vfs_init(struct vfsconf *conf);
static int      hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
                                struct vnode **vpp);
static int      hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int      hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                                int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
        .vfs_mount      = hammer_vfs_mount,
        .vfs_unmount    = hammer_vfs_unmount,
        .vfs_root       = hammer_vfs_root,
        .vfs_statfs     = hammer_vfs_statfs,
        .vfs_sync       = hammer_vfs_sync,
        .vfs_vget       = hammer_vfs_vget,
        .vfs_init       = hammer_vfs_init,
        .vfs_vptofh     = hammer_vfs_vptofh,
        .vfs_fhtovp     = hammer_vfs_fhtovp,
        .vfs_checkexp   = hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

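/*
 * VFS_SET() below registers hammer_vfsops under the fsname "hammer", so a
 * HAMMER filesystem is mounted with mount_hammer(8) / mount -t hammer,
 * which hands hammer_vfs_mount() a struct hammer_mount_info.  A rough
 * userland sketch of that contract, showing only the fields actually
 * consumed in this file (the structure itself is declared in the HAMMER
 * headers and the device path is hypothetical):
 *
 *      struct hammer_mount_info info;
 *      const char *vols[] = { "/dev/ad0s1h" };
 *
 *      bzero(&info, sizeof(info));
 *      info.nvolumes = 1;
 *      info.volumes = vols;
 *      info.hflags = 0;
 *      info.asof = 0;
 *      mount("hammer", "/mnt", 0, &info);
 *
 * volumes is an array of userland path strings, one per volume, copied in
 * one at a time by hammer_vfs_mount(), and a non-zero asof transaction id
 * yields a read-only as-of mount (see the MNT_RDONLY handling below).
 */
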
VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
        /*hammer_init_alist_config();*/
        return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                 struct ucred *cred)
{
        struct hammer_mount_info info;
        hammer_mount_t hmp;
        hammer_volume_t rootvol;
        struct vnode *rootvp;
        const char *upath;      /* volume name in userspace */
        char *path;             /* volume name in system space */
        int error;
        int i;

        if ((error = copyin(data, &info, sizeof(info))) != 0)
                return (error);
        if (info.nvolumes <= 0 || info.nvolumes >= 32768)
                return (EINVAL);

        /*
         * Internal mount data structure
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                hmp = (void *)mp->mnt_data;
                KKASSERT(hmp != NULL);
        } else {
                hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
                mp->mnt_data = (qaddr_t)hmp;
                hmp->mp = mp;
                hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
                hmp->namekey_iterator = mycpu->gd_time_seconds;
                /*TAILQ_INIT(&hmp->recycle_list);*/

                hmp->root_btree_beg.localization = HAMMER_MIN_LOCALIZATION;
                hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
                hmp->root_btree_beg.key = -0x8000000000000000LL;
                hmp->root_btree_beg.create_tid = 1;
                hmp->root_btree_beg.delete_tid = 1;
                hmp->root_btree_beg.rec_type = 0;
                hmp->root_btree_beg.obj_type = 0;

                hmp->root_btree_end.localization = HAMMER_MAX_LOCALIZATION;
                hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
                hmp->root_btree_end.delete_tid = 0;   /* special case */
                hmp->root_btree_end.rec_type = 0xFFFFU;
                hmp->root_btree_end.obj_type = 0;
                lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);

                hmp->sync_lock.refs = 1;
                hmp->free_lock.refs = 1;

                TAILQ_INIT(&hmp->flush_list);
                TAILQ_INIT(&hmp->objid_cache_list);
                TAILQ_INIT(&hmp->undo_lru_list);

                /*
                 * Set default zone limits.  This value can be reduced
                 * further by the zone limit specified in the root volume.
                 *
                 * The sysctl can force a small zone limit for debugging
                 * purposes.
                 */
                for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
                        hmp->zone_limits[i] =
                                HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);

                        if (hammer_zone_limit) {
                                hmp->zone_limits[i] =
                                    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
                        }
                        hammer_init_holes(hmp, &hmp->holes[i]);
                }
        }
        hmp->hflags = info.hflags;
        if (info.asof) {
                mp->mnt_flag |= MNT_RDONLY;
                hmp->asof = info.asof;
        } else {
                hmp->asof = HAMMER_MAX_TID;
        }

        /*
         * Re-open read-write if originally read-only, or vice-versa XXX
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
                        kprintf("HAMMER read-write -> read-only XXX\n");
                        hmp->ronly = 1;
                } else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
                        kprintf("HAMMER read-only -> read-write XXX\n");
                        hmp->ronly = 0;
                }
                return(0);
        }

        RB_INIT(&hmp->rb_vols_root);
        RB_INIT(&hmp->rb_inos_root);
        RB_INIT(&hmp->rb_nods_root);
        RB_INIT(&hmp->rb_undo_root);
        hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

        TAILQ_INIT(&hmp->volu_list);
        TAILQ_INIT(&hmp->undo_list);
        TAILQ_INIT(&hmp->data_list);
        TAILQ_INIT(&hmp->meta_list);
        TAILQ_INIT(&hmp->lose_list);

        /*
         * Load volumes
         */
        path = objcache_get(namei_oc, M_WAITOK);
        hmp->nvolumes = info.nvolumes;
        for (i = 0; i < info.nvolumes; ++i) {
                error = copyin(&info.volumes[i], &upath, sizeof(char *));
                if (error == 0)
                        error = copyinstr(upath, path, MAXPATHLEN, NULL);
                if (error == 0)
                        error = hammer_install_volume(hmp, path);
                if (error)
                        break;
        }
        objcache_put(namei_oc, path);

        /*
         * Make sure we found a root volume
         */
        if (error == 0 && hmp->rootvol == NULL) {
                kprintf("hammer_mount: No root volume found!\n");
                error = EINVAL;
        }
        if (error) {
                hammer_free_hmp(mp);
                return (error);
        }

        /*
         * No errors; set up enough of the mount point so we can look up
         * the root vnode.
         */
        mp->mnt_iosize_max = MAXPHYS;
        mp->mnt_kern_flag |= MNTK_FSMID;

        /*
         * note: f_iosize is used by vnode_pager_haspage() when constructing
         * its VOP_BMAP call.
         */
        mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
        mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
        mp->mnt_maxsymlinklen = 255;
        mp->mnt_flag |= MNT_LOCAL;

        vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
        vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
        vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

        /*
         * The root volume's ondisk pointer is only valid if we hold a
         * reference to it.
         */
        rootvol = hammer_get_root_volume(hmp, &error);
        if (error)
                goto failed;

        /*
         * Perform any necessary UNDO operations.  The recover code does
         * call hammer_undo_lookup() so we have to pre-cache the blockmap,
         * and then re-copy it again after recovery is complete.
         *
         * The recover code will load hmp->flusher_undo_start.
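         *
         * The blockmap image copied here may therefore still reflect the
         * pre-crash state; it is copied again from the root volume once
         * hammer_recover() has completed (see "Recopy after recovery"
         * below).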
         */
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));

        error = hammer_recover(hmp, rootvol);
        if (error) {
                kprintf("Failed to recover HAMMER filesystem on mount\n");
                goto done;
        }

        /*
         * Finish setup now that we have a good root volume
         */
        ksnprintf(mp->mnt_stat.f_mntfromname,
                  sizeof(mp->mnt_stat.f_mntfromname), "%s",
                  rootvol->ondisk->vol_name);
        mp->mnt_stat.f_fsid.val[0] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
        mp->mnt_stat.f_fsid.val[1] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

        /*
         * Certain often-modified fields in the root volume are cached in
         * the hammer_mount structure so we do not have to generate lots
         * of little UNDO structures for them.
         *
         * Recopy after recovery.
         */
        hmp->next_tid = rootvol->ondisk->vol0_next_tid;
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));

        /*
         * Use the zone limit set by newfs_hammer, or the zone limit set by
         * sysctl (for debugging), whichever is smaller.
         */
        if (rootvol->ondisk->vol0_zone_limit) {
                hammer_off_t vol0_zone_limit;

                vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
                for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
                        if (hmp->zone_limits[i] > vol0_zone_limit)
                                hmp->zone_limits[i] = vol0_zone_limit;
                }
        }

        hammer_flusher_create(hmp);

        /*
         * Locate the root directory using the root cluster's B-Tree as a
         * starting point.  The root directory uses an obj_id of 1.
         *
         * FUTURE: Leave the root directory cached referenced but unlocked
         * in hmp->rootvp (need to flush it on unmount).
         */
        error = hammer_vfs_vget(mp, 1, &rootvp);
        if (error)
                goto done;
        vput(rootvp);
        /*vn_unlock(hmp->rootvp);*/

done:
        hammer_rel_volume(rootvol, 0);
failed:
        /*
         * Cleanup and return.
         */
        if (error)
                hammer_free_hmp(mp);
        return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int flags;
        int error;

        /*
         * Clean out the vnodes
         */
        flags = 0;
        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        if ((error = vflush(mp, 0, flags)) != 0)
                return (error);

        /*
         * Clean up the internal mount structure and related entities.  This
         * may issue I/O.
         */
        hammer_free_hmp(mp);
        return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int i;

#if 0
        /*
         * Clean up the root vnode
         */
        if (hmp->rootvp) {
                vrele(hmp->rootvp);
                hmp->rootvp = NULL;
        }
#endif
        hammer_flusher_sync(hmp);
        hammer_flusher_sync(hmp);
        hammer_flusher_destroy(hmp);

        KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
        /*
         * Unload & flush inodes
         *
         * XXX illegal to call this from here, it can only be done from
         * the flusher.
         */
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_unload_inode, (void *)MNT_WAIT);

        /*
         * Unload & flush volumes
         */
#endif
        /*
         * Unload the volumes
         */
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_unload_volume, NULL);

        mp->mnt_data = NULL;
        mp->mnt_flag &= ~MNT_LOCAL;
        hmp->mp = NULL;
        hammer_destroy_objid_cache(hmp);
        kfree(hmp->zbuf, M_HAMMER);
        lockuninit(&hmp->blockmap_lock);

        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                hammer_free_holes(hmp, &hmp->holes[i]);

        kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        struct hammer_inode *ip;
        int error;

        hammer_simple_transaction(&trans, hmp);

        /*
         * Lookup the requested HAMMER inode.  The structure must be
         * left unlocked while we manipulate the related vnode to avoid
         * a deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                return(error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
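 *
 * The root directory always has an obj_id of 1, so in practice this just
 * resolves inode 1 via hammer_vfs_vget().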
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int error;

        error = hammer_vfs_vget(mp, 1, vpp);
        return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return(error);
        ondisk = volume->ondisk;

        /*
         * Basic stats
         */
        mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
        mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
        if (mp->mnt_stat.f_files < 0)
                mp->mnt_stat.f_files = 0;

        *sbp = mp->mnt_stat;
        return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;

        if (panicstr == NULL) {
                error = hammer_sync_hmp(hmp, waitfor);
                if (error == 0)
                        error = hammer_sync_hmp(hmp, waitfor);
        } else {
                error = EIO;
                hkprintf("S");
        }
        return (error);
}

/*
 * Convert a vnode to a file handle.
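 *
 * The handle carries 16 bytes of fid_data: the 64-bit obj_id at bytes 0-7
 * and the 64-bit obj_asof TID at bytes 8-15, matching the decode in
 * hammer_vfs_fhtovp() below.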
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        hammer_inode_t ip;

        KKASSERT(MAXFIDSZ >= 16);
        ip = VTOI(vp);
        fhp->fid_len = offsetof(struct fid, fid_data[16]);
        fhp->fid_reserved = 0;
        bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
        bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
        return(0);
}

/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode_info info;
        int error;

        bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
        bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

        hammer_simple_transaction(&trans, (void *)mp->mnt_data);

        /*
         * Get/allocate the hammer_inode structure.  The structure must be
         * unlocked while we manipulate the related vnode to avoid a
         * deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
                              0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                return(error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                    int *exflagsp, struct ucred **credanonp)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        struct netcred *np;
        int error;

        np = vfs_export_lookup(mp, &hmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
                *credanonp = &np->netc_anon;
                error = 0;
        } else {
                error = EACCES;
        }
        return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        int error;

        switch(op) {
        case MOUNTCTL_SET_EXPORT:
                error = vfs_export(mp, &hmp->export, export);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return(error);
}