/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.63 2008/07/14 03:20:49 dillon Exp $
 */
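
/*
 * HAMMER VFS interface: mount/unmount, root and inode-number vnode
 * lookups, statfs/statvfs, sync, and the NFS export hooks
 * (vptofh/fhtovp/checkexp).  (Summary comment; the functions below are
 * the authoritative reference.)
 */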

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */ 
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_record_iterations;
int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;
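
/*
 * The debug, limit, count and stats variables above are exported to
 * userland under the vfs.hammer sysctl tree defined below, so they can
 * be inspected or tuned at run time, e.g. (illustrative usage):
 *
 *	sysctl vfs.hammer.debug_general=1
 *	sysctl vfs.hammer.count_inodes
 */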
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return(0);
}
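
/*
 * Mount or update-mount a HAMMER filesystem.  The volume paths are
 * copied in from userland and installed one at a time, UNDO recovery is
 * run against the root volume, and the root directory (obj_id 1) is
 * looked up to validate the mount.
 */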
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
	}
	if ((info.hflags & HMNT_MASTERID) &&
	    (info.masterid < -1 || info.masterid >= HAMMER_MAX_MASTERS)) {
		return (EINVAL);
	}

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
	if (info.hflags & HMNT_MASTERID)
		hmp->masterid = info.masterid;
	else
		hmp->masterid = -1;
	if (info.asof) {
		kprintf("ASOF\n");
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and matters primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
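
	/*
	 * Illustrative example (assumed numbers): with a 256MB UNDO FIFO,
	 * hammer_undo_max() returns 256MB and the limit computed above is
	 * 256MB / 8192 + 100 = 32868 records per flush group.
	 */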

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int count;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
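	/*
	 * Drain the flusher before tearing the mount down.  Each pass runs
	 * a full flusher sync; after five passes progress dots are printed
	 * and we sleep a second between passes, giving up after roughly
	 * thirty passes.
	 */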
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(hmp, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, HAMMER_DEF_LOCALIZATION,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}
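
/*
 * Report filesystem statistics.  Free space is tracked on-disk in
 * big-blocks (vol0_stat_freebigblocks, nominally 8MB each via
 * HAMMER_LARGEBLOCK_SIZE) and is converted below into f_bsize-sized
 * blocks of HAMMER_BUFSIZE bytes for statfs.
 */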
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
		if (error == 0)
			error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}
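
/*
 * NFS file handle layout used by the vptofh/fhtovp pair below:
 * fid_data[0..7] holds the inode's obj_id, fid_data[8..15] holds
 * obj_asof, and the 16-bit PFS localization is carried in fid_ext.
 */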
/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	localization = (u_int32_t)fhp->fid_ext << 16;

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}
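
/*
 * Determine whether the filesystem is exported to the client described
 * by 'nam'.  On success the export flags and anonymous credentials are
 * returned; otherwise EACCES.
 */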
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);

}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}