/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE 25427e5fc6SMatthew Dillon * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26427e5fc6SMatthew Dillon * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27427e5fc6SMatthew Dillon * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28427e5fc6SMatthew Dillon * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29427e5fc6SMatthew Dillon * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30427e5fc6SMatthew Dillon * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31427e5fc6SMatthew Dillon * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32427e5fc6SMatthew Dillon * SUCH DAMAGE. 33427e5fc6SMatthew Dillon * 3444a83111SMatthew Dillon * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $ 35427e5fc6SMatthew Dillon */ 36427e5fc6SMatthew Dillon 37427e5fc6SMatthew Dillon #include <sys/param.h> 38427e5fc6SMatthew Dillon #include <sys/systm.h> 39427e5fc6SMatthew Dillon #include <sys/kernel.h> 40427e5fc6SMatthew Dillon #include <sys/vnode.h> 41427e5fc6SMatthew Dillon #include <sys/mount.h> 42427e5fc6SMatthew Dillon #include <sys/malloc.h> 43427e5fc6SMatthew Dillon #include <sys/nlookup.h> 44427e5fc6SMatthew Dillon #include <sys/fcntl.h> 45b3deaf57SMatthew Dillon #include <sys/sysctl.h> 46427e5fc6SMatthew Dillon #include <sys/buf.h> 47427e5fc6SMatthew Dillon #include <sys/buf2.h> 48427e5fc6SMatthew Dillon #include "hammer.h" 49427e5fc6SMatthew Dillon 50aac0aabdSMatthew Dillon /* 51aac0aabdSMatthew Dillon * NOTE! Global statistics may not be MPSAFE so HAMMER never uses them 52aac0aabdSMatthew Dillon * in conditionals. 
53aac0aabdSMatthew Dillon */ 5402428fb6SMatthew Dillon int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT; 552f85fa4dSMatthew Dillon int hammer_debug_io; 56d5ef456eSMatthew Dillon int hammer_debug_general; 571b0ab2c3SMatthew Dillon int hammer_debug_debug = 1; /* medium-error panics */ 58e8599db1SMatthew Dillon int hammer_debug_inode; 597d683b0fSMatthew Dillon int hammer_debug_locks; 60b3deaf57SMatthew Dillon int hammer_debug_btree; 61d113fda1SMatthew Dillon int hammer_debug_tid; 6246fe7ae1SMatthew Dillon int hammer_debug_recover; /* -1 will disable, +1 will force */ 6346fe7ae1SMatthew Dillon int hammer_debug_recover_faults; 64fc73edd8SMatthew Dillon int hammer_debug_critical; /* non-zero enter debugger on error */ 651b0ab2c3SMatthew Dillon int hammer_cluster_enable = 1; /* enable read clustering by default */ 667a61b85dSMatthew Dillon int hammer_count_fsyncs; 67b3deaf57SMatthew Dillon int hammer_count_inodes; 68af209b0fSMatthew Dillon int hammer_count_iqueued; 699f5097dcSMatthew Dillon int hammer_count_reclaiming; 70b3deaf57SMatthew Dillon int hammer_count_records; 71b3deaf57SMatthew Dillon int hammer_count_record_datas; 72b3deaf57SMatthew Dillon int hammer_count_volumes; 73b3deaf57SMatthew Dillon int hammer_count_buffers; 74b3deaf57SMatthew Dillon int hammer_count_nodes; 75a7e9bef1SMatthew Dillon int64_t hammer_count_extra_space_used; 76cb51be26SMatthew Dillon int64_t hammer_stats_btree_lookups; 77cb51be26SMatthew Dillon int64_t hammer_stats_btree_searches; 78cb51be26SMatthew Dillon int64_t hammer_stats_btree_inserts; 79cb51be26SMatthew Dillon int64_t hammer_stats_btree_deletes; 80cb51be26SMatthew Dillon int64_t hammer_stats_btree_elements; 81cb51be26SMatthew Dillon int64_t hammer_stats_btree_splits; 82cb51be26SMatthew Dillon int64_t hammer_stats_btree_iterations; 8339d8fd63SMatthew Dillon int64_t hammer_stats_btree_root_iterations; 84cb51be26SMatthew Dillon int64_t hammer_stats_record_iterations; 85ce0138a6SMatthew Dillon 86ce0138a6SMatthew Dillon int64_t 
hammer_stats_file_read; 87ce0138a6SMatthew Dillon int64_t hammer_stats_file_write; 88ce0138a6SMatthew Dillon int64_t hammer_stats_file_iopsr; 89ce0138a6SMatthew Dillon int64_t hammer_stats_file_iopsw; 90ce0138a6SMatthew Dillon int64_t hammer_stats_disk_read; 91ce0138a6SMatthew Dillon int64_t hammer_stats_disk_write; 92ce0138a6SMatthew Dillon int64_t hammer_stats_inode_flushes; 93ce0138a6SMatthew Dillon int64_t hammer_stats_commits; 9489e744ceSMatthew Dillon int64_t hammer_stats_undo; 95ce0138a6SMatthew Dillon 96f5a07a7aSMatthew Dillon int hammer_count_dirtybufspace; /* global */ 97a99b9ea2SMatthew Dillon int hammer_count_refedbufs; /* global */ 980832c9bbSMatthew Dillon int hammer_count_reservations; 99a99b9ea2SMatthew Dillon int hammer_count_io_running_read; 100a99b9ea2SMatthew Dillon int hammer_count_io_running_write; 101a99b9ea2SMatthew Dillon int hammer_count_io_locked; 102f5a07a7aSMatthew Dillon int hammer_limit_dirtybufspace; /* per-mount */ 1030832c9bbSMatthew Dillon int hammer_limit_recs; /* as a whole XXX */ 104de996e86SMatthew Dillon int hammer_limit_inode_recs = 1024; /* per inode */ 105ff003b11SMatthew Dillon int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT; 10621fde338SMatthew Dillon int hammer_autoflush = 2000; /* auto flush */ 1071f07f686SMatthew Dillon int hammer_bio_count; 108cb51be26SMatthew Dillon int hammer_verify_zone; 1091b0ab2c3SMatthew Dillon int hammer_verify_data = 1; 110cb51be26SMatthew Dillon int hammer_write_mode; 1113e583440SMatthew Dillon int hammer_yield_check = 16; 1126f3d87c0SMatthew Dillon int hammer_fsync_mode; 1137d683b0fSMatthew Dillon int64_t hammer_contention_count; 114f03c9cf4SMatthew Dillon int64_t hammer_zone_limit; 115b3deaf57SMatthew Dillon 116b3deaf57SMatthew Dillon SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem"); 1175987cc42SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD, 1185987cc42SMatthew Dillon &hammer_supported_version, 0, ""); 119d5ef456eSMatthew Dillon 
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW, 120d5ef456eSMatthew Dillon &hammer_debug_general, 0, ""); 1212f85fa4dSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW, 1222f85fa4dSMatthew Dillon &hammer_debug_io, 0, ""); 12377062c8aSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW, 12477062c8aSMatthew Dillon &hammer_debug_debug, 0, ""); 125e8599db1SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW, 126e8599db1SMatthew Dillon &hammer_debug_inode, 0, ""); 1277d683b0fSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW, 1287d683b0fSMatthew Dillon &hammer_debug_locks, 0, ""); 129b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW, 130b3deaf57SMatthew Dillon &hammer_debug_btree, 0, ""); 131d113fda1SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW, 132d113fda1SMatthew Dillon &hammer_debug_tid, 0, ""); 133b33e2cc0SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW, 134b33e2cc0SMatthew Dillon &hammer_debug_recover, 0, ""); 13546fe7ae1SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW, 13646fe7ae1SMatthew Dillon &hammer_debug_recover_faults, 0, ""); 137fc73edd8SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW, 138fc73edd8SMatthew Dillon &hammer_debug_critical, 0, ""); 1391b0ab2c3SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW, 1401b0ab2c3SMatthew Dillon &hammer_cluster_enable, 0, ""); 1419480ff55SMatthew Dillon 142f5a07a7aSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW, 143f5a07a7aSMatthew Dillon &hammer_limit_dirtybufspace, 0, ""); 14447637bffSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW, 14547637bffSMatthew Dillon &hammer_limit_recs, 0, ""); 146de996e86SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW, 147de996e86SMatthew 
Dillon &hammer_limit_inode_recs, 0, ""); 148ff003b11SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW, 149ff003b11SMatthew Dillon &hammer_limit_reclaim, 0, ""); 1509480ff55SMatthew Dillon 1517a61b85dSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD, 1527a61b85dSMatthew Dillon &hammer_count_fsyncs, 0, ""); 153b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD, 154b3deaf57SMatthew Dillon &hammer_count_inodes, 0, ""); 155af209b0fSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD, 156af209b0fSMatthew Dillon &hammer_count_iqueued, 0, ""); 1579f5097dcSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD, 1589f5097dcSMatthew Dillon &hammer_count_reclaiming, 0, ""); 159b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD, 160b3deaf57SMatthew Dillon &hammer_count_records, 0, ""); 161b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD, 162b3deaf57SMatthew Dillon &hammer_count_record_datas, 0, ""); 163b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD, 164b3deaf57SMatthew Dillon &hammer_count_volumes, 0, ""); 165b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD, 166b3deaf57SMatthew Dillon &hammer_count_buffers, 0, ""); 167b3deaf57SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD, 168b3deaf57SMatthew Dillon &hammer_count_nodes, 0, ""); 169a7e9bef1SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD, 170a7e9bef1SMatthew Dillon &hammer_count_extra_space_used, 0, ""); 171ce0138a6SMatthew Dillon 172cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD, 173cb51be26SMatthew Dillon &hammer_stats_btree_searches, 0, ""); 174cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD, 
175cb51be26SMatthew Dillon &hammer_stats_btree_lookups, 0, ""); 176cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD, 177cb51be26SMatthew Dillon &hammer_stats_btree_inserts, 0, ""); 178cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD, 179cb51be26SMatthew Dillon &hammer_stats_btree_deletes, 0, ""); 180cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD, 181cb51be26SMatthew Dillon &hammer_stats_btree_elements, 0, ""); 182cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD, 183cb51be26SMatthew Dillon &hammer_stats_btree_splits, 0, ""); 184cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD, 185cb51be26SMatthew Dillon &hammer_stats_btree_iterations, 0, ""); 18639d8fd63SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD, 18739d8fd63SMatthew Dillon &hammer_stats_btree_root_iterations, 0, ""); 188cb51be26SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD, 189cb51be26SMatthew Dillon &hammer_stats_record_iterations, 0, ""); 190ce0138a6SMatthew Dillon 191ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD, 192ce0138a6SMatthew Dillon &hammer_stats_file_read, 0, ""); 193ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD, 194ce0138a6SMatthew Dillon &hammer_stats_file_write, 0, ""); 195ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD, 196ce0138a6SMatthew Dillon &hammer_stats_file_iopsr, 0, ""); 197ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD, 198ce0138a6SMatthew Dillon &hammer_stats_file_iopsw, 0, ""); 199ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD, 200ce0138a6SMatthew Dillon &hammer_stats_disk_read, 0, ""); 
201ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD, 202ce0138a6SMatthew Dillon &hammer_stats_disk_write, 0, ""); 203ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD, 204ce0138a6SMatthew Dillon &hammer_stats_inode_flushes, 0, ""); 205ce0138a6SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD, 206ce0138a6SMatthew Dillon &hammer_stats_commits, 0, ""); 20789e744ceSMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD, 20889e744ceSMatthew Dillon &hammer_stats_undo, 0, ""); 209ce0138a6SMatthew Dillon 210f5a07a7aSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD, 211f5a07a7aSMatthew Dillon &hammer_count_dirtybufspace, 0, ""); 212a99b9ea2SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD, 213a99b9ea2SMatthew Dillon &hammer_count_refedbufs, 0, ""); 2140832c9bbSMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD, 2150832c9bbSMatthew Dillon &hammer_count_reservations, 0, ""); 216a99b9ea2SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD, 217a99b9ea2SMatthew Dillon &hammer_count_io_running_read, 0, ""); 218a99b9ea2SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD, 219a99b9ea2SMatthew Dillon &hammer_count_io_locked, 0, ""); 220a99b9ea2SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD, 221a99b9ea2SMatthew Dillon &hammer_count_io_running_write, 0, ""); 222f03c9cf4SMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW, 223f03c9cf4SMatthew Dillon &hammer_zone_limit, 0, ""); 2247d683b0fSMatthew Dillon SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW, 2257d683b0fSMatthew Dillon &hammer_contention_count, 0, ""); 22621fde338SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW, 22721fde338SMatthew Dillon &hammer_autoflush, 0, ""); 
228cb51be26SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW, 229cb51be26SMatthew Dillon &hammer_verify_zone, 0, ""); 2301b0ab2c3SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW, 2311b0ab2c3SMatthew Dillon &hammer_verify_data, 0, ""); 232cb51be26SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW, 233cb51be26SMatthew Dillon &hammer_write_mode, 0, ""); 2343e583440SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW, 2353e583440SMatthew Dillon &hammer_yield_check, 0, ""); 2366f3d87c0SMatthew Dillon SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW, 2376f3d87c0SMatthew Dillon &hammer_fsync_mode, 0, ""); 238b3deaf57SMatthew Dillon 239bcac4bbbSMatthew Dillon KTR_INFO_MASTER(hammer); 240bcac4bbbSMatthew Dillon 241427e5fc6SMatthew Dillon /* 242427e5fc6SMatthew Dillon * VFS ABI 243427e5fc6SMatthew Dillon */ 244427e5fc6SMatthew Dillon static void hammer_free_hmp(struct mount *mp); 245427e5fc6SMatthew Dillon 246427e5fc6SMatthew Dillon static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data, 247427e5fc6SMatthew Dillon struct ucred *cred); 248427e5fc6SMatthew Dillon static int hammer_vfs_unmount(struct mount *mp, int mntflags); 249427e5fc6SMatthew Dillon static int hammer_vfs_root(struct mount *mp, struct vnode **vpp); 250427e5fc6SMatthew Dillon static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, 251427e5fc6SMatthew Dillon struct ucred *cred); 2526f97fce3SMatthew Dillon static int hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, 2536f97fce3SMatthew Dillon struct ucred *cred); 254427e5fc6SMatthew Dillon static int hammer_vfs_sync(struct mount *mp, int waitfor); 255b9b0a6d0SMatthew Dillon static int hammer_vfs_vget(struct mount *mp, struct vnode *dvp, 256b9b0a6d0SMatthew Dillon ino_t ino, struct vnode **vpp); 257427e5fc6SMatthew Dillon static int hammer_vfs_init(struct vfsconf *conf); 25867863d04SMatthew Dillon static int 
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 25967863d04SMatthew Dillon struct fid *fhp, struct vnode **vpp); 260513ca7d7SMatthew Dillon static int hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp); 261513ca7d7SMatthew Dillon static int hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 262513ca7d7SMatthew Dillon int *exflagsp, struct ucred **credanonp); 263513ca7d7SMatthew Dillon 264427e5fc6SMatthew Dillon 265427e5fc6SMatthew Dillon static struct vfsops hammer_vfsops = { 266427e5fc6SMatthew Dillon .vfs_mount = hammer_vfs_mount, 267427e5fc6SMatthew Dillon .vfs_unmount = hammer_vfs_unmount, 268427e5fc6SMatthew Dillon .vfs_root = hammer_vfs_root, 269427e5fc6SMatthew Dillon .vfs_statfs = hammer_vfs_statfs, 2706f97fce3SMatthew Dillon .vfs_statvfs = hammer_vfs_statvfs, 271427e5fc6SMatthew Dillon .vfs_sync = hammer_vfs_sync, 272427e5fc6SMatthew Dillon .vfs_vget = hammer_vfs_vget, 273513ca7d7SMatthew Dillon .vfs_init = hammer_vfs_init, 274513ca7d7SMatthew Dillon .vfs_vptofh = hammer_vfs_vptofh, 275513ca7d7SMatthew Dillon .vfs_fhtovp = hammer_vfs_fhtovp, 276513ca7d7SMatthew Dillon .vfs_checkexp = hammer_vfs_checkexp 277427e5fc6SMatthew Dillon }; 278427e5fc6SMatthew Dillon 279bac808feSMatthew Dillon MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", ""); 280427e5fc6SMatthew Dillon 281427e5fc6SMatthew Dillon VFS_SET(hammer_vfsops, hammer, 0); 282427e5fc6SMatthew Dillon MODULE_VERSION(hammer, 1); 283427e5fc6SMatthew Dillon 284427e5fc6SMatthew Dillon static int 285427e5fc6SMatthew Dillon hammer_vfs_init(struct vfsconf *conf) 286427e5fc6SMatthew Dillon { 2873098dc2fSMatthew Dillon int n; 2883098dc2fSMatthew Dillon 2893098dc2fSMatthew Dillon if (hammer_limit_recs == 0) { 290a99b9ea2SMatthew Dillon hammer_limit_recs = nbuf * 25; 2913098dc2fSMatthew Dillon n = kmalloc_limit(M_HAMMER) / 512; 2923098dc2fSMatthew Dillon if (hammer_limit_recs > n) 2933098dc2fSMatthew Dillon hammer_limit_recs = n; 2943098dc2fSMatthew Dillon } 295f5a07a7aSMatthew Dillon if 
(hammer_limit_dirtybufspace == 0) { 296f5a07a7aSMatthew Dillon hammer_limit_dirtybufspace = hidirtybufspace / 2; 297f5a07a7aSMatthew Dillon if (hammer_limit_dirtybufspace < 100) 298f5a07a7aSMatthew Dillon hammer_limit_dirtybufspace = 100; 2999f5097dcSMatthew Dillon } 300427e5fc6SMatthew Dillon return(0); 301427e5fc6SMatthew Dillon } 302427e5fc6SMatthew Dillon 303427e5fc6SMatthew Dillon static int 304427e5fc6SMatthew Dillon hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data, 305427e5fc6SMatthew Dillon struct ucred *cred) 306427e5fc6SMatthew Dillon { 307427e5fc6SMatthew Dillon struct hammer_mount_info info; 308a89aec1bSMatthew Dillon hammer_mount_t hmp; 309a89aec1bSMatthew Dillon hammer_volume_t rootvol; 31027ea2398SMatthew Dillon struct vnode *rootvp; 3117c19b529SMichael Neumann struct vnode *devvp = NULL; 312427e5fc6SMatthew Dillon const char *upath; /* volume name in userspace */ 313427e5fc6SMatthew Dillon char *path; /* volume name in system space */ 314427e5fc6SMatthew Dillon int error; 315427e5fc6SMatthew Dillon int i; 316732a1697SMatthew Dillon int master_id; 317ab0bf4a3SMatthew Dillon int maxinodes; 318104cb849SMichael Neumann char *next_volume_ptr = NULL; 319ab0bf4a3SMatthew Dillon 320ab0bf4a3SMatthew Dillon /* 321ab0bf4a3SMatthew Dillon * Accept hammer_mount_info. mntpt is NULL for root mounts at boot. 
322ab0bf4a3SMatthew Dillon */ 323ab0bf4a3SMatthew Dillon if (mntpt == NULL) { 3247c19b529SMichael Neumann bzero(&info, sizeof(info)); 3257c19b529SMichael Neumann info.asof = 0; 3267c19b529SMichael Neumann info.hflags = 0; 3277c19b529SMichael Neumann info.nvolumes = 1; 328104cb849SMichael Neumann 329104cb849SMichael Neumann next_volume_ptr = mp->mnt_stat.f_mntfromname; 330104cb849SMichael Neumann 331104cb849SMichael Neumann /* Count number of volumes separated by ':' */ 332104cb849SMichael Neumann for (char *p = next_volume_ptr; *p != '\0'; ++p) { 333104cb849SMichael Neumann if (*p == ':') { 334104cb849SMichael Neumann ++info.nvolumes; 335104cb849SMichael Neumann } 336104cb849SMichael Neumann } 337104cb849SMichael Neumann 338104cb849SMichael Neumann mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */ 3397c19b529SMichael Neumann } else { 340427e5fc6SMatthew Dillon if ((error = copyin(data, &info, sizeof(info))) != 0) 341427e5fc6SMatthew Dillon return (error); 3427c19b529SMichael Neumann } 343427e5fc6SMatthew Dillon 344427e5fc6SMatthew Dillon /* 345732a1697SMatthew Dillon * updating or new mount 346427e5fc6SMatthew Dillon */ 347195c19a1SMatthew Dillon if (mp->mnt_flag & MNT_UPDATE) { 348195c19a1SMatthew Dillon hmp = (void *)mp->mnt_data; 349195c19a1SMatthew Dillon KKASSERT(hmp != NULL); 350195c19a1SMatthew Dillon } else { 351732a1697SMatthew Dillon if (info.nvolumes <= 0 || info.nvolumes >= 32768) 352732a1697SMatthew Dillon return (EINVAL); 353732a1697SMatthew Dillon hmp = NULL; 354732a1697SMatthew Dillon } 355732a1697SMatthew Dillon 356732a1697SMatthew Dillon /* 357732a1697SMatthew Dillon * master-id validation. The master id may not be changed by a 358732a1697SMatthew Dillon * mount update. 
359732a1697SMatthew Dillon */ 360732a1697SMatthew Dillon if (info.hflags & HMNT_MASTERID) { 361732a1697SMatthew Dillon if (hmp && hmp->master_id != info.master_id) { 362732a1697SMatthew Dillon kprintf("hammer: cannot change master id " 363732a1697SMatthew Dillon "with mount update\n"); 364732a1697SMatthew Dillon return(EINVAL); 365732a1697SMatthew Dillon } 366732a1697SMatthew Dillon master_id = info.master_id; 367732a1697SMatthew Dillon if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS) 368732a1697SMatthew Dillon return (EINVAL); 369732a1697SMatthew Dillon } else { 370732a1697SMatthew Dillon if (hmp) 371732a1697SMatthew Dillon master_id = hmp->master_id; 372732a1697SMatthew Dillon else 373732a1697SMatthew Dillon master_id = 0; 374732a1697SMatthew Dillon } 375732a1697SMatthew Dillon 376732a1697SMatthew Dillon /* 377732a1697SMatthew Dillon * Interal mount data structure 378732a1697SMatthew Dillon */ 379732a1697SMatthew Dillon if (hmp == NULL) { 380427e5fc6SMatthew Dillon hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO); 381427e5fc6SMatthew Dillon mp->mnt_data = (qaddr_t)hmp; 382427e5fc6SMatthew Dillon hmp->mp = mp; 38346fe7ae1SMatthew Dillon /*TAILQ_INIT(&hmp->recycle_list);*/ 38447197d71SMatthew Dillon 385bac808feSMatthew Dillon /* 386bac808feSMatthew Dillon * Make sure kmalloc type limits are set appropriately. If root 387bac808feSMatthew Dillon * increases the vnode limit you may have to do a dummy remount 388bac808feSMatthew Dillon * to adjust the HAMMER inode limit. 
389bac808feSMatthew Dillon */ 390bac808feSMatthew Dillon kmalloc_create(&hmp->m_misc, "HAMMER-others"); 391bac808feSMatthew Dillon kmalloc_create(&hmp->m_inodes, "HAMMER-inodes"); 392bac808feSMatthew Dillon 393bac808feSMatthew Dillon maxinodes = desiredvnodes + desiredvnodes / 5 + 394ff003b11SMatthew Dillon hammer_limit_reclaim * 2; 395bac808feSMatthew Dillon kmalloc_raise_limit(hmp->m_inodes, 396bac808feSMatthew Dillon maxinodes * sizeof(struct hammer_inode)); 397bac808feSMatthew Dillon 398dd94f1b1SMatthew Dillon hmp->root_btree_beg.localization = 0x00000000U; 39947197d71SMatthew Dillon hmp->root_btree_beg.obj_id = -0x8000000000000000LL; 40047197d71SMatthew Dillon hmp->root_btree_beg.key = -0x8000000000000000LL; 40147197d71SMatthew Dillon hmp->root_btree_beg.create_tid = 1; 40247197d71SMatthew Dillon hmp->root_btree_beg.delete_tid = 1; 40347197d71SMatthew Dillon hmp->root_btree_beg.rec_type = 0; 40447197d71SMatthew Dillon hmp->root_btree_beg.obj_type = 0; 40547197d71SMatthew Dillon 406dd94f1b1SMatthew Dillon hmp->root_btree_end.localization = 0xFFFFFFFFU; 40747197d71SMatthew Dillon hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL; 40847197d71SMatthew Dillon hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL; 40947197d71SMatthew Dillon hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL; 41047197d71SMatthew Dillon hmp->root_btree_end.delete_tid = 0; /* special case */ 41147197d71SMatthew Dillon hmp->root_btree_end.rec_type = 0xFFFFU; 41247197d71SMatthew Dillon hmp->root_btree_end.obj_type = 0; 413f03c9cf4SMatthew Dillon 414cdb6e4e6SMatthew Dillon hmp->krate.freq = 1; /* maximum reporting rate (hz) */ 415cdb6e4e6SMatthew Dillon hmp->krate.count = -16; /* initial burst */ 416cdb6e4e6SMatthew Dillon 4179480ff55SMatthew Dillon hmp->sync_lock.refs = 1; 418c9b9e29dSMatthew Dillon hmp->free_lock.refs = 1; 419d99d6bf5SMatthew Dillon hmp->undo_lock.refs = 1; 420d99d6bf5SMatthew Dillon hmp->blkmap_lock.refs = 1; 42183f2a3aaSMatthew Dillon hmp->snapshot_lock.refs = 1; 
422*52e547e3SMichael Neumann hmp->volume_lock.refs = 1; 4239480ff55SMatthew Dillon 424cebe9493SMatthew Dillon TAILQ_INIT(&hmp->delay_list); 4257a61b85dSMatthew Dillon TAILQ_INIT(&hmp->flush_group_list); 4260729c8c8SMatthew Dillon TAILQ_INIT(&hmp->objid_cache_list); 427e8599db1SMatthew Dillon TAILQ_INIT(&hmp->undo_lru_list); 4287bc5b8c2SMatthew Dillon TAILQ_INIT(&hmp->reclaim_list); 429195c19a1SMatthew Dillon } 43051c35492SMatthew Dillon hmp->hflags &= ~HMNT_USERFLAGS; 43151c35492SMatthew Dillon hmp->hflags |= info.hflags & HMNT_USERFLAGS; 432732a1697SMatthew Dillon 433732a1697SMatthew Dillon hmp->master_id = master_id; 434732a1697SMatthew Dillon 4357f7c1f84SMatthew Dillon if (info.asof) { 4367f7c1f84SMatthew Dillon mp->mnt_flag |= MNT_RDONLY; 4377f7c1f84SMatthew Dillon hmp->asof = info.asof; 4387f7c1f84SMatthew Dillon } else { 4397f7c1f84SMatthew Dillon hmp->asof = HAMMER_MAX_TID; 4407f7c1f84SMatthew Dillon } 441195c19a1SMatthew Dillon 442865c9609SMichael Neumann hmp->volume_to_remove = -1; 443865c9609SMichael Neumann 444195c19a1SMatthew Dillon /* 44551c35492SMatthew Dillon * Re-open read-write if originally read-only, or vise-versa. 44602428fb6SMatthew Dillon * 44702428fb6SMatthew Dillon * When going from read-only to read-write execute the stage2 44802428fb6SMatthew Dillon * recovery if it has not already been run. 
449195c19a1SMatthew Dillon */ 450195c19a1SMatthew Dillon if (mp->mnt_flag & MNT_UPDATE) { 45151c35492SMatthew Dillon error = 0; 45251c35492SMatthew Dillon if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { 45351c35492SMatthew Dillon kprintf("HAMMER read-only -> read-write\n"); 454195c19a1SMatthew Dillon hmp->ronly = 0; 45551c35492SMatthew Dillon RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL, 45651c35492SMatthew Dillon hammer_adjust_volume_mode, NULL); 45751c35492SMatthew Dillon rootvol = hammer_get_root_volume(hmp, &error); 45851c35492SMatthew Dillon if (rootvol) { 45906ad81ffSMatthew Dillon hammer_recover_flush_buffers(hmp, rootvol, 1); 46002428fb6SMatthew Dillon error = hammer_recover_stage2(hmp, rootvol); 4619f5097dcSMatthew Dillon bcopy(rootvol->ondisk->vol0_blockmap, 4629f5097dcSMatthew Dillon hmp->blockmap, 4639f5097dcSMatthew Dillon sizeof(hmp->blockmap)); 46451c35492SMatthew Dillon hammer_rel_volume(rootvol, 0); 465195c19a1SMatthew Dillon } 46651c35492SMatthew Dillon RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL, 46751c35492SMatthew Dillon hammer_reload_inode, NULL); 46851c35492SMatthew Dillon /* kernel clears MNT_RDONLY */ 46951c35492SMatthew Dillon } else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) { 47051c35492SMatthew Dillon kprintf("HAMMER read-write -> read-only\n"); 47151c35492SMatthew Dillon hmp->ronly = 1; /* messy */ 47251c35492SMatthew Dillon RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL, 47351c35492SMatthew Dillon hammer_reload_inode, NULL); 47451c35492SMatthew Dillon hmp->ronly = 0; 47551c35492SMatthew Dillon hammer_flusher_sync(hmp); 47651c35492SMatthew Dillon hammer_flusher_sync(hmp); 47751c35492SMatthew Dillon hammer_flusher_sync(hmp); 47851c35492SMatthew Dillon hmp->ronly = 1; 47951c35492SMatthew Dillon RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL, 48051c35492SMatthew Dillon hammer_adjust_volume_mode, NULL); 48151c35492SMatthew Dillon } 48251c35492SMatthew Dillon return(error); 483195c19a1SMatthew 
Dillon } 484195c19a1SMatthew Dillon 485427e5fc6SMatthew Dillon RB_INIT(&hmp->rb_vols_root); 486427e5fc6SMatthew Dillon RB_INIT(&hmp->rb_inos_root); 48740043e7fSMatthew Dillon RB_INIT(&hmp->rb_nods_root); 488e8599db1SMatthew Dillon RB_INIT(&hmp->rb_undo_root); 4890832c9bbSMatthew Dillon RB_INIT(&hmp->rb_resv_root); 4900832c9bbSMatthew Dillon RB_INIT(&hmp->rb_bufs_root); 4915fa5c92fSMatthew Dillon RB_INIT(&hmp->rb_pfsm_root); 4920832c9bbSMatthew Dillon 493195c19a1SMatthew Dillon hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 494427e5fc6SMatthew Dillon 49510a5d1baSMatthew Dillon TAILQ_INIT(&hmp->volu_list); 49610a5d1baSMatthew Dillon TAILQ_INIT(&hmp->undo_list); 49710a5d1baSMatthew Dillon TAILQ_INIT(&hmp->data_list); 49810a5d1baSMatthew Dillon TAILQ_INIT(&hmp->meta_list); 49910a5d1baSMatthew Dillon TAILQ_INIT(&hmp->lose_list); 50010a5d1baSMatthew Dillon 501427e5fc6SMatthew Dillon /* 502427e5fc6SMatthew Dillon * Load volumes 503427e5fc6SMatthew Dillon */ 504427e5fc6SMatthew Dillon path = objcache_get(namei_oc, M_WAITOK); 5051b0ab2c3SMatthew Dillon hmp->nvolumes = -1; 506427e5fc6SMatthew Dillon for (i = 0; i < info.nvolumes; ++i) { 5077c19b529SMichael Neumann if (mntpt == NULL) { 5087c19b529SMichael Neumann /* 5097c19b529SMichael Neumann * Root mount. 
5107c19b529SMichael Neumann */ 511104cb849SMichael Neumann KKASSERT(next_volume_ptr != NULL); 512a407819fSMichael Neumann strcpy(path, ""); 513a407819fSMichael Neumann if (*next_volume_ptr != '/') { 514a407819fSMichael Neumann /* relative path */ 515104cb849SMichael Neumann strcpy(path, "/dev/"); 516a407819fSMichael Neumann } 517104cb849SMichael Neumann int k; 518104cb849SMichael Neumann for (k = strlen(path); k < MAXPATHLEN-1; ++k) { 519104cb849SMichael Neumann if (*next_volume_ptr == '\0') { 520104cb849SMichael Neumann break; 521104cb849SMichael Neumann } else if (*next_volume_ptr == ':') { 522104cb849SMichael Neumann ++next_volume_ptr; 523104cb849SMichael Neumann break; 524104cb849SMichael Neumann } else { 525104cb849SMichael Neumann path[k] = *next_volume_ptr; 526104cb849SMichael Neumann ++next_volume_ptr; 527104cb849SMichael Neumann } 528104cb849SMichael Neumann } 529104cb849SMichael Neumann path[k] = '\0'; 530104cb849SMichael Neumann 5317c19b529SMichael Neumann error = 0; 532104cb849SMichael Neumann cdev_t dev = kgetdiskbyname(path); 533104cb849SMichael Neumann error = bdevvp(dev, &devvp); 534104cb849SMichael Neumann if (error) { 535104cb849SMichael Neumann kprintf("hammer_mountroot: can't find devvp\n"); 536104cb849SMichael Neumann } 5377c19b529SMichael Neumann } else { 5387c19b529SMichael Neumann error = copyin(&info.volumes[i], &upath, 5397c19b529SMichael Neumann sizeof(char *)); 540427e5fc6SMatthew Dillon if (error == 0) 5417c19b529SMichael Neumann error = copyinstr(upath, path, 5427c19b529SMichael Neumann MAXPATHLEN, NULL); 5437c19b529SMichael Neumann } 544427e5fc6SMatthew Dillon if (error == 0) 5457c19b529SMichael Neumann error = hammer_install_volume(hmp, path, devvp); 546427e5fc6SMatthew Dillon if (error) 547427e5fc6SMatthew Dillon break; 548427e5fc6SMatthew Dillon } 549427e5fc6SMatthew Dillon objcache_put(namei_oc, path); 550427e5fc6SMatthew Dillon 551427e5fc6SMatthew Dillon /* 552427e5fc6SMatthew Dillon * Make sure we found a root volume 
553427e5fc6SMatthew Dillon */ 554427e5fc6SMatthew Dillon if (error == 0 && hmp->rootvol == NULL) { 555427e5fc6SMatthew Dillon kprintf("hammer_mount: No root volume found!\n"); 556427e5fc6SMatthew Dillon error = EINVAL; 557427e5fc6SMatthew Dillon } 5581b0ab2c3SMatthew Dillon 5591b0ab2c3SMatthew Dillon /* 5601b0ab2c3SMatthew Dillon * Check that all required volumes are available 5611b0ab2c3SMatthew Dillon */ 5621b0ab2c3SMatthew Dillon if (error == 0 && hammer_mountcheck_volumes(hmp)) { 5631b0ab2c3SMatthew Dillon kprintf("hammer_mount: Missing volumes, cannot mount!\n"); 5641b0ab2c3SMatthew Dillon error = EINVAL; 5651b0ab2c3SMatthew Dillon } 5661b0ab2c3SMatthew Dillon 567427e5fc6SMatthew Dillon if (error) { 568427e5fc6SMatthew Dillon hammer_free_hmp(mp); 569427e5fc6SMatthew Dillon return (error); 570427e5fc6SMatthew Dillon } 571427e5fc6SMatthew Dillon 572427e5fc6SMatthew Dillon /* 57327ea2398SMatthew Dillon * No errors, setup enough of the mount point so we can lookup the 57427ea2398SMatthew Dillon * root vnode. 575427e5fc6SMatthew Dillon */ 576427e5fc6SMatthew Dillon mp->mnt_iosize_max = MAXPHYS; 577427e5fc6SMatthew Dillon mp->mnt_kern_flag |= MNTK_FSMID; 578c0ade690SMatthew Dillon 579c0ade690SMatthew Dillon /* 580aac0aabdSMatthew Dillon * MPSAFE code. Note that VOPs and VFSops which are not MPSAFE 581aac0aabdSMatthew Dillon * will acquire a per-mount token prior to entry and release it 582aac0aabdSMatthew Dillon * on return, so even if we do not specify it we no longer get 583aac0aabdSMatthew Dillon * the BGL regardlless of how we are flagged. 584aac0aabdSMatthew Dillon */ 585aac0aabdSMatthew Dillon mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE; 586aac0aabdSMatthew Dillon 587aac0aabdSMatthew Dillon /* 588c0ade690SMatthew Dillon * note: f_iosize is used by vnode_pager_haspage() when constructing 589c0ade690SMatthew Dillon * its VOP_BMAP call. 
590c0ade690SMatthew Dillon */ 591c0ade690SMatthew Dillon mp->mnt_stat.f_iosize = HAMMER_BUFSIZE; 592fbc6e32aSMatthew Dillon mp->mnt_stat.f_bsize = HAMMER_BUFSIZE; 5936f97fce3SMatthew Dillon 5946f97fce3SMatthew Dillon mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE; 5956f97fce3SMatthew Dillon mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE; 5966f97fce3SMatthew Dillon 597427e5fc6SMatthew Dillon mp->mnt_maxsymlinklen = 255; 598427e5fc6SMatthew Dillon mp->mnt_flag |= MNT_LOCAL; 599427e5fc6SMatthew Dillon 600427e5fc6SMatthew Dillon vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops); 6017a04d74fSMatthew Dillon vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops); 6027a04d74fSMatthew Dillon vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops); 60327ea2398SMatthew Dillon 60427ea2398SMatthew Dillon /* 605a89aec1bSMatthew Dillon * The root volume's ondisk pointer is only valid if we hold a 606a89aec1bSMatthew Dillon * reference to it. 607a89aec1bSMatthew Dillon */ 608a89aec1bSMatthew Dillon rootvol = hammer_get_root_volume(hmp, &error); 609a89aec1bSMatthew Dillon if (error) 610f90dde4cSMatthew Dillon goto failed; 611f90dde4cSMatthew Dillon 612f90dde4cSMatthew Dillon /* 6139f5097dcSMatthew Dillon * Perform any necessary UNDO operations. The recovery code does 6140729c8c8SMatthew Dillon * call hammer_undo_lookup() so we have to pre-cache the blockmap, 6150729c8c8SMatthew Dillon * and then re-copy it again after recovery is complete. 616c9b9e29dSMatthew Dillon * 61751c35492SMatthew Dillon * If this is a read-only mount the UNDO information is retained 61851c35492SMatthew Dillon * in memory in the form of dirty buffer cache buffers, and not 61951c35492SMatthew Dillon * written back to the media. 
620f90dde4cSMatthew Dillon */ 6210729c8c8SMatthew Dillon bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap, 6220729c8c8SMatthew Dillon sizeof(hmp->blockmap)); 623c9b9e29dSMatthew Dillon 6247a61b85dSMatthew Dillon /* 62544a83111SMatthew Dillon * Check filesystem version 62644a83111SMatthew Dillon */ 62744a83111SMatthew Dillon hmp->version = rootvol->ondisk->vol_version; 62844a83111SMatthew Dillon if (hmp->version < HAMMER_VOL_VERSION_MIN || 62944a83111SMatthew Dillon hmp->version > HAMMER_VOL_VERSION_MAX) { 63044a83111SMatthew Dillon kprintf("HAMMER: mount unsupported fs version %d\n", 63144a83111SMatthew Dillon hmp->version); 63244a83111SMatthew Dillon error = ERANGE; 63344a83111SMatthew Dillon goto done; 63444a83111SMatthew Dillon } 63544a83111SMatthew Dillon 63644a83111SMatthew Dillon /* 6377a61b85dSMatthew Dillon * The undo_rec_limit limits the size of flush groups to avoid 6387a61b85dSMatthew Dillon * blowing out the UNDO FIFO. This calculation is typically in 6397a61b85dSMatthew Dillon * the tens of thousands and is designed primarily when small 6407a61b85dSMatthew Dillon * HAMMER filesystems are created. 6417a61b85dSMatthew Dillon */ 6427a61b85dSMatthew Dillon hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100; 6437a61b85dSMatthew Dillon if (hammer_debug_general & 0x0001) 6447a61b85dSMatthew Dillon kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit); 6457a61b85dSMatthew Dillon 64602428fb6SMatthew Dillon /* 64702428fb6SMatthew Dillon * NOTE: Recover stage1 not only handles meta-data recovery, it 64802428fb6SMatthew Dillon * also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems. 
64902428fb6SMatthew Dillon */ 65002428fb6SMatthew Dillon error = hammer_recover_stage1(hmp, rootvol); 651f90dde4cSMatthew Dillon if (error) { 652f90dde4cSMatthew Dillon kprintf("Failed to recover HAMMER filesystem on mount\n"); 653a89aec1bSMatthew Dillon goto done; 654f90dde4cSMatthew Dillon } 655f90dde4cSMatthew Dillon 656f90dde4cSMatthew Dillon /* 657adf01747SMatthew Dillon * Finish setup now that we have a good root volume. 658adf01747SMatthew Dillon * 659adf01747SMatthew Dillon * The top 16 bits of fsid.val[1] is a pfs id. 660f90dde4cSMatthew Dillon */ 661a89aec1bSMatthew Dillon ksnprintf(mp->mnt_stat.f_mntfromname, 662a89aec1bSMatthew Dillon sizeof(mp->mnt_stat.f_mntfromname), "%s", 663a89aec1bSMatthew Dillon rootvol->ondisk->vol_name); 664513ca7d7SMatthew Dillon mp->mnt_stat.f_fsid.val[0] = 665513ca7d7SMatthew Dillon crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8); 666513ca7d7SMatthew Dillon mp->mnt_stat.f_fsid.val[1] = 667513ca7d7SMatthew Dillon crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8); 668adf01747SMatthew Dillon mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF; 669b84de5afSMatthew Dillon 6706f97fce3SMatthew Dillon mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid; 6716f97fce3SMatthew Dillon mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid, 6726f97fce3SMatthew Dillon sizeof(mp->mnt_vstat.f_fsid_uuid)); 6736f97fce3SMatthew Dillon 6740729c8c8SMatthew Dillon /* 6750729c8c8SMatthew Dillon * Certain often-modified fields in the root volume are cached in 6760729c8c8SMatthew Dillon * the hammer_mount structure so we do not have to generate lots 6770729c8c8SMatthew Dillon * of little UNDO structures for them. 678c9b9e29dSMatthew Dillon * 6799f5097dcSMatthew Dillon * Recopy after recovery. 
This also has the side effect of 6809f5097dcSMatthew Dillon * setting our cached undo FIFO's first_offset, which serves to 6819f5097dcSMatthew Dillon * placemark the FIFO start for the NEXT flush cycle while the 6829f5097dcSMatthew Dillon * on-disk first_offset represents the LAST flush cycle. 6830729c8c8SMatthew Dillon */ 684b84de5afSMatthew Dillon hmp->next_tid = rootvol->ondisk->vol0_next_tid; 6854889cbd4SMatthew Dillon hmp->flush_tid1 = hmp->next_tid; 6864889cbd4SMatthew Dillon hmp->flush_tid2 = hmp->next_tid; 6870729c8c8SMatthew Dillon bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap, 6880729c8c8SMatthew Dillon sizeof(hmp->blockmap)); 689e63644f0SMatthew Dillon hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks; 6900729c8c8SMatthew Dillon 691059819e3SMatthew Dillon hammer_flusher_create(hmp); 692059819e3SMatthew Dillon 693a89aec1bSMatthew Dillon /* 69427ea2398SMatthew Dillon * Locate the root directory using the root cluster's B-Tree as a 69527ea2398SMatthew Dillon * starting point. The root directory uses an obj_id of 1. 69627ea2398SMatthew Dillon * 69727ea2398SMatthew Dillon * FUTURE: Leave the root directory cached referenced but unlocked 69827ea2398SMatthew Dillon * in hmp->rootvp (need to flush it on unmount). 69927ea2398SMatthew Dillon */ 700b9b0a6d0SMatthew Dillon error = hammer_vfs_vget(mp, NULL, 1, &rootvp); 701a89aec1bSMatthew Dillon if (error) 702a89aec1bSMatthew Dillon goto done; 70327ea2398SMatthew Dillon vput(rootvp); 70427ea2398SMatthew Dillon /*vn_unlock(hmp->rootvp);*/ 70588c39f64SThomas Nikolajsen if (hmp->ronly == 0) 70602428fb6SMatthew Dillon error = hammer_recover_stage2(hmp, rootvol); 70727ea2398SMatthew Dillon 708a89aec1bSMatthew Dillon done: 709f90dde4cSMatthew Dillon hammer_rel_volume(rootvol, 0); 710f90dde4cSMatthew Dillon failed: 71127ea2398SMatthew Dillon /* 71227ea2398SMatthew Dillon * Cleanup and return. 
71327ea2398SMatthew Dillon */ 71427ea2398SMatthew Dillon if (error) 71527ea2398SMatthew Dillon hammer_free_hmp(mp); 716427e5fc6SMatthew Dillon return (error); 717427e5fc6SMatthew Dillon } 718427e5fc6SMatthew Dillon 719427e5fc6SMatthew Dillon static int 720427e5fc6SMatthew Dillon hammer_vfs_unmount(struct mount *mp, int mntflags) 721427e5fc6SMatthew Dillon { 722427e5fc6SMatthew Dillon #if 0 723427e5fc6SMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 724427e5fc6SMatthew Dillon #endif 725427e5fc6SMatthew Dillon int flags; 72666325755SMatthew Dillon int error; 72727ea2398SMatthew Dillon 72827ea2398SMatthew Dillon /* 729427e5fc6SMatthew Dillon * Clean out the vnodes 730427e5fc6SMatthew Dillon */ 73166325755SMatthew Dillon flags = 0; 73266325755SMatthew Dillon if (mntflags & MNT_FORCE) 73366325755SMatthew Dillon flags |= FORCECLOSE; 73466325755SMatthew Dillon if ((error = vflush(mp, 0, flags)) != 0) 73566325755SMatthew Dillon return (error); 736427e5fc6SMatthew Dillon 737427e5fc6SMatthew Dillon /* 738427e5fc6SMatthew Dillon * Clean up the internal mount structure and related entities. This 739427e5fc6SMatthew Dillon * may issue I/O. 740427e5fc6SMatthew Dillon */ 741427e5fc6SMatthew Dillon hammer_free_hmp(mp); 742427e5fc6SMatthew Dillon return(0); 743427e5fc6SMatthew Dillon } 744427e5fc6SMatthew Dillon 745427e5fc6SMatthew Dillon /* 746427e5fc6SMatthew Dillon * Clean up the internal mount structure and disassociate it from the mount. 747427e5fc6SMatthew Dillon * This may issue I/O. 748427e5fc6SMatthew Dillon */ 749427e5fc6SMatthew Dillon static void 750427e5fc6SMatthew Dillon hammer_free_hmp(struct mount *mp) 751427e5fc6SMatthew Dillon { 752427e5fc6SMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 753cdb6e4e6SMatthew Dillon hammer_flush_group_t flg; 7541b0ab2c3SMatthew Dillon int count; 755d40bfecaSMatthew Dillon int dummy; 756427e5fc6SMatthew Dillon 757427e5fc6SMatthew Dillon /* 758cdb6e4e6SMatthew Dillon * Flush anything dirty. 
This won't even run if the 759cdb6e4e6SMatthew Dillon * filesystem errored-out. 760427e5fc6SMatthew Dillon */ 7611b0ab2c3SMatthew Dillon count = 0; 7621b0ab2c3SMatthew Dillon while (hammer_flusher_haswork(hmp)) { 763059819e3SMatthew Dillon hammer_flusher_sync(hmp); 7641b0ab2c3SMatthew Dillon ++count; 7651b0ab2c3SMatthew Dillon if (count >= 5) { 7661b0ab2c3SMatthew Dillon if (count == 5) 7671b0ab2c3SMatthew Dillon kprintf("HAMMER: umount flushing."); 7681b0ab2c3SMatthew Dillon else 7691b0ab2c3SMatthew Dillon kprintf("."); 770d40bfecaSMatthew Dillon tsleep(&dummy, 0, "hmrufl", hz); 7711b0ab2c3SMatthew Dillon } 7721b0ab2c3SMatthew Dillon if (count == 30) { 7731b0ab2c3SMatthew Dillon kprintf("giving up\n"); 7741b0ab2c3SMatthew Dillon break; 7751b0ab2c3SMatthew Dillon } 7761b0ab2c3SMatthew Dillon } 7771b0ab2c3SMatthew Dillon if (count >= 5 && count < 30) 7781b0ab2c3SMatthew Dillon kprintf("\n"); 779cdb6e4e6SMatthew Dillon 780cdb6e4e6SMatthew Dillon /* 781cdb6e4e6SMatthew Dillon * If the mount had a critical error we have to destroy any 782cdb6e4e6SMatthew Dillon * remaining inodes before we can finish cleaning up the flusher. 783cdb6e4e6SMatthew Dillon */ 784cdb6e4e6SMatthew Dillon if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) { 785cdb6e4e6SMatthew Dillon RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL, 786cdb6e4e6SMatthew Dillon hammer_destroy_inode_callback, NULL); 787cdb6e4e6SMatthew Dillon } 788cdb6e4e6SMatthew Dillon 789cdb6e4e6SMatthew Dillon /* 790cdb6e4e6SMatthew Dillon * There shouldn't be any inodes left now and any left over 791cdb6e4e6SMatthew Dillon * flush groups should now be empty. 
792cdb6e4e6SMatthew Dillon */ 793cdb6e4e6SMatthew Dillon KKASSERT(RB_EMPTY(&hmp->rb_inos_root)); 794cdb6e4e6SMatthew Dillon while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) { 795cdb6e4e6SMatthew Dillon TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry); 796ff003b11SMatthew Dillon KKASSERT(RB_EMPTY(&flg->flush_tree)); 797cdb6e4e6SMatthew Dillon if (flg->refs) { 798cdb6e4e6SMatthew Dillon kprintf("HAMMER: Warning, flush_group %p was " 799cdb6e4e6SMatthew Dillon "not empty on umount!\n", flg); 800cdb6e4e6SMatthew Dillon } 801bac808feSMatthew Dillon kfree(flg, hmp->m_misc); 802cdb6e4e6SMatthew Dillon } 803cdb6e4e6SMatthew Dillon 804cdb6e4e6SMatthew Dillon /* 805cdb6e4e6SMatthew Dillon * We can finally destroy the flusher 806cdb6e4e6SMatthew Dillon */ 807059819e3SMatthew Dillon hammer_flusher_destroy(hmp); 808427e5fc6SMatthew Dillon 809b84de5afSMatthew Dillon /* 81000f16fadSMatthew Dillon * We may have held recovered buffers due to a read-only mount. 81100f16fadSMatthew Dillon * These must be discarded. 
81200f16fadSMatthew Dillon */ 81300f16fadSMatthew Dillon if (hmp->ronly) 81400f16fadSMatthew Dillon hammer_recover_flush_buffers(hmp, NULL, -1); 81500f16fadSMatthew Dillon 81600f16fadSMatthew Dillon /* 8170832c9bbSMatthew Dillon * Unload buffers and then volumes 818b84de5afSMatthew Dillon */ 8190832c9bbSMatthew Dillon RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL, 8200832c9bbSMatthew Dillon hammer_unload_buffer, NULL); 821427e5fc6SMatthew Dillon RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL, 822427e5fc6SMatthew Dillon hammer_unload_volume, NULL); 823427e5fc6SMatthew Dillon 824427e5fc6SMatthew Dillon mp->mnt_data = NULL; 82566325755SMatthew Dillon mp->mnt_flag &= ~MNT_LOCAL; 826427e5fc6SMatthew Dillon hmp->mp = NULL; 8270729c8c8SMatthew Dillon hammer_destroy_objid_cache(hmp); 828bac808feSMatthew Dillon kmalloc_destroy(&hmp->m_misc); 829bac808feSMatthew Dillon kmalloc_destroy(&hmp->m_inodes); 830427e5fc6SMatthew Dillon kfree(hmp, M_HAMMER); 831427e5fc6SMatthew Dillon } 832427e5fc6SMatthew Dillon 833427e5fc6SMatthew Dillon /* 834cdb6e4e6SMatthew Dillon * Report critical errors. ip may be NULL. 835cdb6e4e6SMatthew Dillon */ 836cdb6e4e6SMatthew Dillon void 837cdb6e4e6SMatthew Dillon hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, 838cdb6e4e6SMatthew Dillon int error, const char *msg) 839cdb6e4e6SMatthew Dillon { 840cdb6e4e6SMatthew Dillon hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR; 841c9ce54d6SMatthew Dillon 842cdb6e4e6SMatthew Dillon krateprintf(&hmp->krate, 843c9ce54d6SMatthew Dillon "HAMMER(%s): Critical error inode=%jd error=%d %s\n", 844cdb6e4e6SMatthew Dillon hmp->mp->mnt_stat.f_mntfromname, 845c9ce54d6SMatthew Dillon (intmax_t)(ip ? 
ip->obj_id : -1), 846c9ce54d6SMatthew Dillon error, msg); 847c9ce54d6SMatthew Dillon 848cdb6e4e6SMatthew Dillon if (hmp->ronly == 0) { 849cdb6e4e6SMatthew Dillon hmp->ronly = 2; /* special errored read-only mode */ 850cdb6e4e6SMatthew Dillon hmp->mp->mnt_flag |= MNT_RDONLY; 851cdb6e4e6SMatthew Dillon kprintf("HAMMER(%s): Forcing read-only mode\n", 852cdb6e4e6SMatthew Dillon hmp->mp->mnt_stat.f_mntfromname); 853cdb6e4e6SMatthew Dillon } 854cdb6e4e6SMatthew Dillon hmp->error = error; 855fc73edd8SMatthew Dillon if (hammer_debug_critical) 856c9ce54d6SMatthew Dillon Debugger("Entering debugger"); 857cdb6e4e6SMatthew Dillon } 858cdb6e4e6SMatthew Dillon 859cdb6e4e6SMatthew Dillon 860cdb6e4e6SMatthew Dillon /* 861513ca7d7SMatthew Dillon * Obtain a vnode for the specified inode number. An exclusively locked 862513ca7d7SMatthew Dillon * vnode is returned. 863513ca7d7SMatthew Dillon */ 864513ca7d7SMatthew Dillon int 865b9b0a6d0SMatthew Dillon hammer_vfs_vget(struct mount *mp, struct vnode *dvp, 866b9b0a6d0SMatthew Dillon ino_t ino, struct vnode **vpp) 867513ca7d7SMatthew Dillon { 86836f82b23SMatthew Dillon struct hammer_transaction trans; 869513ca7d7SMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 870513ca7d7SMatthew Dillon struct hammer_inode *ip; 871513ca7d7SMatthew Dillon int error; 872b9b0a6d0SMatthew Dillon u_int32_t localization; 873513ca7d7SMatthew Dillon 87436f82b23SMatthew Dillon hammer_simple_transaction(&trans, hmp); 87536f82b23SMatthew Dillon 876513ca7d7SMatthew Dillon /* 877b9b0a6d0SMatthew Dillon * If a directory vnode is supplied (mainly NFS) then we can acquire 878b9b0a6d0SMatthew Dillon * the PFS domain from it. Otherwise we would only be able to vget 879b9b0a6d0SMatthew Dillon * inodes in the root PFS. 
880b9b0a6d0SMatthew Dillon */ 881b9b0a6d0SMatthew Dillon if (dvp) { 882b9b0a6d0SMatthew Dillon localization = HAMMER_DEF_LOCALIZATION + 883b9b0a6d0SMatthew Dillon VTOI(dvp)->obj_localization; 884b9b0a6d0SMatthew Dillon } else { 885b9b0a6d0SMatthew Dillon localization = HAMMER_DEF_LOCALIZATION; 886b9b0a6d0SMatthew Dillon } 887b9b0a6d0SMatthew Dillon 888b9b0a6d0SMatthew Dillon /* 88936f82b23SMatthew Dillon * Lookup the requested HAMMER inode. The structure must be 89036f82b23SMatthew Dillon * left unlocked while we manipulate the related vnode to avoid 89136f82b23SMatthew Dillon * a deadlock. 892513ca7d7SMatthew Dillon */ 893ddfdf542SMatthew Dillon ip = hammer_get_inode(&trans, NULL, ino, 894b9b0a6d0SMatthew Dillon hmp->asof, localization, 895ddfdf542SMatthew Dillon 0, &error); 896513ca7d7SMatthew Dillon if (ip == NULL) { 897513ca7d7SMatthew Dillon *vpp = NULL; 89800f16fadSMatthew Dillon hammer_done_transaction(&trans); 899513ca7d7SMatthew Dillon return(error); 900513ca7d7SMatthew Dillon } 901e8599db1SMatthew Dillon error = hammer_get_vnode(ip, vpp); 902513ca7d7SMatthew Dillon hammer_rel_inode(ip, 0); 903b84de5afSMatthew Dillon hammer_done_transaction(&trans); 904513ca7d7SMatthew Dillon return (error); 905513ca7d7SMatthew Dillon } 906513ca7d7SMatthew Dillon 907513ca7d7SMatthew Dillon /* 908427e5fc6SMatthew Dillon * Return the root vnode for the filesystem. 909427e5fc6SMatthew Dillon * 910427e5fc6SMatthew Dillon * HAMMER stores the root vnode in the hammer_mount structure so 911427e5fc6SMatthew Dillon * getting it is easy. 
912427e5fc6SMatthew Dillon */ 913427e5fc6SMatthew Dillon static int 914427e5fc6SMatthew Dillon hammer_vfs_root(struct mount *mp, struct vnode **vpp) 915427e5fc6SMatthew Dillon { 91647197d71SMatthew Dillon #if 0 917427e5fc6SMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 91847197d71SMatthew Dillon #endif 91927ea2398SMatthew Dillon int error; 920427e5fc6SMatthew Dillon 921b9b0a6d0SMatthew Dillon error = hammer_vfs_vget(mp, NULL, 1, vpp); 92227ea2398SMatthew Dillon return (error); 923427e5fc6SMatthew Dillon } 924427e5fc6SMatthew Dillon 925427e5fc6SMatthew Dillon static int 926427e5fc6SMatthew Dillon hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 927427e5fc6SMatthew Dillon { 928fbc6e32aSMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 929fbc6e32aSMatthew Dillon hammer_volume_t volume; 930fbc6e32aSMatthew Dillon hammer_volume_ondisk_t ondisk; 931fbc6e32aSMatthew Dillon int error; 93247197d71SMatthew Dillon int64_t bfree; 93331a56ce2SMatthew Dillon int64_t breserved; 934fbc6e32aSMatthew Dillon 935fbc6e32aSMatthew Dillon volume = hammer_get_root_volume(hmp, &error); 936fbc6e32aSMatthew Dillon if (error) 937fbc6e32aSMatthew Dillon return(error); 938fbc6e32aSMatthew Dillon ondisk = volume->ondisk; 939fbc6e32aSMatthew Dillon 94047197d71SMatthew Dillon /* 94147197d71SMatthew Dillon * Basic stats 94247197d71SMatthew Dillon */ 94331a56ce2SMatthew Dillon _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved); 944fbc6e32aSMatthew Dillon mp->mnt_stat.f_files = ondisk->vol0_stat_inodes; 945c3be93f2SMatthew Dillon bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE; 94640043e7fSMatthew Dillon hammer_rel_volume(volume, 0); 94747197d71SMatthew Dillon 94831a56ce2SMatthew Dillon mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE; 94947197d71SMatthew Dillon mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree; 950fbc6e32aSMatthew Dillon if (mp->mnt_stat.f_files < 0) 951fbc6e32aSMatthew Dillon mp->mnt_stat.f_files 
= 0; 952fbc6e32aSMatthew Dillon 95327ea2398SMatthew Dillon *sbp = mp->mnt_stat; 95427ea2398SMatthew Dillon return(0); 955427e5fc6SMatthew Dillon } 956427e5fc6SMatthew Dillon 9576f97fce3SMatthew Dillon static int 9586f97fce3SMatthew Dillon hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 9596f97fce3SMatthew Dillon { 9606f97fce3SMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 9616f97fce3SMatthew Dillon hammer_volume_t volume; 9626f97fce3SMatthew Dillon hammer_volume_ondisk_t ondisk; 9636f97fce3SMatthew Dillon int error; 9646f97fce3SMatthew Dillon int64_t bfree; 9650f65be10SMatthew Dillon int64_t breserved; 9666f97fce3SMatthew Dillon 9676f97fce3SMatthew Dillon volume = hammer_get_root_volume(hmp, &error); 9686f97fce3SMatthew Dillon if (error) 9696f97fce3SMatthew Dillon return(error); 9706f97fce3SMatthew Dillon ondisk = volume->ondisk; 9716f97fce3SMatthew Dillon 9726f97fce3SMatthew Dillon /* 9736f97fce3SMatthew Dillon * Basic stats 9746f97fce3SMatthew Dillon */ 9750f65be10SMatthew Dillon _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved); 9766f97fce3SMatthew Dillon mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes; 9776f97fce3SMatthew Dillon bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE; 9786f97fce3SMatthew Dillon hammer_rel_volume(volume, 0); 9796f97fce3SMatthew Dillon 9800f65be10SMatthew Dillon mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE; 981c0763659SMatthew Dillon mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree; 9826f97fce3SMatthew Dillon if (mp->mnt_vstat.f_files < 0) 9836f97fce3SMatthew Dillon mp->mnt_vstat.f_files = 0; 9846f97fce3SMatthew Dillon *sbp = mp->mnt_vstat; 9856f97fce3SMatthew Dillon return(0); 9866f97fce3SMatthew Dillon } 9876f97fce3SMatthew Dillon 9880729c8c8SMatthew Dillon /* 9890729c8c8SMatthew Dillon * Sync the filesystem. 
Currently we have to run it twice, the second 9900729c8c8SMatthew Dillon * one will advance the undo start index to the end index, so if a crash 9910729c8c8SMatthew Dillon * occurs no undos will be run on mount. 99277062c8aSMatthew Dillon * 99377062c8aSMatthew Dillon * We do not sync the filesystem if we are called from a panic. If we did 99477062c8aSMatthew Dillon * we might end up blowing up a sync that was already in progress. 9950729c8c8SMatthew Dillon */ 996427e5fc6SMatthew Dillon static int 997427e5fc6SMatthew Dillon hammer_vfs_sync(struct mount *mp, int waitfor) 998427e5fc6SMatthew Dillon { 999fbc6e32aSMatthew Dillon struct hammer_mount *hmp = (void *)mp->mnt_data; 10000729c8c8SMatthew Dillon int error; 10010729c8c8SMatthew Dillon 100277062c8aSMatthew Dillon if (panicstr == NULL) { 10030729c8c8SMatthew Dillon error = hammer_sync_hmp(hmp, waitfor); 100477062c8aSMatthew Dillon } else { 100577062c8aSMatthew Dillon error = EIO; 100677062c8aSMatthew Dillon } 10070729c8c8SMatthew Dillon return (error); 1008427e5fc6SMatthew Dillon } 1009427e5fc6SMatthew Dillon 1010513ca7d7SMatthew Dillon /* 1011513ca7d7SMatthew Dillon * Convert a vnode to a file handle. 
1012513ca7d7SMatthew Dillon */ 1013513ca7d7SMatthew Dillon static int 1014513ca7d7SMatthew Dillon hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp) 1015513ca7d7SMatthew Dillon { 1016513ca7d7SMatthew Dillon hammer_inode_t ip; 1017513ca7d7SMatthew Dillon 1018513ca7d7SMatthew Dillon KKASSERT(MAXFIDSZ >= 16); 1019513ca7d7SMatthew Dillon ip = VTOI(vp); 1020513ca7d7SMatthew Dillon fhp->fid_len = offsetof(struct fid, fid_data[16]); 1021adf01747SMatthew Dillon fhp->fid_ext = ip->obj_localization >> 16; 1022513ca7d7SMatthew Dillon bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id)); 1023513ca7d7SMatthew Dillon bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof)); 1024513ca7d7SMatthew Dillon return(0); 1025513ca7d7SMatthew Dillon } 1026513ca7d7SMatthew Dillon 1027513ca7d7SMatthew Dillon 1028513ca7d7SMatthew Dillon /* 1029513ca7d7SMatthew Dillon * Convert a file handle back to a vnode. 103067863d04SMatthew Dillon * 103167863d04SMatthew Dillon * Use rootvp to enforce PFS isolation when a PFS is exported via a 103267863d04SMatthew Dillon * null mount. 
1033513ca7d7SMatthew Dillon */ 1034513ca7d7SMatthew Dillon static int 103567863d04SMatthew Dillon hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 103667863d04SMatthew Dillon struct fid *fhp, struct vnode **vpp) 1037513ca7d7SMatthew Dillon { 103836f82b23SMatthew Dillon struct hammer_transaction trans; 1039513ca7d7SMatthew Dillon struct hammer_inode *ip; 1040513ca7d7SMatthew Dillon struct hammer_inode_info info; 1041513ca7d7SMatthew Dillon int error; 1042adf01747SMatthew Dillon u_int32_t localization; 1043513ca7d7SMatthew Dillon 1044513ca7d7SMatthew Dillon bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id)); 1045513ca7d7SMatthew Dillon bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof)); 104667863d04SMatthew Dillon if (rootvp) 104767863d04SMatthew Dillon localization = VTOI(rootvp)->obj_localization; 104867863d04SMatthew Dillon else 1049adf01747SMatthew Dillon localization = (u_int32_t)fhp->fid_ext << 16; 1050513ca7d7SMatthew Dillon 105136f82b23SMatthew Dillon hammer_simple_transaction(&trans, (void *)mp->mnt_data); 105236f82b23SMatthew Dillon 1053513ca7d7SMatthew Dillon /* 1054513ca7d7SMatthew Dillon * Get/allocate the hammer_inode structure. The structure must be 1055513ca7d7SMatthew Dillon * unlocked while we manipulate the related vnode to avoid a 1056513ca7d7SMatthew Dillon * deadlock. 
1057513ca7d7SMatthew Dillon */ 1058ddfdf542SMatthew Dillon ip = hammer_get_inode(&trans, NULL, info.obj_id, 1059adf01747SMatthew Dillon info.obj_asof, localization, 0, &error); 1060513ca7d7SMatthew Dillon if (ip == NULL) { 1061513ca7d7SMatthew Dillon *vpp = NULL; 1062513ca7d7SMatthew Dillon return(error); 1063513ca7d7SMatthew Dillon } 1064e8599db1SMatthew Dillon error = hammer_get_vnode(ip, vpp); 1065513ca7d7SMatthew Dillon hammer_rel_inode(ip, 0); 1066b84de5afSMatthew Dillon hammer_done_transaction(&trans); 1067513ca7d7SMatthew Dillon return (error); 1068513ca7d7SMatthew Dillon } 1069513ca7d7SMatthew Dillon 1070513ca7d7SMatthew Dillon static int 1071513ca7d7SMatthew Dillon hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 1072513ca7d7SMatthew Dillon int *exflagsp, struct ucred **credanonp) 1073513ca7d7SMatthew Dillon { 1074513ca7d7SMatthew Dillon hammer_mount_t hmp = (void *)mp->mnt_data; 1075513ca7d7SMatthew Dillon struct netcred *np; 1076513ca7d7SMatthew Dillon int error; 1077513ca7d7SMatthew Dillon 1078513ca7d7SMatthew Dillon np = vfs_export_lookup(mp, &hmp->export, nam); 1079513ca7d7SMatthew Dillon if (np) { 1080513ca7d7SMatthew Dillon *exflagsp = np->netc_exflags; 1081513ca7d7SMatthew Dillon *credanonp = &np->netc_anon; 1082513ca7d7SMatthew Dillon error = 0; 1083513ca7d7SMatthew Dillon } else { 1084513ca7d7SMatthew Dillon error = EACCES; 1085513ca7d7SMatthew Dillon } 1086513ca7d7SMatthew Dillon return (error); 1087513ca7d7SMatthew Dillon 1088513ca7d7SMatthew Dillon } 1089513ca7d7SMatthew Dillon 1090513ca7d7SMatthew Dillon int 1091513ca7d7SMatthew Dillon hammer_vfs_export(struct mount *mp, int op, const struct export_args *export) 1092513ca7d7SMatthew Dillon { 1093513ca7d7SMatthew Dillon hammer_mount_t hmp = (void *)mp->mnt_data; 1094513ca7d7SMatthew Dillon int error; 1095513ca7d7SMatthew Dillon 1096513ca7d7SMatthew Dillon switch(op) { 1097513ca7d7SMatthew Dillon case MOUNTCTL_SET_EXPORT: 1098513ca7d7SMatthew Dillon error = vfs_export(mp, 
&hmp->export, export); 1099513ca7d7SMatthew Dillon break; 1100513ca7d7SMatthew Dillon default: 1101513ca7d7SMatthew Dillon error = EOPNOTSUPP; 1102513ca7d7SMatthew Dillon break; 1103513ca7d7SMatthew Dillon } 1104513ca7d7SMatthew Dillon return(error); 1105513ca7d7SMatthew Dillon } 1106513ca7d7SMatthew Dillon 1107