/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.53 2008/06/24 17:38:17 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_cluster_enable = 1;	/* enable read clustering by default */
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_record_iterations;
int hammer_count_dirtybufs;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufs;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_write_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_cluster_enable, CTLFLAG_RW,
	   &hammer_debug_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
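
/*
 * Illustrative usage note: the knobs, limits and counters declared above
 * are published under the vfs.hammer sysctl tree by the SYSCTL_NODE /
 * SYSCTL_INT / SYSCTL_QUAD declarations, so from userland they can be
 * inspected or tuned roughly along the lines of:
 *
 *	sysctl vfs.hammer.count_inodes
 *	sysctl vfs.hammer.limit_dirtybufs=2000
 *
 * The node names follow the OID strings used above; the value 2000 is
 * only an example, not a recommended setting.
 */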

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	if (hammer_limit_recs == 0)		/* XXX TODO */
		hammer_limit_recs = nbuf * 25;
	if (hammer_limit_dirtybufs == 0) {
		hammer_limit_dirtybufs = hidirtybuffers / 2;
		if (hammer_limit_dirtybufs < 100)
			hammer_limit_dirtybufs = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
	}
	if ((info.hflags & HMNT_MASTERID) &&
	    (info.masterid < -1 || info.masterid >= HAMMER_MAX_MASTERS)) {
		return (EINVAL);
	}

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
	if (info.hflags & HMNT_MASTERID)
		hmp->masterid = info.masterid;
	else
		hmp->masterid = -1;
	if (info.asof) {
		kprintf("ASOF\n");
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

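	/*
	 * For illustration only: the volume list consumed by the loop
	 * above is expected to arrive from userland as an array of
	 * pointers to NUL-terminated device paths, roughly as a mount
	 * utility might set it up.  The device names and the direct
	 * mount(2) call below are a sketch, not the actual mount_hammer
	 * code:
	 *
	 *	struct hammer_mount_info info;
	 *	const char *vols[] = { "/dev/da0s1a", "/dev/da1s1a" };
	 *
	 *	bzero(&info, sizeof(info));
	 *	info.volumes = vols;
	 *	info.nvolumes = 2;
	 *	mount("hammer", "/mnt", 0, &info);
	 */
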
	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, set up enough of the mount point so we can look up
	 * the root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, HAMMER_DEF_LOCALIZATION,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

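/*
 * Worked example for the f_bfree computation above (the constants are an
 * assumption taken from the usual HAMMER layout, not from this file):
 * with HAMMER_LARGEBLOCK_SIZE = 8MB and HAMMER_BUFSIZE = 16KB, each free
 * big-block counted in vol0_stat_freebigblocks contributes
 *
 *	8192KB / 16KB = 512
 *
 * blocks to f_bfree and f_bavail.
 */
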
/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
		if (error == 0)
			error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}

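/*
 * Sketch of the resulting file handle layout, as implied by the bcopy()
 * offsets above and reversed in hammer_vfs_fhtovp() below (assuming
 * 64-bit obj_id and obj_asof fields):
 *
 *	fid_data[0..7]	ip->obj_id
 *	fid_data[8..15]	ip->obj_asof
 */
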
/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, HAMMER_DEF_LOCALIZATION,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}