/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_error_panic;			/* panic on error levels */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 1024;	/* per inode */
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

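/*
 * The counters, statistics and tunables above are exported through the
 * vfs.hammer sysctl tree below: counters and statistics are read-only
 * (CTLFLAG_RD), tunables are read-write (CTLFLAG_RW).  For example
 * (values illustrative only):
 *
 *	sysctl vfs.hammer.count_inodes
 *	sysctl vfs.hammer.limit_inode_recs=2048
 */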
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, error_panic, CTLFLAG_RW,
	   &hammer_error_panic, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}
	return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	int maxinodes;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		if ((error = bdevvp(rootdev, &devvp))) {
			kprintf("hammer_mountroot: can't find devvp\n");
			return (error);
		}
		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.  If root
		 * increases the vnode limit you may have to do a dummy remount
		 * to adjust the HAMMER inode limit.
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		maxinodes = desiredvnodes + desiredvnodes / 5 +
			    HAMMER_RECLAIM_WAIT;
		kmalloc_raise_limit(hmp->m_inodes,
				    maxinodes * sizeof(struct hammer_inode));

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 * Only one volume; and no need for copyin.
			 */
			KKASSERT(info.nvolumes == 1);
			ksnprintf(path, MAXPATHLEN, "/dev/%s",
				  mp->mnt_stat.f_mntfromname);
			error = 0;
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, set up enough of the mount point so we can look up
	 * the root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation typically yields
	 * a value in the tens of thousands and matters primarily when
	 * small HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] are a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	error = hammer_recover_stage2(hmp, rootvol);

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(TAILQ_EMPTY(&flg->flush_list));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
	if (hammer_error_panic > 2)
		Debugger("Entering debugger");
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
			       VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, NULL, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}

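/*
 * The file handle built by hammer_vfs_vptofh() above and decoded by
 * hammer_vfs_fhtovp() below packs 16 bytes into fid_data: the 64-bit
 * obj_id at offset 0 and the 64-bit obj_asof transaction id at offset
 * 8.  The PFS id (the upper 16 bits of the inode's obj_localization)
 * travels separately in fid_ext.
 */
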
/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		/* balance the transaction, as in hammer_vfs_vget() */
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}
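
/*
 * Userland view of the mount path above, shown for reference.  This is
 * a minimal, illustrative sketch of how a mount utility might fill in
 * the hammer_mount_info structure that hammer_vfs_mount() copies in.
 * Only the fields referenced above are shown, the device paths and
 * mount point are made up, and the authoritative structure definition
 * lives in the HAMMER headers.
 */
#if 0
static const char *example_volumes[] = { "/dev/da0s1a", "/dev/da1s1a" };

static int
example_mount_hammer(void)
{
	struct hammer_mount_info info;

	bzero(&info, sizeof(info));
	info.volumes = example_volumes;	/* userspace array of device paths */
	info.nvolumes = 2;		/* must be 1..32767 */
	info.asof = 0;			/* 0 = mount the live filesystem */
	info.hflags = HMNT_MASTERID;	/* master_id below is valid */
	info.master_id = 0;		/* -1, or 0..HAMMER_MAX_MASTERS-1 */

	return (mount("hammer", "/mnt", 0, &info));
}
#endif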