/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"
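
/*
 * The variables below are exported under the vfs.hammer sysctl tree
 * declared further down.  As an illustrative example, the tunables and
 * counters can typically be inspected or adjusted from userland with
 * sysctl(8):
 *
 *	sysctl vfs.hammer.cluster_enable=0	# disable read clustering
 *	sysctl vfs.hammer.debug_general=1	# enable general debug output
 *	sysctl vfs.hammer.count_inodes		# read an in-kernel counter
 *
 * Statistics are exported read-only (CTLFLAG_RD); tunables are CTLFLAG_RW.
 */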

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 * in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_running_io;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 1024;	/* per inode */
int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
        &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
        &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
        &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
        &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
        &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
        &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
        &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
        &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
        &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
        &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
        &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
        &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
        &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_running_io, CTLFLAG_RW,
        &hammer_limit_running_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
        &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
        &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
        &hammer_limit_reclaim, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
        &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
        &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
        &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
        &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
        &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
        &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
        &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
        &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
        &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
        &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
        &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
        &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
        &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
        &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
        &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
        &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
        &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
        &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
        &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
        &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
        &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
        &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
        &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
        &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
        &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
        &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
        &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
        &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
        &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
        &hammer_stats_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
        &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
        &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
        &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
        &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
        &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
        &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
        &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
        &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
        &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
        &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
        &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
        &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
        &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
        &hammer_fsync_mode, 0, "");

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void hammer_free_hmp(struct mount *mp);

static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
                        struct ucred *cred);
static int hammer_vfs_unmount(struct mount *mp, int mntflags);
static int hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
                        struct ucred *cred);
static int hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
                        struct ucred *cred);
static int hammer_vfs_sync(struct mount *mp, int waitfor);
static int hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
                        ino_t ino, struct vnode **vpp);
static int hammer_vfs_init(struct vfsconf *conf);
static int hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                        struct fid *fhp, struct vnode **vpp);
static int hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                        int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
        .vfs_mount      = hammer_vfs_mount,
        .vfs_unmount    = hammer_vfs_unmount,
        .vfs_root       = hammer_vfs_root,
        .vfs_statfs     = hammer_vfs_statfs,
        .vfs_statvfs    = hammer_vfs_statvfs,
        .vfs_sync       = hammer_vfs_sync,
        .vfs_vget       = hammer_vfs_vget,
        .vfs_init       = hammer_vfs_init,
        .vfs_vptofh     = hammer_vfs_vptofh,
        .vfs_fhtovp     = hammer_vfs_fhtovp,
        .vfs_checkexp   = hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
        int n;

        if (hammer_limit_recs == 0) {
                hammer_limit_recs = nbuf * 25;
                n = kmalloc_limit(M_HAMMER) / 512;
                if (hammer_limit_recs > n)
                        hammer_limit_recs = n;
        }
        if (hammer_limit_dirtybufspace == 0) {
                hammer_limit_dirtybufspace = hidirtybufspace / 2;
                if (hammer_limit_dirtybufspace < 100)
                        hammer_limit_dirtybufspace = 100;
        }

        /*
         * Set reasonable limits to maintain an I/O pipeline.  This is
         * used by the flush code which explicitly initiates I/O, and
         * is per-mount.
         *
         * The system-driven buffer cache uses vfs.lorunningspace and
         * vfs.hirunningspace globally.
         */
        if (hammer_limit_running_io == 0)
                hammer_limit_running_io = hammer_limit_dirtybufspace;
        if (hammer_limit_running_io > 10 * 1024 * 1024)
                hammer_limit_running_io = 10 * 1024 * 1024;
        return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                 struct ucred *cred)
{
        struct hammer_mount_info info;
        hammer_mount_t hmp;
        hammer_volume_t rootvol;
        struct vnode *rootvp;
        struct vnode *devvp = NULL;
        const char *upath;      /* volume name in userspace */
        char *path;             /* volume name in system space */
        int error;
        int i;
        int master_id;
        char *next_volume_ptr = NULL;

        /*
         * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
         */
        if (mntpt == NULL) {
                bzero(&info, sizeof(info));
                info.asof = 0;
                info.hflags = 0;
                info.nvolumes = 1;

                next_volume_ptr = mp->mnt_stat.f_mntfromname;

                /* Count number of volumes separated by ':' */
                for (char *p = next_volume_ptr; *p != '\0'; ++p) {
                        if (*p == ':') {
                                ++info.nvolumes;
                        }
                }

                mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
        } else {
                if ((error = copyin(data, &info, sizeof(info))) != 0)
                        return (error);
        }

        /*
         * updating or new mount
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                hmp = (void *)mp->mnt_data;
                KKASSERT(hmp != NULL);
        } else {
                if (info.nvolumes <= 0 || info.nvolumes >= 32768)
                        return (EINVAL);
                hmp = NULL;
        }

        /*
         * master-id validation.  The master id may not be changed by a
         * mount update.
         */
        if (info.hflags & HMNT_MASTERID) {
                if (hmp && hmp->master_id != info.master_id) {
                        kprintf("hammer: cannot change master id "
                                "with mount update\n");
                        return(EINVAL);
                }
                master_id = info.master_id;
                if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
                        return (EINVAL);
        } else {
                if (hmp)
                        master_id = hmp->master_id;
                else
                        master_id = 0;
        }

        /*
         * Internal mount data structure
         */
        if (hmp == NULL) {
                hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
                mp->mnt_data = (qaddr_t)hmp;
                hmp->mp = mp;
                /*TAILQ_INIT(&hmp->recycle_list);*/

                /*
                 * Make sure kmalloc type limits are set appropriately.
                 *
                 * Our inode kmalloc group is sized based on maxvnodes
                 * (controlled by the system, not us).
                 */
                kmalloc_create(&hmp->m_misc, "HAMMER-others");
                kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

                kmalloc_raise_limit(hmp->m_inodes, 0);  /* unlimited */

                hmp->root_btree_beg.localization = 0x00000000U;
                hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
                hmp->root_btree_beg.key = -0x8000000000000000LL;
                hmp->root_btree_beg.create_tid = 1;
                hmp->root_btree_beg.delete_tid = 1;
                hmp->root_btree_beg.rec_type = 0;
                hmp->root_btree_beg.obj_type = 0;

                hmp->root_btree_end.localization = 0xFFFFFFFFU;
                hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
                hmp->root_btree_end.delete_tid = 0;     /* special case */
                hmp->root_btree_end.rec_type = 0xFFFFU;
                hmp->root_btree_end.obj_type = 0;

                hmp->krate.freq = 1;    /* maximum reporting rate (hz) */
                hmp->krate.count = -16; /* initial burst */

                hmp->sync_lock.refs = 1;
                hmp->free_lock.refs = 1;
                hmp->undo_lock.refs = 1;
                hmp->blkmap_lock.refs = 1;
                hmp->snapshot_lock.refs = 1;
                hmp->volume_lock.refs = 1;

                TAILQ_INIT(&hmp->delay_list);
                TAILQ_INIT(&hmp->flush_group_list);
                TAILQ_INIT(&hmp->objid_cache_list);
                TAILQ_INIT(&hmp->undo_lru_list);
                TAILQ_INIT(&hmp->reclaim_list);
        }
        hmp->hflags &= ~HMNT_USERFLAGS;
        hmp->hflags |= info.hflags & HMNT_USERFLAGS;

        hmp->master_id = master_id;

        if (info.asof) {
                mp->mnt_flag |= MNT_RDONLY;
                hmp->asof = info.asof;
        } else {
                hmp->asof = HAMMER_MAX_TID;
        }

        hmp->volume_to_remove = -1;

        /*
         * Re-open read-write if originally read-only, or vice-versa.
         *
         * When going from read-only to read-write execute the stage2
         * recovery if it has not already been run.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                error = 0;
                if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
                        kprintf("HAMMER read-only -> read-write\n");
                        hmp->ronly = 0;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                        rootvol = hammer_get_root_volume(hmp, &error);
                        if (rootvol) {
                                hammer_recover_flush_buffers(hmp, rootvol, 1);
                                error = hammer_recover_stage2(hmp, rootvol);
                                bcopy(rootvol->ondisk->vol0_blockmap,
                                      hmp->blockmap,
                                      sizeof(hmp->blockmap));
                                hammer_rel_volume(rootvol, 0);
                        }
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        /* kernel clears MNT_RDONLY */
                } else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
                        kprintf("HAMMER read-write -> read-only\n");
                        hmp->ronly = 1;         /* messy */
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        hmp->ronly = 0;
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hmp->ronly = 1;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                }
                return(error);
        }

        RB_INIT(&hmp->rb_vols_root);
        RB_INIT(&hmp->rb_inos_root);
        RB_INIT(&hmp->rb_redo_root);
        RB_INIT(&hmp->rb_nods_root);
        RB_INIT(&hmp->rb_undo_root);
        RB_INIT(&hmp->rb_resv_root);
        RB_INIT(&hmp->rb_bufs_root);
        RB_INIT(&hmp->rb_pfsm_root);

        hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

        TAILQ_INIT(&hmp->volu_list);
        TAILQ_INIT(&hmp->undo_list);
        TAILQ_INIT(&hmp->data_list);
        TAILQ_INIT(&hmp->meta_list);
        TAILQ_INIT(&hmp->lose_list);
        TAILQ_INIT(&hmp->iorun_list);

        /*
         * Load volumes
         */
        path = objcache_get(namei_oc, M_WAITOK);
        hmp->nvolumes = -1;
        for (i = 0; i < info.nvolumes; ++i) {
                if (mntpt == NULL) {
                        /*
                         * Root mount.
                         */
                        KKASSERT(next_volume_ptr != NULL);
                        strcpy(path, "");
                        if (*next_volume_ptr != '/') {
                                /* relative path */
                                strcpy(path, "/dev/");
                        }
                        int k;
                        for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
                                if (*next_volume_ptr == '\0') {
                                        break;
                                } else if (*next_volume_ptr == ':') {
                                        ++next_volume_ptr;
                                        break;
                                } else {
                                        path[k] = *next_volume_ptr;
                                        ++next_volume_ptr;
                                }
                        }
                        path[k] = '\0';

                        error = 0;
                        cdev_t dev = kgetdiskbyname(path);
                        error = bdevvp(dev, &devvp);
                        if (error) {
                                kprintf("hammer_mountroot: can't find devvp\n");
                        }
                } else {
                        error = copyin(&info.volumes[i], &upath,
                                       sizeof(char *));
                        if (error == 0)
                                error = copyinstr(upath, path,
                                                  MAXPATHLEN, NULL);
                }
                if (error == 0)
                        error = hammer_install_volume(hmp, path, devvp);
                if (error)
                        break;
        }
        objcache_put(namei_oc, path);

        /*
         * Make sure we found a root volume
         */
        if (error == 0 && hmp->rootvol == NULL) {
                kprintf("hammer_mount: No root volume found!\n");
                error = EINVAL;
        }

        /*
         * Check that all required volumes are available
         */
        if (error == 0 && hammer_mountcheck_volumes(hmp)) {
                kprintf("hammer_mount: Missing volumes, cannot mount!\n");
                error = EINVAL;
        }

        if (error) {
                hammer_free_hmp(mp);
                return (error);
        }

        /*
         * No errors, setup enough of the mount point so we can lookup the
         * root vnode.
         */
        mp->mnt_iosize_max = MAXPHYS;
        mp->mnt_kern_flag |= MNTK_FSMID;

        /*
         * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
         * will acquire a per-mount token prior to entry and release it
         * on return, so even if we do not specify it we no longer get
         * the BGL regardless of how we are flagged.
         */
        mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE |
                             MNTK_IN_MPSAFE;

        /*
         * note: f_iosize is used by vnode_pager_haspage() when constructing
         * its VOP_BMAP call.
         */
        mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
        mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_maxsymlinklen = 255;
        mp->mnt_flag |= MNT_LOCAL;

        vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
        vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
        vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

        /*
         * The root volume's ondisk pointer is only valid if we hold a
         * reference to it.
         */
        rootvol = hammer_get_root_volume(hmp, &error);
        if (error)
                goto failed;

        /*
         * Perform any necessary UNDO operations.  The recovery code does
         * call hammer_undo_lookup() so we have to pre-cache the blockmap,
         * and then re-copy it again after recovery is complete.
         *
         * If this is a read-only mount the UNDO information is retained
         * in memory in the form of dirty buffer cache buffers, and not
         * written back to the media.
         */
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));

        /*
         * Check filesystem version
         */
        hmp->version = rootvol->ondisk->vol_version;
        if (hmp->version < HAMMER_VOL_VERSION_MIN ||
            hmp->version > HAMMER_VOL_VERSION_MAX) {
                kprintf("HAMMER: mount unsupported fs version %d\n",
                        hmp->version);
                error = ERANGE;
                goto done;
        }

        /*
         * The undo_rec_limit limits the size of flush groups to avoid
         * blowing out the UNDO FIFO.  This calculation typically yields
         * a value in the tens of thousands and matters primarily when
         * small HAMMER filesystems are created.
         */
        hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
        if (hammer_debug_general & 0x0001)
                kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

        /*
         * NOTE: Recover stage1 not only handles meta-data recovery, it
         *       also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
         */
        error = hammer_recover_stage1(hmp, rootvol);
        if (error) {
                kprintf("Failed to recover HAMMER filesystem on mount\n");
                goto done;
        }

        /*
         * Finish setup now that we have a good root volume.
         *
         * The top 16 bits of fsid.val[1] are a pfs id.
         */
        ksnprintf(mp->mnt_stat.f_mntfromname,
                  sizeof(mp->mnt_stat.f_mntfromname), "%s",
                  rootvol->ondisk->vol_name);
        mp->mnt_stat.f_fsid.val[0] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
        mp->mnt_stat.f_fsid.val[1] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
        mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

        mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
        mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
                                     sizeof(mp->mnt_vstat.f_fsid_uuid));
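
        /*
         * In other words, the fsid constructed above ends up laid out as:
         *
         *      f_fsid.val[0] = crc32 of bytes 0-7 of vol_fsid
         *      f_fsid.val[1] = crc32 of bytes 8-15 of vol_fsid, masked to
         *                      the low 16 bits so the top 16 bits remain
         *                      available for a pfs id
         *
         * while mnt_vstat.f_fsid is a single crc32 over the whole
         * vol_fsid uuid.
         */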

        /*
         * Certain often-modified fields in the root volume are cached in
         * the hammer_mount structure so we do not have to generate lots
         * of little UNDO structures for them.
         *
         * Recopy after recovery.  This also has the side effect of
         * setting our cached undo FIFO's first_offset, which serves to
         * placemark the FIFO start for the NEXT flush cycle while the
         * on-disk first_offset represents the LAST flush cycle.
         */
        hmp->next_tid = rootvol->ondisk->vol0_next_tid;
        hmp->flush_tid1 = hmp->next_tid;
        hmp->flush_tid2 = hmp->next_tid;
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));
        hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

        hammer_flusher_create(hmp);

        /*
         * Locate the root directory using the root cluster's B-Tree as a
         * starting point.  The root directory uses an obj_id of 1.
         *
         * FUTURE: Leave the root directory cached referenced but unlocked
         * in hmp->rootvp (need to flush it on unmount).
         */
        error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
        if (error)
                goto done;
        vput(rootvp);
        /*vn_unlock(hmp->rootvp);*/
        if (hmp->ronly == 0)
                error = hammer_recover_stage2(hmp, rootvol);

done:
        hammer_rel_volume(rootvol, 0);
failed:
        /*
         * Cleanup and return.
         */
        if (error)
                hammer_free_hmp(mp);
        return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int flags;
        int error;

        /*
         * Clean out the vnodes
         */
        flags = 0;
        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        if ((error = vflush(mp, 0, flags)) != 0)
                return (error);

        /*
         * Clean up the internal mount structure and related entities.  This
         * may issue I/O.
         */
        hammer_free_hmp(mp);
        return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_flush_group_t flg;
        int count;
        int dummy;

        /*
         * Flush anything dirty.  This won't even run if the
         * filesystem errored-out.
         */
        count = 0;
        while (hammer_flusher_haswork(hmp)) {
                hammer_flusher_sync(hmp);
                ++count;
                if (count >= 5) {
                        if (count == 5)
                                kprintf("HAMMER: umount flushing.");
                        else
                                kprintf(".");
                        tsleep(&dummy, 0, "hmrufl", hz);
                }
                if (count == 30) {
                        kprintf("giving up\n");
                        break;
                }
        }
        if (count >= 5 && count < 30)
                kprintf("\n");

        /*
         * If the mount had a critical error we have to destroy any
         * remaining inodes before we can finish cleaning up the flusher.
         */
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                        hammer_destroy_inode_callback, NULL);
        }

        /*
         * There shouldn't be any inodes left now and any left over
         * flush groups should now be empty.
         */
        KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                KKASSERT(RB_EMPTY(&flg->flush_tree));
                if (flg->refs) {
                        kprintf("HAMMER: Warning, flush_group %p was "
                                "not empty on umount!\n", flg);
                }
                kfree(flg, hmp->m_misc);
        }

        /*
         * We can finally destroy the flusher
         */
        hammer_flusher_destroy(hmp);

        /*
         * We may have held recovered buffers due to a read-only mount.
         * These must be discarded.
         */
        if (hmp->ronly)
                hammer_recover_flush_buffers(hmp, NULL, -1);

        /*
         * Unload buffers and then volumes
         */
        RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
                hammer_unload_buffer, NULL);
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_unload_volume, NULL);

        mp->mnt_data = NULL;
        mp->mnt_flag &= ~MNT_LOCAL;
        hmp->mp = NULL;
        hammer_destroy_objid_cache(hmp);
        kmalloc_destroy(&hmp->m_misc);
        kmalloc_destroy(&hmp->m_inodes);
        kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
                      int error, const char *msg)
{
        hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

        krateprintf(&hmp->krate,
                    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
                    hmp->mp->mnt_stat.f_mntfromname,
                    (intmax_t)(ip ? ip->obj_id : -1),
                    error, msg);

        if (hmp->ronly == 0) {
                hmp->ronly = 2;         /* special errored read-only mode */
                hmp->mp->mnt_flag |= MNT_RDONLY;
                kprintf("HAMMER(%s): Forcing read-only mode\n",
                        hmp->mp->mnt_stat.f_mntfromname);
        }
        hmp->error = error;
        if (hammer_debug_critical)
                Debugger("Entering debugger");
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
                ino_t ino, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        struct hammer_inode *ip;
        int error;
        u_int32_t localization;

        hammer_simple_transaction(&trans, hmp);

        /*
         * If a directory vnode is supplied (mainly NFS) then we can acquire
         * the PFS domain from it.  Otherwise we would only be able to vget
         * inodes in the root PFS.
         */
        if (dvp) {
                localization = HAMMER_DEF_LOCALIZATION +
                               VTOI(dvp)->obj_localization;
        } else {
                localization = HAMMER_DEF_LOCALIZATION;
        }

        /*
         * Lookup the requested HAMMER inode.  The structure must be
         * left unlocked while we manipulate the related vnode to avoid
         * a deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, ino,
                              hmp->asof, localization,
                              0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                hammer_done_transaction(&trans);
                return(error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int error;

        error = hammer_vfs_vget(mp, NULL, 1, vpp);
        return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;
        int64_t breserved;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return(error);
        ondisk = volume->ondisk;

        /*
         * Basic stats
         */
        _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
        mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
        mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
        if (mp->mnt_stat.f_files < 0)
                mp->mnt_stat.f_files = 0;

        *sbp = mp->mnt_stat;
        return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;
        int64_t breserved;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return(error);
        ondisk = volume->ondisk;

        /*
         * Basic stats
         */
        _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
        mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
        if (mp->mnt_vstat.f_files < 0)
                mp->mnt_vstat.f_files = 0;
        *sbp = mp->mnt_vstat;
        return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;

        if (panicstr == NULL) {
                error = hammer_sync_hmp(hmp, waitfor);
        } else {
                error = EIO;
        }
        return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        hammer_inode_t ip;

        KKASSERT(MAXFIDSZ >= 16);
        ip = VTOI(vp);
        fhp->fid_len = offsetof(struct fid, fid_data[16]);
        fhp->fid_ext = ip->obj_localization >> 16;
        bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
        bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
        return(0);
}
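
/*
 * For reference, the 16-byte file handle constructed above and decoded
 * again by hammer_vfs_fhtovp() below is laid out as:
 *
 *      fid_data[0..7]   - obj_id of the inode
 *      fid_data[8..15]  - obj_asof transaction id
 *      fid_ext          - upper 16 bits of obj_localization (the PFS
 *                         portion); fhtovp shifts it back up, so the low
 *                         16 bits of the localization are only recovered
 *                         when a rootvp is supplied.
 */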

/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                  struct fid *fhp, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode_info info;
        int error;
        u_int32_t localization;

        bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
        bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
        if (rootvp)
                localization = VTOI(rootvp)->obj_localization;
        else
                localization = (u_int32_t)fhp->fid_ext << 16;

        hammer_simple_transaction(&trans, (void *)mp->mnt_data);

        /*
         * Get/allocate the hammer_inode structure.  The structure must be
         * unlocked while we manipulate the related vnode to avoid a
         * deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, info.obj_id,
                              info.obj_asof, localization, 0, &error);
        if (ip) {
                error = hammer_get_vnode(ip, vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                    int *exflagsp, struct ucred **credanonp)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        struct netcred *np;
        int error;

        np = vfs_export_lookup(mp, &hmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
                *credanonp = &np->netc_anon;
                error = 0;
        } else {
                error = EACCES;
        }
        return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        int error;

        switch(op) {
        case MOUNTCTL_SET_EXPORT:
                error = vfs_export(mp, &hmp->export, export);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return(error);
}
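
/*
 * Note: hammer_vfs_export() is not wired into the hammer_vfsops table
 * above.  Given the MOUNTCTL_SET_EXPORT op it handles, it is presumably
 * reached through HAMMER's mountctl path (outside this file) when NFS
 * export information is pushed down, and it records that information
 * via vfs_export() for hammer_vfs_checkexp() to consult.
 */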