1 /* 2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/nlookup.h> 39 #include <sys/vnode.h> 40 #include <sys/mount.h> 41 #include <sys/fcntl.h> 42 #include <sys/buf.h> 43 #include <sys/uuid.h> 44 #include <sys/vfsops.h> 45 #include <sys/sysctl.h> 46 #include <sys/socket.h> 47 #include <sys/objcache.h> 48 49 #include <sys/proc.h> 50 #include <sys/namei.h> 51 #include <sys/mountctl.h> 52 #include <sys/dirent.h> 53 #include <sys/uio.h> 54 55 #include <sys/mutex.h> 56 #include <sys/mutex2.h> 57 58 #include "hammer2.h" 59 #include "hammer2_disk.h" 60 #include "hammer2_mount.h" 61 #include "hammer2_lz4.h" 62 63 #include "zlib/hammer2_zlib.h" 64 65 #define REPORT_REFS_ERRORS 1 /* XXX remove me */ 66 67 MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); 68 69 struct hammer2_sync_info { 70 hammer2_trans_t trans; 71 int error; 72 int waitfor; 73 }; 74 75 TAILQ_HEAD(hammer2_mntlist, hammer2_dev); 76 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs); 77 static struct hammer2_mntlist hammer2_mntlist; 78 static struct hammer2_pfslist hammer2_pfslist; 79 static struct lock hammer2_mntlk; 80 81 int hammer2_debug; 82 int hammer2_cluster_enable = 1; 83 int hammer2_hardlink_enable = 1; 84 int hammer2_flush_pipe = 100; 85 int hammer2_synchronous_flush = 1; 86 int hammer2_dio_count; 87 long hammer2_limit_dirty_chains; 88 long hammer2_iod_file_read; 89 long hammer2_iod_meta_read; 90 long hammer2_iod_indr_read; 91 long hammer2_iod_fmap_read; 92 long hammer2_iod_volu_read; 93 long hammer2_iod_file_write; 94 long hammer2_iod_meta_write; 95 long hammer2_iod_indr_write; 96 long hammer2_iod_fmap_write; 97 long hammer2_iod_volu_write; 98 long hammer2_ioa_file_read; 99 long hammer2_ioa_meta_read; 100 long hammer2_ioa_indr_read; 101 long hammer2_ioa_fmap_read; 102 long hammer2_ioa_volu_read; 103 long hammer2_ioa_fmap_write; 104 long hammer2_ioa_file_write; 105 long hammer2_ioa_meta_write; 106 long hammer2_ioa_indr_write; 107 long hammer2_ioa_volu_write; 108 109 MALLOC_DECLARE(M_HAMMER2_CBUFFER); 110 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer", 111 "Buffer used for compression."); 112 113 MALLOC_DECLARE(M_HAMMER2_DEBUFFER); 114 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer", 115 "Buffer used for decompression."); 116 117 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem"); 118 119 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW, 120 &hammer2_debug, 0, ""); 121 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW, 122 &hammer2_cluster_enable, 0, ""); 123 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW, 124 &hammer2_hardlink_enable, 0, ""); 125 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW, 126 &hammer2_flush_pipe, 0, ""); 127 SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW, 128 &hammer2_synchronous_flush, 0, ""); 129 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW, 130 &hammer2_limit_dirty_chains, 0, ""); 131 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD, 132 &hammer2_dio_count, 0, ""); 133 134 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW, 135 &hammer2_iod_file_read, 0, ""); 136 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW, 137 &hammer2_iod_meta_read, 0, ""); 138 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW, 139 &hammer2_iod_indr_read, 0, ""); 140 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW, 141 &hammer2_iod_fmap_read, 0, ""); 142 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW, 143 &hammer2_iod_volu_read, 0, ""); 144 145 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW, 146 &hammer2_iod_file_write, 0, ""); 147 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW, 148 &hammer2_iod_meta_write, 0, ""); 149 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW, 150 &hammer2_iod_indr_write, 0, ""); 151 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW, 152 &hammer2_iod_fmap_write, 0, ""); 153 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW, 154 &hammer2_iod_volu_write, 0, ""); 155 156 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW, 157 &hammer2_ioa_file_read, 0, ""); 158 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW, 159 &hammer2_ioa_meta_read, 0, ""); 160 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW, 161 &hammer2_ioa_indr_read, 0, ""); 162 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW, 163 &hammer2_ioa_fmap_read, 0, ""); 164 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW, 165 &hammer2_ioa_volu_read, 0, ""); 166 167 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW, 168 &hammer2_ioa_file_write, 0, ""); 169 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW, 170 &hammer2_ioa_meta_write, 0, ""); 171 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW, 172 &hammer2_ioa_indr_write, 0, ""); 173 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW, 174 &hammer2_ioa_fmap_write, 0, ""); 175 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW, 176 &hammer2_ioa_volu_write, 0, ""); 177 178 static int hammer2_vfs_init(struct vfsconf *conf); 179 static int hammer2_vfs_uninit(struct vfsconf *vfsp); 180 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 181 struct ucred *cred); 182 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *, 183 struct vnode *, struct ucred *); 184 static int hammer2_recovery(hammer2_dev_t *hmp); 185 static int hammer2_vfs_unmount(struct mount *mp, int mntflags); 186 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp); 187 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, 188 struct ucred *cred); 189 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, 190 struct ucred *cred); 191 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 192 ino_t ino, struct vnode **vpp); 193 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 194 struct fid *fhp, struct vnode **vpp); 195 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp); 196 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 197 int *exflagsp, struct ucred **credanonp); 198 199 static int hammer2_install_volume_header(hammer2_dev_t *hmp); 200 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data); 201 202 static void hammer2_update_pmps(hammer2_dev_t *hmp); 203 204 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp); 205 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, 206 hammer2_dev_t *hmp); 207 208 /* 209 * HAMMER2 vfs operations. 210 */ 211 static struct vfsops hammer2_vfsops = { 212 .vfs_init = hammer2_vfs_init, 213 .vfs_uninit = hammer2_vfs_uninit, 214 .vfs_sync = hammer2_vfs_sync, 215 .vfs_mount = hammer2_vfs_mount, 216 .vfs_unmount = hammer2_vfs_unmount, 217 .vfs_root = hammer2_vfs_root, 218 .vfs_statfs = hammer2_vfs_statfs, 219 .vfs_statvfs = hammer2_vfs_statvfs, 220 .vfs_vget = hammer2_vfs_vget, 221 .vfs_vptofh = hammer2_vfs_vptofh, 222 .vfs_fhtovp = hammer2_vfs_fhtovp, 223 .vfs_checkexp = hammer2_vfs_checkexp 224 }; 225 226 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", ""); 227 228 VFS_SET(hammer2_vfsops, hammer2, 0); 229 MODULE_VERSION(hammer2, 1); 230 231 static 232 int 233 hammer2_vfs_init(struct vfsconf *conf) 234 { 235 static struct objcache_malloc_args margs_read; 236 static struct objcache_malloc_args margs_write; 237 static struct objcache_malloc_args margs_vop; 238 239 int error; 240 241 error = 0; 242 243 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref)) 244 error = EINVAL; 245 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data)) 246 error = EINVAL; 247 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data)) 248 error = EINVAL; 249 250 if (error) 251 kprintf("HAMMER2 structure size mismatch; cannot continue.\n"); 252 253 margs_read.objsize = 65536; 254 margs_read.mtype = M_HAMMER2_DEBUFFER; 255 256 margs_write.objsize = 32768; 257 margs_write.mtype = M_HAMMER2_CBUFFER; 258 259 margs_vop.objsize = sizeof(hammer2_xop_t); 260 margs_vop.mtype = M_HAMMER2; 261 262 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc, 263 0, 1, NULL, NULL, NULL, objcache_malloc_alloc, 264 objcache_malloc_free, &margs_read); 265 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc, 266 0, 1, NULL, NULL, NULL, objcache_malloc_alloc, 267 objcache_malloc_free, &margs_write); 268 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc, 269 0, 1, NULL, NULL, NULL, objcache_malloc_alloc, 270 objcache_malloc_free, &margs_vop); 271 272 273 lockinit(&hammer2_mntlk, "mntlk", 0, 0); 274 TAILQ_INIT(&hammer2_mntlist); 275 TAILQ_INIT(&hammer2_pfslist); 276 277 hammer2_limit_dirty_chains = desiredvnodes / 10; 278 279 return (error); 280 } 281 282 static 283 int 284 hammer2_vfs_uninit(struct vfsconf *vfsp __unused) 285 { 286 objcache_destroy(cache_buffer_read); 287 objcache_destroy(cache_buffer_write); 288 objcache_destroy(cache_xops); 289 return 0; 290 } 291 292 /* 293 * Core PFS allocator. Used to allocate the pmp structure for PFS cluster 294 * mounts and the spmp structure for media (hmp) structures. 295 * 296 * pmp->modify_tid tracks new modify_tid transaction ids for front-end 297 * transactions. Note that synchronization does not use this field. 298 * (typically frontend operations and synchronization cannot run on the 299 * same PFS node at the same time). 300 * 301 * XXX check locking 302 */ 303 hammer2_pfs_t * 304 hammer2_pfsalloc(hammer2_cluster_t *cluster, 305 const hammer2_inode_data_t *ripdata, 306 hammer2_tid_t modify_tid) 307 { 308 hammer2_chain_t *rchain; 309 hammer2_inode_t *iroot; 310 hammer2_pfs_t *pmp; 311 int count; 312 int i; 313 int j; 314 315 /* 316 * Locate or create the PFS based on the cluster id. If ripdata 317 * is NULL this is a spmp which is unique and is always allocated. 318 */ 319 if (ripdata) { 320 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 321 if (bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid, 322 sizeof(pmp->pfs_clid)) == 0) { 323 break; 324 } 325 } 326 } else { 327 pmp = NULL; 328 } 329 330 if (pmp == NULL) { 331 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO); 332 hammer2_trans_manage_init(&pmp->tmanage); 333 kmalloc_create(&pmp->minode, "HAMMER2-inodes"); 334 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg"); 335 lockinit(&pmp->lock, "pfslk", 0, 0); 336 spin_init(&pmp->inum_spin, "hm2pfsalloc_inum"); 337 RB_INIT(&pmp->inum_tree); 338 TAILQ_INIT(&pmp->unlinkq); 339 spin_init(&pmp->list_spin, "hm2pfsalloc_list"); 340 341 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) 342 hammer2_xop_group_init(pmp, &pmp->xop_groups[j]); 343 344 /* 345 * Save the last media transaction id for the flusher. Set 346 * initial 347 */ 348 if (ripdata) 349 pmp->pfs_clid = ripdata->meta.pfs_clid; 350 hammer2_mtx_init(&pmp->wthread_mtx, "h2wthr"); 351 bioq_init(&pmp->wthread_bioq); 352 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry); 353 354 /* 355 * The synchronization thread may start too early, make 356 * sure it stays frozen until we are ready to let it go. 357 * XXX 358 */ 359 /* 360 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN | 361 HAMMER2_THREAD_REMASTER; 362 */ 363 } 364 365 /* 366 * Create the PFS's root inode. 367 */ 368 if ((iroot = pmp->iroot) == NULL) { 369 iroot = hammer2_inode_get(pmp, NULL, NULL); 370 pmp->iroot = iroot; 371 hammer2_inode_ref(iroot); 372 hammer2_inode_unlock(iroot, NULL); 373 } 374 375 /* 376 * Stop here if no cluster is passed in. 377 */ 378 if (cluster == NULL) 379 goto done; 380 381 /* 382 * When a cluster is passed in we must add the cluster's chains 383 * to the PFS's root inode, update pmp->pfs_types[], and update 384 * the syncronization threads. 385 * 386 * At the moment empty spots can develop due to removals or failures. 387 * Ultimately we want to re-fill these spots but doing so might 388 * confused running code. XXX 389 */ 390 hammer2_inode_ref(iroot); 391 hammer2_mtx_ex(&iroot->lock); 392 j = iroot->cluster.nchains; 393 394 kprintf("add PFS to pmp %p[%d]\n", pmp, j); 395 396 for (i = 0; i < cluster->nchains; ++i) { 397 if (j == HAMMER2_MAXCLUSTER) 398 break; 399 rchain = cluster->array[i].chain; 400 KKASSERT(rchain->pmp == NULL); 401 rchain->pmp = pmp; 402 hammer2_chain_ref(rchain); 403 iroot->cluster.array[j].chain = rchain; 404 pmp->pfs_types[j] = ripdata->meta.pfs_type; 405 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2); 406 407 /* 408 * If the PFS is already mounted we must account 409 * for the mount_count here. 410 */ 411 if (pmp->mp) 412 ++rchain->hmp->mount_count; 413 414 /* 415 * May have to fixup dirty chain tracking. Previous 416 * pmp was NULL so nothing to undo. 417 */ 418 if (rchain->flags & HAMMER2_CHAIN_MODIFIED) 419 hammer2_pfs_memory_inc(pmp); 420 ++j; 421 } 422 iroot->cluster.nchains = j; 423 424 if (i != cluster->nchains) { 425 kprintf("hammer2_mount: cluster full!\n"); 426 /* XXX fatal error? */ 427 } 428 429 /* 430 * Update nmasters from any PFS inode which is part of the cluster. 431 * It is possible that this will result in a value which is too 432 * high. MASTER PFSs are authoritative for pfs_nmasters and will 433 * override this value later on. 434 * 435 * (This informs us of masters that might not currently be 436 * discoverable by this mount). 437 */ 438 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) { 439 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters; 440 } 441 442 /* 443 * Count visible masters. Masters are usually added with 444 * ripdata->meta.pfs_nmasters set to 1. This detects when there 445 * are more (XXX and must update the master inodes). 446 */ 447 count = 0; 448 for (i = 0; i < iroot->cluster.nchains; ++i) { 449 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) 450 ++count; 451 } 452 if (pmp->pfs_nmasters < count) 453 pmp->pfs_nmasters = count; 454 455 /* 456 * Create missing synchronization and support threads. 457 * 458 * Single-node masters (including snapshots) have nothing to 459 * synchronize and do not require this thread. 460 * 461 * Multi-node masters or any number of soft masters, slaves, copy, 462 * or other PFS types need the thread. 463 * 464 * Each thread is responsible for its particular cluster index. 465 * We use independent threads so stalls or mismatches related to 466 * any given target do not affect other targets. 467 */ 468 for (i = 0; i < iroot->cluster.nchains; ++i) { 469 /* 470 * Single-node masters (including snapshots) have nothing 471 * to synchronize and will make direct xops support calls, 472 * thus they do not require this thread. 473 * 474 * Note that there can be thousands of snapshots. We do not 475 * want to create thousands of threads. 476 */ 477 if (pmp->pfs_nmasters <= 1 && 478 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) { 479 continue; 480 } 481 482 /* 483 * Sync support thread 484 */ 485 if (pmp->sync_thrs[i].td == NULL) { 486 hammer2_thr_create(&pmp->sync_thrs[i], pmp, 487 "h2nod", i, 0, 488 hammer2_primary_sync_thread); 489 } 490 491 /* 492 * Xops support threads 493 */ 494 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 495 if (pmp->xop_groups[j].thrs[i].td) 496 continue; 497 hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp, 498 "h2xop", i, j, 499 hammer2_primary_xops_thread); 500 } 501 } 502 503 hammer2_mtx_unlock(&iroot->lock); 504 hammer2_inode_drop(iroot); 505 done: 506 return pmp; 507 } 508 509 /* 510 * Destroy a PFS, typically only occurs after the last mount on a device 511 * has gone away. 512 */ 513 static void 514 hammer2_pfsfree(hammer2_pfs_t *pmp) 515 { 516 hammer2_inode_t *iroot; 517 int i; 518 int j; 519 520 /* 521 * Cleanup our reference on iroot. iroot is (should) not be needed 522 * by the flush code. 523 */ 524 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry); 525 526 iroot = pmp->iroot; 527 if (iroot) { 528 for (i = 0; i < iroot->cluster.nchains; ++i) { 529 hammer2_thr_delete(&pmp->sync_thrs[i]); 530 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) 531 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]); 532 } 533 #if REPORT_REFS_ERRORS 534 if (pmp->iroot->refs != 1) 535 kprintf("PMP->IROOT %p REFS WRONG %d\n", 536 pmp->iroot, pmp->iroot->refs); 537 #else 538 KKASSERT(pmp->iroot->refs == 1); 539 #endif 540 /* ref for pmp->iroot */ 541 hammer2_inode_drop(pmp->iroot); 542 pmp->iroot = NULL; 543 } 544 545 kmalloc_destroy(&pmp->mmsg); 546 kmalloc_destroy(&pmp->minode); 547 548 kfree(pmp, M_HAMMER2); 549 } 550 551 /* 552 * Remove all references to hmp from the pfs list. Any PFS which becomes 553 * empty is terminated and freed. 554 * 555 * XXX inefficient. 556 */ 557 static void 558 hammer2_pfsfree_scan(hammer2_dev_t *hmp) 559 { 560 hammer2_pfs_t *pmp; 561 hammer2_inode_t *iroot; 562 hammer2_cluster_t *cluster; 563 hammer2_chain_t *rchain; 564 int didfreeze; 565 int i; 566 int j; 567 568 again: 569 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 570 if ((iroot = pmp->iroot) == NULL) 571 continue; 572 if (hmp->spmp == pmp) { 573 kprintf("unmount hmp %p remove spmp %p\n", 574 hmp, pmp); 575 hmp->spmp = NULL; 576 } 577 578 /* 579 * Determine if this PFS is affected. If it is we must 580 * freeze all management threads and lock its iroot. 581 * 582 * Freezing a management thread forces it idle, operations 583 * in-progress will be aborted and it will have to start 584 * over again when unfrozen, or exit if told to exit. 585 */ 586 cluster = &iroot->cluster; 587 for (i = 0; i < cluster->nchains; ++i) { 588 rchain = cluster->array[i].chain; 589 if (rchain == NULL || rchain->hmp != hmp) 590 continue; 591 break; 592 } 593 if (i != cluster->nchains) { 594 /* 595 * Make sure all synchronization threads are locked 596 * down. 597 */ 598 for (i = 0; i < iroot->cluster.nchains; ++i) { 599 hammer2_thr_freeze_async(&pmp->sync_thrs[i]); 600 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 601 hammer2_thr_freeze_async( 602 &pmp->xop_groups[j].thrs[i]); 603 } 604 } 605 for (i = 0; i < iroot->cluster.nchains; ++i) { 606 hammer2_thr_freeze(&pmp->sync_thrs[i]); 607 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 608 hammer2_thr_freeze( 609 &pmp->xop_groups[j].thrs[i]); 610 } 611 } 612 613 /* 614 * Lock the inode and clean out matching chains. 615 * Note that we cannot use hammer2_inode_lock_*() 616 * here because that would attempt to validate the 617 * cluster that we are in the middle of ripping 618 * apart. 619 * 620 * WARNING! We are working directly on the inodes 621 * embedded cluster. 622 */ 623 hammer2_mtx_ex(&iroot->lock); 624 625 /* 626 * Remove the chain from matching elements of the PFS. 627 */ 628 for (i = 0; i < cluster->nchains; ++i) { 629 rchain = cluster->array[i].chain; 630 if (rchain == NULL || rchain->hmp != hmp) 631 continue; 632 hammer2_thr_delete(&pmp->sync_thrs[i]); 633 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 634 hammer2_thr_delete( 635 &pmp->xop_groups[j].thrs[i]); 636 } 637 rchain = cluster->array[i].chain; 638 cluster->array[i].chain = NULL; 639 pmp->pfs_types[i] = 0; 640 if (pmp->pfs_names[i]) { 641 kfree(pmp->pfs_names[i], M_HAMMER2); 642 pmp->pfs_names[i] = NULL; 643 } 644 hammer2_chain_drop(rchain); 645 646 /* focus hint */ 647 if (cluster->focus == rchain) 648 cluster->focus = NULL; 649 } 650 hammer2_mtx_unlock(&iroot->lock); 651 didfreeze = 1; /* remaster, unfreeze down below */ 652 } else { 653 didfreeze = 0; 654 } 655 656 /* 657 * Cleanup trailing chains. Do not reorder chains (for now). 658 * XXX might remove more than we intended. 659 */ 660 while (i > 0) { 661 if (cluster->array[i - 1].chain) 662 break; 663 --i; 664 } 665 cluster->nchains = i; 666 667 /* 668 * If the PMP has no elements remaining we can destroy it. 669 * (this will transition management threads from frozen->exit). 670 */ 671 if (cluster->nchains == 0) { 672 kprintf("unmount hmp %p last ref to PMP=%p\n", 673 hmp, pmp); 674 hammer2_pfsfree(pmp); 675 goto again; 676 } 677 678 /* 679 * If elements still remain we need to set the REMASTER 680 * flag and unfreeze it. 681 */ 682 if (didfreeze) { 683 for (i = 0; i < iroot->cluster.nchains; ++i) { 684 hammer2_thr_remaster(&pmp->sync_thrs[i]); 685 hammer2_thr_unfreeze(&pmp->sync_thrs[i]); 686 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 687 hammer2_thr_remaster( 688 &pmp->xop_groups[j].thrs[i]); 689 hammer2_thr_unfreeze( 690 &pmp->xop_groups[j].thrs[i]); 691 } 692 } 693 } 694 } 695 } 696 697 /* 698 * Mount or remount HAMMER2 fileystem from physical media 699 * 700 * mountroot 701 * mp mount point structure 702 * path NULL 703 * data <unused> 704 * cred <unused> 705 * 706 * mount 707 * mp mount point structure 708 * path path to mount point 709 * data pointer to argument structure in user space 710 * volume volume path (device@LABEL form) 711 * hflags user mount flags 712 * cred user credentials 713 * 714 * RETURNS: 0 Success 715 * !0 error number 716 */ 717 static 718 int 719 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 720 struct ucred *cred) 721 { 722 struct hammer2_mount_info info; 723 hammer2_pfs_t *pmp; 724 hammer2_pfs_t *spmp; 725 hammer2_dev_t *hmp; 726 hammer2_key_t key_next; 727 hammer2_key_t key_dummy; 728 hammer2_key_t lhc; 729 struct vnode *devvp; 730 struct nlookupdata nd; 731 hammer2_chain_t *parent; 732 hammer2_cluster_t *cluster; 733 hammer2_cluster_t *cparent; 734 const hammer2_inode_data_t *ripdata; 735 hammer2_blockref_t bref; 736 struct file *fp; 737 char devstr[MNAMELEN]; 738 size_t size; 739 size_t done; 740 char *dev; 741 char *label; 742 int ronly = 1; 743 int error; 744 int cache_index; 745 int i; 746 747 hmp = NULL; 748 pmp = NULL; 749 dev = NULL; 750 label = NULL; 751 devvp = NULL; 752 cache_index = -1; 753 754 kprintf("hammer2_mount\n"); 755 756 if (path == NULL) { 757 /* 758 * Root mount 759 */ 760 bzero(&info, sizeof(info)); 761 info.cluster_fd = -1; 762 return (EOPNOTSUPP); 763 } else { 764 /* 765 * Non-root mount or updating a mount 766 */ 767 error = copyin(data, &info, sizeof(info)); 768 if (error) 769 return (error); 770 771 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done); 772 if (error) 773 return (error); 774 775 /* Extract device and label */ 776 dev = devstr; 777 label = strchr(devstr, '@'); 778 if (label == NULL || 779 ((label + 1) - dev) > done) { 780 return (EINVAL); 781 } 782 *label = '\0'; 783 label++; 784 if (*label == '\0') 785 return (EINVAL); 786 787 if (mp->mnt_flag & MNT_UPDATE) { 788 /* 789 * Update mount. Note that pmp->iroot->cluster is 790 * an inode-embedded cluster and thus cannot be 791 * directly locked. 792 * 793 * XXX HAMMER2 needs to implement NFS export via 794 * mountctl. 795 */ 796 pmp = MPTOPMP(mp); 797 cluster = &pmp->iroot->cluster; 798 for (i = 0; i < cluster->nchains; ++i) { 799 if (cluster->array[i].chain == NULL) 800 continue; 801 hmp = cluster->array[i].chain->hmp; 802 devvp = hmp->devvp; 803 error = hammer2_remount(hmp, mp, path, 804 devvp, cred); 805 if (error) 806 break; 807 } 808 /*hammer2_inode_install_hidden(pmp);*/ 809 810 return error; 811 } 812 } 813 814 /* 815 * HMP device mount 816 * 817 * Lookup name and verify it refers to a block device. 818 */ 819 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW); 820 if (error == 0) 821 error = nlookup(&nd); 822 if (error == 0) 823 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp); 824 nlookup_done(&nd); 825 826 if (error == 0) { 827 if (vn_isdisk(devvp, &error)) 828 error = vfs_mountedon(devvp); 829 } 830 831 /* 832 * Determine if the device has already been mounted. After this 833 * check hmp will be non-NULL if we are doing the second or more 834 * hammer2 mounts from the same device. 835 */ 836 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 837 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 838 if (hmp->devvp == devvp) 839 break; 840 } 841 842 /* 843 * Open the device if this isn't a secondary mount and construct 844 * the H2 device mount (hmp). 845 */ 846 if (hmp == NULL) { 847 hammer2_chain_t *schain; 848 hammer2_xid_t xid; 849 850 if (error == 0 && vcount(devvp) > 0) 851 error = EBUSY; 852 853 /* 854 * Now open the device 855 */ 856 if (error == 0) { 857 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 858 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 859 error = vinvalbuf(devvp, V_SAVE, 0, 0); 860 if (error == 0) { 861 error = VOP_OPEN(devvp, 862 ronly ? FREAD : FREAD | FWRITE, 863 FSCRED, NULL); 864 } 865 vn_unlock(devvp); 866 } 867 if (error && devvp) { 868 vrele(devvp); 869 devvp = NULL; 870 } 871 if (error) { 872 lockmgr(&hammer2_mntlk, LK_RELEASE); 873 return error; 874 } 875 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO); 876 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev); 877 hmp->ronly = ronly; 878 hmp->devvp = devvp; 879 kmalloc_create(&hmp->mchain, "HAMMER2-chains"); 880 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry); 881 RB_INIT(&hmp->iotree); 882 spin_init(&hmp->io_spin, "hm2mount_io"); 883 spin_init(&hmp->list_spin, "hm2mount_list"); 884 TAILQ_INIT(&hmp->flushq); 885 886 lockinit(&hmp->vollk, "h2vol", 0, 0); 887 888 /* 889 * vchain setup. vchain.data is embedded. 890 * vchain.refs is initialized and will never drop to 0. 891 * 892 * NOTE! voldata is not yet loaded. 893 */ 894 hmp->vchain.hmp = hmp; 895 hmp->vchain.refs = 1; 896 hmp->vchain.data = (void *)&hmp->voldata; 897 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME; 898 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 899 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 900 901 hammer2_chain_core_init(&hmp->vchain); 902 /* hmp->vchain.u.xxx is left NULL */ 903 904 /* 905 * fchain setup. fchain.data is embedded. 906 * fchain.refs is initialized and will never drop to 0. 907 * 908 * The data is not used but needs to be initialized to 909 * pass assertion muster. We use this chain primarily 910 * as a placeholder for the freemap's top-level RBTREE 911 * so it does not interfere with the volume's topology 912 * RBTREE. 913 */ 914 hmp->fchain.hmp = hmp; 915 hmp->fchain.refs = 1; 916 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset; 917 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP; 918 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 919 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 920 hmp->fchain.bref.methods = 921 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) | 922 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE); 923 924 hammer2_chain_core_init(&hmp->fchain); 925 /* hmp->fchain.u.xxx is left NULL */ 926 927 /* 928 * Install the volume header and initialize fields from 929 * voldata. 930 */ 931 error = hammer2_install_volume_header(hmp); 932 if (error) { 933 hammer2_unmount_helper(mp, NULL, hmp); 934 lockmgr(&hammer2_mntlk, LK_RELEASE); 935 hammer2_vfs_unmount(mp, MNT_FORCE); 936 return error; 937 } 938 939 /* 940 * Really important to get these right or flush will get 941 * confused. 942 */ 943 hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0); 944 kprintf("alloc spmp %p tid %016jx\n", 945 hmp->spmp, hmp->voldata.mirror_tid); 946 spmp = hmp->spmp; 947 948 /* 949 * Dummy-up vchain and fchain's modify_tid. mirror_tid 950 * is inherited from the volume header. 951 */ 952 xid = 0; 953 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 954 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid; 955 hmp->vchain.pmp = spmp; 956 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 957 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid; 958 hmp->fchain.pmp = spmp; 959 960 /* 961 * First locate the super-root inode, which is key 0 962 * relative to the volume header's blockset. 963 * 964 * Then locate the root inode by scanning the directory keyspace 965 * represented by the label. 966 */ 967 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 968 schain = hammer2_chain_lookup(&parent, &key_dummy, 969 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 970 &cache_index, 0); 971 hammer2_chain_lookup_done(parent); 972 if (schain == NULL) { 973 kprintf("hammer2_mount: invalid super-root\n"); 974 hammer2_unmount_helper(mp, NULL, hmp); 975 lockmgr(&hammer2_mntlk, LK_RELEASE); 976 hammer2_vfs_unmount(mp, MNT_FORCE); 977 return EINVAL; 978 } 979 if (schain->error) { 980 kprintf("hammer2_mount: error %s reading super-root\n", 981 hammer2_error_str(schain->error)); 982 hammer2_chain_unlock(schain); 983 hammer2_chain_drop(schain); 984 schain = NULL; 985 hammer2_unmount_helper(mp, NULL, hmp); 986 lockmgr(&hammer2_mntlk, LK_RELEASE); 987 hammer2_vfs_unmount(mp, MNT_FORCE); 988 return EINVAL; 989 } 990 991 /* 992 * The super-root always uses an inode_tid of 1 when 993 * creating PFSs. 994 */ 995 spmp->inode_tid = 1; 996 spmp->modify_tid = schain->bref.modify_tid; 997 998 /* 999 * Sanity-check schain's pmp and finish initialization. 1000 * Any chain belonging to the super-root topology should 1001 * have a NULL pmp (not even set to spmp). 1002 */ 1003 ripdata = &hammer2_chain_rdata(schain)->ipdata; 1004 KKASSERT(schain->pmp == NULL); 1005 spmp->pfs_clid = ripdata->meta.pfs_clid; 1006 1007 /* 1008 * Replace the dummy spmp->iroot with a real one. It's 1009 * easier to just do a wholesale replacement than to try 1010 * to update the chain and fixup the iroot fields. 1011 * 1012 * The returned inode is locked with the supplied cluster. 1013 */ 1014 cluster = hammer2_cluster_from_chain(schain); 1015 hammer2_inode_drop(spmp->iroot); 1016 spmp->iroot = NULL; 1017 spmp->iroot = hammer2_inode_get(spmp, NULL, cluster); 1018 spmp->spmp_hmp = hmp; 1019 spmp->pfs_types[0] = ripdata->meta.pfs_type; 1020 hammer2_inode_ref(spmp->iroot); 1021 hammer2_inode_unlock(spmp->iroot, cluster); 1022 schain = NULL; 1023 /* leave spmp->iroot with one ref */ 1024 1025 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 1026 error = hammer2_recovery(hmp); 1027 /* XXX do something with error */ 1028 } 1029 hammer2_update_pmps(hmp); 1030 hammer2_iocom_init(hmp); 1031 1032 /* 1033 * Ref the cluster management messaging descriptor. The mount 1034 * program deals with the other end of the communications pipe. 1035 */ 1036 fp = holdfp(curproc->p_fd, info.cluster_fd, -1); 1037 if (fp) { 1038 hammer2_cluster_reconnect(hmp, fp); 1039 } else { 1040 kprintf("hammer2_mount: bad cluster_fd!\n"); 1041 } 1042 } else { 1043 spmp = hmp->spmp; 1044 } 1045 1046 /* 1047 * Lookup the mount point under the media-localized super-root. 1048 * Scanning hammer2_pfslist doesn't help us because it represents 1049 * PFS cluster ids which can aggregate several named PFSs together. 1050 * 1051 * cluster->pmp will incorrectly point to spmp and must be fixed 1052 * up later on. 1053 */ 1054 hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS); 1055 cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS); 1056 lhc = hammer2_dirhash(label, strlen(label)); 1057 cluster = hammer2_cluster_lookup(cparent, &key_next, 1058 lhc, lhc + HAMMER2_DIRHASH_LOMASK, 1059 0); 1060 while (cluster) { 1061 if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE && 1062 strcmp(label, 1063 hammer2_cluster_rdata(cluster)->ipdata.filename) == 0) { 1064 break; 1065 } 1066 cluster = hammer2_cluster_next(cparent, cluster, &key_next, 1067 key_next, 1068 lhc + HAMMER2_DIRHASH_LOMASK, 0); 1069 } 1070 hammer2_inode_unlock(spmp->iroot, cparent); 1071 1072 /* 1073 * PFS could not be found? 1074 */ 1075 if (cluster == NULL) { 1076 kprintf("hammer2_mount: PFS label not found\n"); 1077 hammer2_unmount_helper(mp, NULL, hmp); 1078 lockmgr(&hammer2_mntlk, LK_RELEASE); 1079 hammer2_vfs_unmount(mp, MNT_FORCE); 1080 1081 return EINVAL; 1082 } 1083 1084 /* 1085 * Acquire the pmp structure (it should have already been allocated 1086 * via hammer2_update_pmps() so do not pass cluster in to add to 1087 * available chains). 1088 * 1089 * Check if the cluster has already been mounted. A cluster can 1090 * only be mounted once, use null mounts to mount additional copies. 1091 */ 1092 ripdata = &hammer2_cluster_rdata(cluster)->ipdata; 1093 hammer2_cluster_bref(cluster, &bref); 1094 pmp = hammer2_pfsalloc(NULL, ripdata, bref.modify_tid); 1095 hammer2_cluster_unlock(cluster); 1096 hammer2_cluster_drop(cluster); 1097 1098 if (pmp->mp) { 1099 kprintf("hammer2_mount: PFS already mounted!\n"); 1100 hammer2_unmount_helper(mp, NULL, hmp); 1101 lockmgr(&hammer2_mntlk, LK_RELEASE); 1102 hammer2_vfs_unmount(mp, MNT_FORCE); 1103 1104 return EBUSY; 1105 } 1106 1107 /* 1108 * Finish the mount 1109 */ 1110 kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp); 1111 1112 mp->mnt_flag = MNT_LOCAL; 1113 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */ 1114 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */ 1115 1116 /* 1117 * required mount structure initializations 1118 */ 1119 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE; 1120 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE; 1121 1122 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE; 1123 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 1124 1125 /* 1126 * Optional fields 1127 */ 1128 mp->mnt_iosize_max = MAXPHYS; 1129 1130 /* 1131 * Connect up mount pointers. 1132 */ 1133 hammer2_mount_helper(mp, pmp); 1134 1135 lockmgr(&hammer2_mntlk, LK_RELEASE); 1136 1137 /* 1138 * A mounted PFS needs a write thread for logical buffers and 1139 * a hidden directory for deletions of open files. These features 1140 * are not used by unmounted PFSs. 1141 * 1142 * The logical file buffer bio write thread handles things like 1143 * physical block assignment and compression. 1144 */ 1145 pmp->wthread_destroy = 0; 1146 lwkt_create(hammer2_write_thread, pmp, 1147 &pmp->wthread_td, NULL, 0, -1, "h2pfs-%s", label); 1148 1149 /* 1150 * With the cluster operational install ihidden. 1151 * (only applicable to pfs mounts, not applicable to spmp) 1152 */ 1153 hammer2_inode_install_hidden(pmp); 1154 1155 /* 1156 * Finish setup 1157 */ 1158 vfs_getnewfsid(mp); 1159 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops); 1160 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops); 1161 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops); 1162 1163 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); 1164 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 1165 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname)); 1166 copyinstr(path, mp->mnt_stat.f_mntonname, 1167 sizeof(mp->mnt_stat.f_mntonname) - 1, 1168 &size); 1169 1170 /* 1171 * Initial statfs to prime mnt_stat. 1172 */ 1173 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred); 1174 1175 return 0; 1176 } 1177 1178 /* 1179 * Scan PFSs under the super-root and create hammer2_pfs structures. 1180 */ 1181 static 1182 void 1183 hammer2_update_pmps(hammer2_dev_t *hmp) 1184 { 1185 const hammer2_inode_data_t *ripdata; 1186 hammer2_cluster_t *cparent; 1187 hammer2_cluster_t *cluster; 1188 hammer2_blockref_t bref; 1189 hammer2_pfs_t *spmp; 1190 hammer2_pfs_t *pmp; 1191 hammer2_key_t key_next; 1192 1193 /* 1194 * Lookup mount point under the media-localized super-root. 1195 * 1196 * cluster->pmp will incorrectly point to spmp and must be fixed 1197 * up later on. 1198 */ 1199 spmp = hmp->spmp; 1200 hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS); 1201 cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS); 1202 cluster = hammer2_cluster_lookup(cparent, &key_next, 1203 HAMMER2_KEY_MIN, 1204 HAMMER2_KEY_MAX, 1205 0); 1206 while (cluster) { 1207 if (hammer2_cluster_type(cluster) != HAMMER2_BREF_TYPE_INODE) 1208 continue; 1209 ripdata = &hammer2_cluster_rdata(cluster)->ipdata; 1210 hammer2_cluster_bref(cluster, &bref); 1211 kprintf("ADD LOCAL PFS: %s\n", ripdata->filename); 1212 1213 pmp = hammer2_pfsalloc(cluster, ripdata, bref.modify_tid); 1214 cluster = hammer2_cluster_next(cparent, cluster, 1215 &key_next, 1216 key_next, 1217 HAMMER2_KEY_MAX, 1218 0); 1219 } 1220 hammer2_inode_unlock(spmp->iroot, cparent); 1221 } 1222 1223 static 1224 int 1225 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path, 1226 struct vnode *devvp, struct ucred *cred) 1227 { 1228 int error; 1229 1230 if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { 1231 error = hammer2_recovery(hmp); 1232 } else { 1233 error = 0; 1234 } 1235 return error; 1236 } 1237 1238 static 1239 int 1240 hammer2_vfs_unmount(struct mount *mp, int mntflags) 1241 { 1242 hammer2_pfs_t *pmp; 1243 int flags; 1244 int error = 0; 1245 1246 pmp = MPTOPMP(mp); 1247 1248 if (pmp == NULL) 1249 return(0); 1250 1251 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 1252 1253 /* 1254 * If mount initialization proceeded far enough we must flush 1255 * its vnodes and sync the underlying mount points. Three syncs 1256 * are required to fully flush the filesystem (freemap updates lag 1257 * by one flush, and one extra for safety). 1258 */ 1259 if (mntflags & MNT_FORCE) 1260 flags = FORCECLOSE; 1261 else 1262 flags = 0; 1263 if (pmp->iroot) { 1264 error = vflush(mp, 0, flags); 1265 if (error) 1266 goto failed; 1267 hammer2_vfs_sync(mp, MNT_WAIT); 1268 hammer2_vfs_sync(mp, MNT_WAIT); 1269 hammer2_vfs_sync(mp, MNT_WAIT); 1270 } 1271 1272 if (pmp->wthread_td) { 1273 hammer2_mtx_ex(&pmp->wthread_mtx); 1274 pmp->wthread_destroy = 1; 1275 wakeup(&pmp->wthread_bioq); 1276 while (pmp->wthread_destroy != -1) { 1277 mtxsleep(&pmp->wthread_destroy, 1278 &pmp->wthread_mtx, 0, 1279 "umount-sleep", 0); 1280 } 1281 hammer2_mtx_unlock(&pmp->wthread_mtx); 1282 pmp->wthread_td = NULL; 1283 } 1284 1285 /* 1286 * Cleanup our reference on ihidden. 1287 */ 1288 if (pmp->ihidden) { 1289 hammer2_inode_drop(pmp->ihidden); 1290 pmp->ihidden = NULL; 1291 } 1292 if (pmp->mp) 1293 hammer2_unmount_helper(mp, pmp, NULL); 1294 1295 error = 0; 1296 failed: 1297 lockmgr(&hammer2_mntlk, LK_RELEASE); 1298 1299 return (error); 1300 } 1301 1302 /* 1303 * Mount helper, hook the system mount into our PFS. 1304 * The mount lock is held. 1305 * 1306 * We must bump the mount_count on related devices for any 1307 * mounted PFSs. 1308 */ 1309 static 1310 void 1311 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp) 1312 { 1313 hammer2_cluster_t *cluster; 1314 hammer2_chain_t *rchain; 1315 int i; 1316 1317 mp->mnt_data = (qaddr_t)pmp; 1318 pmp->mp = mp; 1319 1320 /* 1321 * After pmp->mp is set we have to adjust hmp->mount_count. 1322 */ 1323 cluster = &pmp->iroot->cluster; 1324 for (i = 0; i < cluster->nchains; ++i) { 1325 rchain = cluster->array[i].chain; 1326 if (rchain == NULL) 1327 continue; 1328 ++rchain->hmp->mount_count; 1329 kprintf("hammer2_mount hmp=%p ++mount_count=%d\n", 1330 rchain->hmp, rchain->hmp->mount_count); 1331 } 1332 } 1333 1334 /* 1335 * Mount helper, unhook the system mount from our PFS. 1336 * The mount lock is held. 1337 * 1338 * If hmp is supplied a mount responsible for being the first to open 1339 * the block device failed and the block device and all PFSs using the 1340 * block device must be cleaned up. 1341 * 1342 * If pmp is supplied multiple devices might be backing the PFS and each 1343 * must be disconnect. This might not be the last PFS using some of the 1344 * underlying devices. Also, we have to adjust our hmp->mount_count 1345 * accounting for the devices backing the pmp which is now undergoing an 1346 * unmount. 1347 */ 1348 static 1349 void 1350 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp) 1351 { 1352 hammer2_cluster_t *cluster; 1353 hammer2_chain_t *rchain; 1354 struct vnode *devvp; 1355 int dumpcnt; 1356 int ronly = 0; 1357 int i; 1358 1359 /* 1360 * If no device supplied this is a high-level unmount and we have to 1361 * to disconnect the mount, adjust mount_count, and locate devices 1362 * that might now have no mounts. 1363 */ 1364 if (pmp) { 1365 KKASSERT(hmp == NULL); 1366 KKASSERT((void *)(intptr_t)mp->mnt_data == pmp); 1367 pmp->mp = NULL; 1368 mp->mnt_data = NULL; 1369 1370 /* 1371 * After pmp->mp is cleared we have to account for 1372 * mount_count. 1373 */ 1374 cluster = &pmp->iroot->cluster; 1375 for (i = 0; i < cluster->nchains; ++i) { 1376 rchain = cluster->array[i].chain; 1377 if (rchain == NULL) 1378 continue; 1379 --rchain->hmp->mount_count; 1380 kprintf("hammer2_unmount hmp=%p --mount_count=%d\n", 1381 rchain->hmp, rchain->hmp->mount_count); 1382 /* scrapping hmp now may invalidate the pmp */ 1383 } 1384 again: 1385 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 1386 if (hmp->mount_count == 0) { 1387 hammer2_unmount_helper(NULL, NULL, hmp); 1388 goto again; 1389 } 1390 } 1391 return; 1392 } 1393 1394 /* 1395 * Try to terminate the block device. We can't terminate it if 1396 * there are still PFSs referencing it. 1397 */ 1398 kprintf("hammer2_unmount hmp=%p mount_count=%d\n", 1399 hmp, hmp->mount_count); 1400 if (hmp->mount_count) 1401 return; 1402 1403 hammer2_pfsfree_scan(hmp); 1404 hammer2_dev_exlock(hmp); /* XXX order */ 1405 1406 /* 1407 * Cycle the volume data lock as a safety (probably not needed any 1408 * more). To ensure everything is out we need to flush at least 1409 * three times. (1) The running of the unlinkq can dirty the 1410 * filesystem, (2) A normal flush can dirty the freemap, and 1411 * (3) ensure that the freemap is fully synchronized. 1412 * 1413 * The next mount's recovery scan can clean everything up but we want 1414 * to leave the filesystem in a 100% clean state on a normal unmount. 1415 */ 1416 #if 0 1417 hammer2_voldata_lock(hmp); 1418 hammer2_voldata_unlock(hmp); 1419 #endif 1420 hammer2_iocom_uninit(hmp); 1421 1422 if ((hmp->vchain.flags | hmp->fchain.flags) & 1423 HAMMER2_CHAIN_FLUSH_MASK) { 1424 kprintf("hammer2_unmount: chains left over " 1425 "after final sync\n"); 1426 kprintf(" vchain %08x\n", hmp->vchain.flags); 1427 kprintf(" fchain %08x\n", hmp->fchain.flags); 1428 1429 if (hammer2_debug & 0x0010) 1430 Debugger("entered debugger"); 1431 } 1432 1433 KKASSERT(hmp->spmp == NULL); 1434 1435 /* 1436 * Finish up with the device vnode 1437 */ 1438 if ((devvp = hmp->devvp) != NULL) { 1439 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1440 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0); 1441 hmp->devvp = NULL; 1442 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL); 1443 vn_unlock(devvp); 1444 vrele(devvp); 1445 devvp = NULL; 1446 } 1447 1448 /* 1449 * Clear vchain/fchain flags that might prevent final cleanup 1450 * of these chains. 1451 */ 1452 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) { 1453 atomic_clear_int(&hmp->vchain.flags, 1454 HAMMER2_CHAIN_MODIFIED); 1455 hammer2_pfs_memory_wakeup(hmp->vchain.pmp); 1456 hammer2_chain_drop(&hmp->vchain); 1457 } 1458 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) { 1459 atomic_clear_int(&hmp->vchain.flags, 1460 HAMMER2_CHAIN_UPDATE); 1461 hammer2_chain_drop(&hmp->vchain); 1462 } 1463 1464 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) { 1465 atomic_clear_int(&hmp->fchain.flags, 1466 HAMMER2_CHAIN_MODIFIED); 1467 hammer2_pfs_memory_wakeup(hmp->fchain.pmp); 1468 hammer2_chain_drop(&hmp->fchain); 1469 } 1470 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) { 1471 atomic_clear_int(&hmp->fchain.flags, 1472 HAMMER2_CHAIN_UPDATE); 1473 hammer2_chain_drop(&hmp->fchain); 1474 } 1475 1476 /* 1477 * Final drop of embedded freemap root chain to 1478 * clean up fchain.core (fchain structure is not 1479 * flagged ALLOCATED so it is cleaned out and then 1480 * left to rot). 1481 */ 1482 hammer2_chain_drop(&hmp->fchain); 1483 1484 /* 1485 * Final drop of embedded volume root chain to clean 1486 * up vchain.core (vchain structure is not flagged 1487 * ALLOCATED so it is cleaned out and then left to 1488 * rot). 1489 */ 1490 dumpcnt = 50; 1491 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v'); 1492 dumpcnt = 50; 1493 hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f'); 1494 hammer2_dev_unlock(hmp); 1495 hammer2_chain_drop(&hmp->vchain); 1496 1497 hammer2_io_cleanup(hmp, &hmp->iotree); 1498 if (hmp->iofree_count) { 1499 kprintf("io_cleanup: %d I/O's left hanging\n", 1500 hmp->iofree_count); 1501 } 1502 1503 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); 1504 kmalloc_destroy(&hmp->mchain); 1505 kfree(hmp, M_HAMMER2); 1506 } 1507 1508 static 1509 int 1510 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 1511 ino_t ino, struct vnode **vpp) 1512 { 1513 kprintf("hammer2_vget\n"); 1514 return (EOPNOTSUPP); 1515 } 1516 1517 static 1518 int 1519 hammer2_vfs_root(struct mount *mp, struct vnode **vpp) 1520 { 1521 hammer2_pfs_t *pmp; 1522 hammer2_cluster_t *cparent; 1523 int error; 1524 struct vnode *vp; 1525 1526 pmp = MPTOPMP(mp); 1527 if (pmp->iroot == NULL) { 1528 *vpp = NULL; 1529 error = EINVAL; 1530 } else { 1531 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS | 1532 HAMMER2_RESOLVE_SHARED); 1533 cparent = hammer2_inode_cluster(pmp->iroot, 1534 HAMMER2_RESOLVE_ALWAYS | 1535 HAMMER2_RESOLVE_SHARED); 1536 1537 /* 1538 * Initialize pmp->inode_tid and pmp->modify_tid on first access 1539 * to the root of mount that resolves good. 1540 * XXX probably not the best place for this. 1541 */ 1542 if (pmp->inode_tid == 0 && 1543 cparent->error == 0 && cparent->focus) { 1544 const hammer2_inode_data_t *ripdata; 1545 hammer2_blockref_t bref; 1546 1547 ripdata = &hammer2_cluster_rdata(cparent)->ipdata; 1548 hammer2_cluster_bref(cparent, &bref); 1549 pmp->inode_tid = ripdata->meta.pfs_inum + 1; 1550 pmp->modify_tid = bref.modify_tid; 1551 pmp->iroot->meta = ripdata->meta; 1552 hammer2_cluster_bref(cparent, &pmp->iroot->bref); 1553 kprintf("PMP focus good set nextino=%ld mod=%016jx\n", 1554 pmp->inode_tid, pmp->modify_tid); 1555 } 1556 1557 vp = hammer2_igetv(pmp->iroot, cparent, &error); 1558 hammer2_inode_unlock(pmp->iroot, cparent); 1559 *vpp = vp; 1560 if (vp == NULL) 1561 kprintf("vnodefail\n"); 1562 } 1563 1564 return (error); 1565 } 1566 1567 /* 1568 * Filesystem status 1569 * 1570 * XXX incorporate ipdata->meta.inode_quota and data_quota 1571 */ 1572 static 1573 int 1574 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 1575 { 1576 hammer2_pfs_t *pmp; 1577 hammer2_dev_t *hmp; 1578 hammer2_blockref_t bref; 1579 1580 pmp = MPTOPMP(mp); 1581 KKASSERT(pmp->iroot->cluster.nchains >= 1); 1582 hmp = pmp->iroot->cluster.focus->hmp; /* iroot retains focus */ 1583 bref = pmp->iroot->cluster.focus->bref; /* no lock */ 1584 1585 mp->mnt_stat.f_files = bref.inode_count; 1586 mp->mnt_stat.f_ffree = 0; 1587 mp->mnt_stat.f_blocks = (bref.data_count + 1588 hmp->voldata.allocator_free) / 1589 mp->mnt_vstat.f_bsize; 1590 mp->mnt_stat.f_bfree = hmp->voldata.allocator_free / 1591 mp->mnt_vstat.f_bsize; 1592 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree; 1593 1594 *sbp = mp->mnt_stat; 1595 return (0); 1596 } 1597 1598 static 1599 int 1600 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 1601 { 1602 hammer2_pfs_t *pmp; 1603 hammer2_dev_t *hmp; 1604 hammer2_blockref_t bref; 1605 1606 pmp = MPTOPMP(mp); 1607 KKASSERT(pmp->iroot->cluster.nchains >= 1); 1608 hmp = pmp->iroot->cluster.focus->hmp; /* iroot retains focus */ 1609 bref = pmp->iroot->cluster.focus->bref; /* no lock */ 1610 1611 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 1612 mp->mnt_vstat.f_files = bref.inode_count; 1613 mp->mnt_vstat.f_ffree = 0; 1614 mp->mnt_vstat.f_blocks = (bref.data_count + 1615 hmp->voldata.allocator_free) / 1616 mp->mnt_vstat.f_bsize; 1617 mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free / 1618 mp->mnt_vstat.f_bsize; 1619 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree; 1620 1621 *sbp = mp->mnt_vstat; 1622 return (0); 1623 } 1624 1625 /* 1626 * Mount-time recovery (RW mounts) 1627 * 1628 * Updates to the free block table are allowed to lag flushes by one 1629 * transaction. In case of a crash, then on a fresh mount we must do an 1630 * incremental scan of the last committed transaction id and make sure that 1631 * all related blocks have been marked allocated. 1632 * 1633 * The super-root topology and each PFS has its own transaction id domain, 1634 * so we must track PFS boundary transitions. 1635 */ 1636 struct hammer2_recovery_elm { 1637 TAILQ_ENTRY(hammer2_recovery_elm) entry; 1638 hammer2_chain_t *chain; 1639 hammer2_tid_t sync_tid; 1640 }; 1641 1642 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm); 1643 1644 struct hammer2_recovery_info { 1645 struct hammer2_recovery_list list; 1646 int depth; 1647 }; 1648 1649 static int hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_dev_t *hmp, 1650 hammer2_chain_t *parent, 1651 struct hammer2_recovery_info *info, 1652 hammer2_tid_t sync_tid); 1653 1654 #define HAMMER2_RECOVERY_MAXDEPTH 10 1655 1656 static 1657 int 1658 hammer2_recovery(hammer2_dev_t *hmp) 1659 { 1660 hammer2_trans_t trans; 1661 struct hammer2_recovery_info info; 1662 struct hammer2_recovery_elm *elm; 1663 hammer2_chain_t *parent; 1664 hammer2_tid_t sync_tid; 1665 hammer2_tid_t mirror_tid; 1666 int error; 1667 int cumulative_error = 0; 1668 1669 hammer2_trans_init(&trans, hmp->spmp, 0); 1670 1671 sync_tid = hmp->voldata.freemap_tid; 1672 mirror_tid = hmp->voldata.mirror_tid; 1673 1674 kprintf("hammer2 mount \"%s\": ", hmp->devrepname); 1675 if (sync_tid >= mirror_tid) { 1676 kprintf(" no recovery needed\n"); 1677 } else { 1678 kprintf(" freemap recovery %016jx-%016jx\n", 1679 sync_tid + 1, mirror_tid); 1680 } 1681 1682 TAILQ_INIT(&info.list); 1683 info.depth = 0; 1684 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 1685 cumulative_error = hammer2_recovery_scan(&trans, hmp, parent, 1686 &info, sync_tid); 1687 hammer2_chain_lookup_done(parent); 1688 1689 while ((elm = TAILQ_FIRST(&info.list)) != NULL) { 1690 TAILQ_REMOVE(&info.list, elm, entry); 1691 parent = elm->chain; 1692 sync_tid = elm->sync_tid; 1693 kfree(elm, M_HAMMER2); 1694 1695 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 1696 error = hammer2_recovery_scan(&trans, hmp, parent, 1697 &info, 1698 hmp->voldata.freemap_tid); 1699 hammer2_chain_unlock(parent); 1700 hammer2_chain_drop(parent); /* drop elm->chain ref */ 1701 if (error) 1702 cumulative_error = error; 1703 } 1704 hammer2_trans_done(&trans); 1705 1706 return cumulative_error; 1707 } 1708 1709 static 1710 int 1711 hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_dev_t *hmp, 1712 hammer2_chain_t *parent, 1713 struct hammer2_recovery_info *info, 1714 hammer2_tid_t sync_tid) 1715 { 1716 const hammer2_inode_data_t *ripdata; 1717 hammer2_chain_t *chain; 1718 int cache_index; 1719 int cumulative_error = 0; 1720 int error; 1721 1722 /* 1723 * Adjust freemap to ensure that the block(s) are marked allocated. 1724 */ 1725 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) { 1726 hammer2_freemap_adjust(trans, hmp, &parent->bref, 1727 HAMMER2_FREEMAP_DORECOVER); 1728 } 1729 1730 /* 1731 * Check type for recursive scan 1732 */ 1733 switch(parent->bref.type) { 1734 case HAMMER2_BREF_TYPE_VOLUME: 1735 /* data already instantiated */ 1736 break; 1737 case HAMMER2_BREF_TYPE_INODE: 1738 /* 1739 * Must instantiate data for DIRECTDATA test and also 1740 * for recursion. 1741 */ 1742 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 1743 ripdata = &hammer2_chain_rdata(parent)->ipdata; 1744 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) { 1745 /* not applicable to recovery scan */ 1746 hammer2_chain_unlock(parent); 1747 return 0; 1748 } 1749 hammer2_chain_unlock(parent); 1750 break; 1751 case HAMMER2_BREF_TYPE_INDIRECT: 1752 /* 1753 * Must instantiate data for recursion 1754 */ 1755 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 1756 hammer2_chain_unlock(parent); 1757 break; 1758 case HAMMER2_BREF_TYPE_DATA: 1759 case HAMMER2_BREF_TYPE_FREEMAP: 1760 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 1761 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 1762 /* not applicable to recovery scan */ 1763 return 0; 1764 break; 1765 default: 1766 return EDOM; 1767 } 1768 1769 /* 1770 * Defer operation if depth limit reached or if we are crossing a 1771 * PFS boundary. 1772 */ 1773 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) { 1774 struct hammer2_recovery_elm *elm; 1775 1776 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK); 1777 elm->chain = parent; 1778 elm->sync_tid = sync_tid; 1779 hammer2_chain_ref(parent); 1780 TAILQ_INSERT_TAIL(&info->list, elm, entry); 1781 /* unlocked by caller */ 1782 1783 return(0); 1784 } 1785 1786 1787 /* 1788 * Recursive scan of the last flushed transaction only. We are 1789 * doing this without pmp assignments so don't leave the chains 1790 * hanging around after we are done with them. 1791 */ 1792 cache_index = 0; 1793 chain = hammer2_chain_scan(parent, NULL, &cache_index, 1794 HAMMER2_LOOKUP_NODATA); 1795 while (chain) { 1796 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 1797 if (chain->bref.mirror_tid > sync_tid) { 1798 ++info->depth; 1799 error = hammer2_recovery_scan(trans, hmp, chain, 1800 info, sync_tid); 1801 --info->depth; 1802 if (error) 1803 cumulative_error = error; 1804 } 1805 1806 /* 1807 * Flush the recovery at the PFS boundary to stage it for 1808 * the final flush of the super-root topology. 1809 */ 1810 if ((chain->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) && 1811 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) { 1812 hammer2_flush(trans, chain, 1); 1813 } 1814 chain = hammer2_chain_scan(parent, chain, &cache_index, 1815 HAMMER2_LOOKUP_NODATA); 1816 } 1817 1818 return cumulative_error; 1819 } 1820 1821 /* 1822 * Sync a mount point; this is called on a per-mount basis from the 1823 * filesystem syncer process periodically and whenever a user issues 1824 * a sync. 1825 */ 1826 int 1827 hammer2_vfs_sync(struct mount *mp, int waitfor) 1828 { 1829 struct hammer2_sync_info info; 1830 hammer2_inode_t *iroot; 1831 hammer2_chain_t *chain; 1832 hammer2_chain_t *parent; 1833 hammer2_pfs_t *pmp; 1834 hammer2_dev_t *hmp; 1835 int flags; 1836 int error; 1837 int total_error; 1838 int i; 1839 int j; 1840 1841 pmp = MPTOPMP(mp); 1842 iroot = pmp->iroot; 1843 KKASSERT(iroot); 1844 KKASSERT(iroot->pmp == pmp); 1845 1846 /* 1847 * We can't acquire locks on existing vnodes while in a transaction 1848 * without risking a deadlock. This assumes that vfsync() can be 1849 * called without the vnode locked (which it can in DragonFly). 1850 * Otherwise we'd have to implement a multi-pass or flag the lock 1851 * failures and retry. 1852 * 1853 * The reclamation code interlocks with the sync list's token 1854 * (by removing the vnode from the scan list) before unlocking 1855 * the inode, giving us time to ref the inode. 1856 */ 1857 /*flags = VMSC_GETVP;*/ 1858 flags = 0; 1859 if (waitfor & MNT_LAZY) 1860 flags |= VMSC_ONEPASS; 1861 1862 #if 0 1863 /* 1864 * Preflush the vnodes using a normal transaction before interlocking 1865 * with a flush transaction. 1866 */ 1867 hammer2_trans_init(&info.trans, pmp, 0); 1868 info.error = 0; 1869 info.waitfor = MNT_NOWAIT; 1870 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info); 1871 hammer2_trans_done(&info.trans); 1872 #endif 1873 1874 /* 1875 * Start our flush transaction. This does not return until all 1876 * concurrent transactions have completed and will prevent any 1877 * new transactions from running concurrently, except for the 1878 * buffer cache transactions. 1879 * 1880 * For efficiency do an async pass before making sure with a 1881 * synchronous pass on all related buffer cache buffers. It 1882 * should theoretically not be possible for any new file buffers 1883 * to be instantiated during this sequence. 1884 */ 1885 hammer2_trans_init(&info.trans, pmp, HAMMER2_TRANS_ISFLUSH | 1886 HAMMER2_TRANS_PREFLUSH); 1887 hammer2_run_unlinkq(&info.trans, pmp); 1888 1889 info.error = 0; 1890 info.waitfor = MNT_NOWAIT; 1891 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info); 1892 info.waitfor = MNT_WAIT; 1893 vsyncscan(mp, flags, hammer2_sync_scan2, &info); 1894 1895 /* 1896 * Clear PREFLUSH. This prevents (or asserts on) any new logical 1897 * buffer cache flushes which occur during the flush. Device buffers 1898 * are not affected. 1899 */ 1900 hammer2_bioq_sync(info.trans.pmp); 1901 atomic_clear_int(&info.trans.flags, HAMMER2_TRANS_PREFLUSH); 1902 1903 total_error = 0; 1904 1905 /* 1906 * Flush all nodes to synchronize the PFSROOT subtopology to the media. 1907 * 1908 * Note that this flush will not be visible on crash recovery until 1909 * we flush the super-root topology in the next loop. 1910 */ 1911 for (i = 0; iroot && i < iroot->cluster.nchains; ++i) { 1912 chain = iroot->cluster.array[i].chain; 1913 if (chain == NULL) 1914 continue; 1915 1916 hammer2_chain_ref(chain); 1917 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS); 1918 if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) { 1919 hammer2_flush(&info.trans, chain, 1); 1920 parent = chain->parent; 1921 KKASSERT(chain->pmp != parent->pmp); 1922 hammer2_chain_setflush(&info.trans, parent); 1923 } 1924 hammer2_chain_unlock(chain); 1925 hammer2_chain_drop(chain); 1926 } 1927 hammer2_trans_done(&info.trans); 1928 1929 /* 1930 * Flush all volume roots to synchronize PFS flushes with the 1931 * storage media volume header. This will flush the freemap and 1932 * the superroot topology but stops when it reaches a PFSROOT 1933 * (which we already flushed above). 1934 * 1935 * This is the last step which connects the volume root to the 1936 * PFSROOT dirs flushed above. 1937 * 1938 * Each spmp (representing the hmp's super-root) requires its own 1939 * transaction. 1940 */ 1941 for (i = 0; iroot && i < iroot->cluster.nchains; ++i) { 1942 hammer2_chain_t *tmp; 1943 1944 chain = iroot->cluster.array[i].chain; 1945 if (chain == NULL) 1946 continue; 1947 1948 hmp = chain->hmp; 1949 1950 /* 1951 * We only have to flush each hmp once 1952 */ 1953 for (j = i - 1; j >= 0; --j) { 1954 if ((tmp = iroot->cluster.array[j].chain) != NULL) { 1955 if (tmp->hmp == hmp) 1956 break; 1957 } 1958 } 1959 if (j >= 0) 1960 continue; 1961 1962 /* 1963 * spmp transaction. The super-root is never directly 1964 * mounted so there shouldn't be any vnodes, let alone any 1965 * dirty vnodes associated with it. 1966 */ 1967 hammer2_trans_init(&info.trans, hmp->spmp, 1968 HAMMER2_TRANS_ISFLUSH); 1969 1970 /* 1971 * Media mounts have two 'roots', vchain for the topology 1972 * and fchain for the free block table. Flush both. 1973 * 1974 * Note that the topology and free block table are handled 1975 * independently, so the free block table can wind up being 1976 * ahead of the topology. We depend on the bulk free scan 1977 * code to deal with any loose ends. 1978 */ 1979 hammer2_chain_ref(&hmp->vchain); 1980 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS); 1981 hammer2_chain_ref(&hmp->fchain); 1982 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS); 1983 if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 1984 /* 1985 * This will also modify vchain as a side effect, 1986 * mark vchain as modified now. 1987 */ 1988 hammer2_voldata_modify(hmp); 1989 chain = &hmp->fchain; 1990 hammer2_flush(&info.trans, chain, 1); 1991 KKASSERT(chain == &hmp->fchain); 1992 } 1993 hammer2_chain_unlock(&hmp->fchain); 1994 hammer2_chain_unlock(&hmp->vchain); 1995 hammer2_chain_drop(&hmp->fchain); 1996 /* vchain dropped down below */ 1997 1998 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS); 1999 if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 2000 chain = &hmp->vchain; 2001 hammer2_flush(&info.trans, chain, 1); 2002 KKASSERT(chain == &hmp->vchain); 2003 } 2004 hammer2_chain_unlock(&hmp->vchain); 2005 hammer2_chain_drop(&hmp->vchain); 2006 2007 error = 0; 2008 2009 /* 2010 * We can't safely flush the volume header until we have 2011 * flushed any device buffers which have built up. 2012 * 2013 * XXX this isn't being incremental 2014 */ 2015 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY); 2016 error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0); 2017 vn_unlock(hmp->devvp); 2018 2019 /* 2020 * The flush code sets CHAIN_VOLUMESYNC to indicate that the 2021 * volume header needs synchronization via hmp->volsync. 2022 * 2023 * XXX synchronize the flag & data with only this flush XXX 2024 */ 2025 if (error == 0 && 2026 (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) { 2027 struct buf *bp; 2028 2029 /* 2030 * Synchronize the disk before flushing the volume 2031 * header. 2032 */ 2033 bp = getpbuf(NULL); 2034 bp->b_bio1.bio_offset = 0; 2035 bp->b_bufsize = 0; 2036 bp->b_bcount = 0; 2037 bp->b_cmd = BUF_CMD_FLUSH; 2038 bp->b_bio1.bio_done = biodone_sync; 2039 bp->b_bio1.bio_flags |= BIO_SYNC; 2040 vn_strategy(hmp->devvp, &bp->b_bio1); 2041 biowait(&bp->b_bio1, "h2vol"); 2042 relpbuf(bp, NULL); 2043 2044 /* 2045 * Then we can safely flush the version of the 2046 * volume header synchronized by the flush code. 2047 */ 2048 i = hmp->volhdrno + 1; 2049 if (i >= HAMMER2_NUM_VOLHDRS) 2050 i = 0; 2051 if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE > 2052 hmp->volsync.volu_size) { 2053 i = 0; 2054 } 2055 kprintf("sync volhdr %d %jd\n", 2056 i, (intmax_t)hmp->volsync.volu_size); 2057 bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64, 2058 HAMMER2_PBUFSIZE, 0, 0); 2059 atomic_clear_int(&hmp->vchain.flags, 2060 HAMMER2_CHAIN_VOLUMESYNC); 2061 bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE); 2062 bawrite(bp); 2063 hmp->volhdrno = i; 2064 } 2065 if (error) 2066 total_error = error; 2067 2068 hammer2_trans_done(&info.trans); /* spmp trans */ 2069 } 2070 return (total_error); 2071 } 2072 2073 /* 2074 * Sync passes. 2075 */ 2076 static int 2077 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data) 2078 { 2079 struct hammer2_sync_info *info = data; 2080 hammer2_inode_t *ip; 2081 int error; 2082 2083 /* 2084 * Degenerate cases. Note that ip == NULL typically means the 2085 * syncer vnode itself and we don't want to vclrisdirty() in that 2086 * situation. 2087 */ 2088 ip = VTOI(vp); 2089 if (ip == NULL) { 2090 return(0); 2091 } 2092 if (vp->v_type == VNON || vp->v_type == VBAD) { 2093 vclrisdirty(vp); 2094 return(0); 2095 } 2096 2097 /* 2098 * VOP_FSYNC will start a new transaction so replicate some code 2099 * here to do it inline (see hammer2_vop_fsync()). 2100 * 2101 * WARNING: The vfsync interacts with the buffer cache and might 2102 * block, we can't hold the inode lock at that time. 2103 * However, we MUST ref ip before blocking to ensure that 2104 * it isn't ripped out from under us (since we do not 2105 * hold a lock on the vnode). 2106 */ 2107 hammer2_inode_ref(ip); 2108 if ((ip->flags & HAMMER2_INODE_MODIFIED) || 2109 !RB_EMPTY(&vp->v_rbdirty_tree)) { 2110 vfsync(vp, info->waitfor, 1, NULL, NULL); 2111 hammer2_inode_fsync(&info->trans, ip, NULL); 2112 } 2113 if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 && 2114 RB_EMPTY(&vp->v_rbdirty_tree)) { 2115 vclrisdirty(vp); 2116 } 2117 2118 hammer2_inode_drop(ip); 2119 #if 1 2120 error = 0; 2121 if (error) 2122 info->error = error; 2123 #endif 2124 return(0); 2125 } 2126 2127 static 2128 int 2129 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp) 2130 { 2131 return (0); 2132 } 2133 2134 static 2135 int 2136 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 2137 struct fid *fhp, struct vnode **vpp) 2138 { 2139 return (0); 2140 } 2141 2142 static 2143 int 2144 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 2145 int *exflagsp, struct ucred **credanonp) 2146 { 2147 return (0); 2148 } 2149 2150 /* 2151 * Support code for hammer2_vfs_mount(). Read, verify, and install the volume 2152 * header into the HMP 2153 * 2154 * XXX read four volhdrs and use the one with the highest TID whos CRC 2155 * matches. 2156 * 2157 * XXX check iCRCs. 2158 * 2159 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to 2160 * nonexistant locations. 2161 * 2162 * XXX Record selected volhdr and ring updates to each of 4 volhdrs 2163 */ 2164 static 2165 int 2166 hammer2_install_volume_header(hammer2_dev_t *hmp) 2167 { 2168 hammer2_volume_data_t *vd; 2169 struct buf *bp; 2170 hammer2_crc32_t crc0, crc, bcrc0, bcrc; 2171 int error_reported; 2172 int error; 2173 int valid; 2174 int i; 2175 2176 error_reported = 0; 2177 error = 0; 2178 valid = 0; 2179 bp = NULL; 2180 2181 /* 2182 * There are up to 4 copies of the volume header (syncs iterate 2183 * between them so there is no single master). We don't trust the 2184 * volu_size field so we don't know precisely how large the filesystem 2185 * is, so depend on the OS to return an error if we go beyond the 2186 * block device's EOF. 2187 */ 2188 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) { 2189 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64, 2190 HAMMER2_VOLUME_BYTES, &bp); 2191 if (error) { 2192 brelse(bp); 2193 bp = NULL; 2194 continue; 2195 } 2196 2197 vd = (struct hammer2_volume_data *) bp->b_data; 2198 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) && 2199 (vd->magic != HAMMER2_VOLUME_ID_ABO)) { 2200 brelse(bp); 2201 bp = NULL; 2202 continue; 2203 } 2204 2205 if (vd->magic == HAMMER2_VOLUME_ID_ABO) { 2206 /* XXX: Reversed-endianness filesystem */ 2207 kprintf("hammer2: reverse-endian filesystem detected"); 2208 brelse(bp); 2209 bp = NULL; 2210 continue; 2211 } 2212 2213 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0]; 2214 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF, 2215 HAMMER2_VOLUME_ICRC0_SIZE); 2216 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1]; 2217 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF, 2218 HAMMER2_VOLUME_ICRC1_SIZE); 2219 if ((crc0 != crc) || (bcrc0 != bcrc)) { 2220 kprintf("hammer2 volume header crc " 2221 "mismatch copy #%d %08x/%08x\n", 2222 i, crc0, crc); 2223 error_reported = 1; 2224 brelse(bp); 2225 bp = NULL; 2226 continue; 2227 } 2228 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) { 2229 valid = 1; 2230 hmp->voldata = *vd; 2231 hmp->volhdrno = i; 2232 } 2233 brelse(bp); 2234 bp = NULL; 2235 } 2236 if (valid) { 2237 hmp->volsync = hmp->voldata; 2238 error = 0; 2239 if (error_reported || bootverbose || 1) { /* 1/DEBUG */ 2240 kprintf("hammer2: using volume header #%d\n", 2241 hmp->volhdrno); 2242 } 2243 } else { 2244 error = EINVAL; 2245 kprintf("hammer2: no valid volume headers found!\n"); 2246 } 2247 return (error); 2248 } 2249 2250 /* 2251 * This handles hysteresis on regular file flushes. Because the BIOs are 2252 * routed to a thread it is possible for an excessive number to build up 2253 * and cause long front-end stalls long before the runningbuffspace limit 2254 * is hit, so we implement hammer2_flush_pipe to control the 2255 * hysteresis. 2256 * 2257 * This is a particular problem when compression is used. 2258 */ 2259 void 2260 hammer2_lwinprog_ref(hammer2_pfs_t *pmp) 2261 { 2262 atomic_add_int(&pmp->count_lwinprog, 1); 2263 } 2264 2265 void 2266 hammer2_lwinprog_drop(hammer2_pfs_t *pmp) 2267 { 2268 int lwinprog; 2269 2270 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1); 2271 if ((lwinprog & HAMMER2_LWINPROG_WAITING) && 2272 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) { 2273 atomic_clear_int(&pmp->count_lwinprog, 2274 HAMMER2_LWINPROG_WAITING); 2275 wakeup(&pmp->count_lwinprog); 2276 } 2277 } 2278 2279 void 2280 hammer2_lwinprog_wait(hammer2_pfs_t *pmp) 2281 { 2282 int lwinprog; 2283 2284 for (;;) { 2285 lwinprog = pmp->count_lwinprog; 2286 cpu_ccfence(); 2287 if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe) 2288 break; 2289 tsleep_interlock(&pmp->count_lwinprog, 0); 2290 atomic_set_int(&pmp->count_lwinprog, HAMMER2_LWINPROG_WAITING); 2291 lwinprog = pmp->count_lwinprog; 2292 if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe) 2293 break; 2294 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz); 2295 } 2296 } 2297 2298 /* 2299 * Manage excessive memory resource use for chain and related 2300 * structures. 2301 */ 2302 void 2303 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp) 2304 { 2305 uint32_t waiting; 2306 uint32_t count; 2307 uint32_t limit; 2308 #if 0 2309 static int zzticks; 2310 #endif 2311 2312 /* 2313 * Atomic check condition and wait. Also do an early speedup of 2314 * the syncer to try to avoid hitting the wait. 2315 */ 2316 for (;;) { 2317 waiting = pmp->inmem_dirty_chains; 2318 cpu_ccfence(); 2319 count = waiting & HAMMER2_DIRTYCHAIN_MASK; 2320 2321 limit = pmp->mp->mnt_nvnodelistsize / 10; 2322 if (limit < hammer2_limit_dirty_chains) 2323 limit = hammer2_limit_dirty_chains; 2324 if (limit < 1000) 2325 limit = 1000; 2326 2327 #if 0 2328 if ((int)(ticks - zzticks) > hz) { 2329 zzticks = ticks; 2330 kprintf("count %ld %ld\n", count, limit); 2331 } 2332 #endif 2333 2334 /* 2335 * Block if there are too many dirty chains present, wait 2336 * for the flush to clean some out. 2337 */ 2338 if (count > limit) { 2339 tsleep_interlock(&pmp->inmem_dirty_chains, 0); 2340 if (atomic_cmpset_int(&pmp->inmem_dirty_chains, 2341 waiting, 2342 waiting | HAMMER2_DIRTYCHAIN_WAITING)) { 2343 speedup_syncer(pmp->mp); 2344 tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED, 2345 "chnmem", hz); 2346 } 2347 continue; /* loop on success or fail */ 2348 } 2349 2350 /* 2351 * Try to start an early flush before we are forced to block. 2352 */ 2353 if (count > limit * 7 / 10) 2354 speedup_syncer(pmp->mp); 2355 break; 2356 } 2357 } 2358 2359 void 2360 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp) 2361 { 2362 if (pmp) { 2363 atomic_add_int(&pmp->inmem_dirty_chains, 1); 2364 } 2365 } 2366 2367 void 2368 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp) 2369 { 2370 uint32_t waiting; 2371 2372 if (pmp == NULL) 2373 return; 2374 2375 for (;;) { 2376 waiting = pmp->inmem_dirty_chains; 2377 cpu_ccfence(); 2378 if (atomic_cmpset_int(&pmp->inmem_dirty_chains, 2379 waiting, 2380 (waiting - 1) & 2381 ~HAMMER2_DIRTYCHAIN_WAITING)) { 2382 break; 2383 } 2384 } 2385 2386 if (waiting & HAMMER2_DIRTYCHAIN_WAITING) 2387 wakeup(&pmp->inmem_dirty_chains); 2388 } 2389 2390 /* 2391 * Debugging 2392 */ 2393 void 2394 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx) 2395 { 2396 hammer2_chain_t *scan; 2397 hammer2_chain_t *parent; 2398 2399 --*countp; 2400 if (*countp == 0) { 2401 kprintf("%*.*s...\n", tab, tab, ""); 2402 return; 2403 } 2404 if (*countp < 0) 2405 return; 2406 kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n", 2407 tab, tab, "", pfx, 2408 chain, chain->bref.type, 2409 chain->bref.key, chain->bref.keybits, 2410 chain->bref.mirror_tid); 2411 2412 kprintf("%*.*s [%08x] (%s) refs=%d", 2413 tab, tab, "", 2414 chain->flags, 2415 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE && 2416 chain->data) ? (char *)chain->data->ipdata.filename : "?"), 2417 chain->refs); 2418 2419 parent = chain->parent; 2420 if (parent) 2421 kprintf("\n%*.*s p=%p [pflags %08x prefs %d", 2422 tab, tab, "", 2423 parent, parent->flags, parent->refs); 2424 if (RB_EMPTY(&chain->core.rbtree)) { 2425 kprintf("\n"); 2426 } else { 2427 kprintf(" {\n"); 2428 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) 2429 hammer2_dump_chain(scan, tab + 4, countp, 'a'); 2430 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data) 2431 kprintf("%*.*s}(%s)\n", tab, tab, "", 2432 chain->data->ipdata.filename); 2433 else 2434 kprintf("%*.*s}\n", tab, tab, ""); 2435 } 2436 } 2437