/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/uuid.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>

#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>

#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {
	int error;
	int waitfor;
};

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct lock hammer2_mntlk;

int hammer2_debug;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_inval_enable = 0;
int hammer2_flush_pipe = 100;
int hammer2_synchronous_flush = 1;
int hammer2_dio_count;
int hammer2_limit_dio = 256;
int hammer2_bulkfree_tps = 5000;
long hammer2_chain_allocs;
long hammer2_chain_frees;
long hammer2_limit_dirty_chains;
long hammer2_count_modified_chains;
long hammer2_iod_invals;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, inval_enable, CTLFLAG_RW,
	   &hammer2_inval_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW,
	   &hammer2_synchronous_flush, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_frees, CTLFLAG_RW,
	   &hammer2_chain_frees, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, limit_dio, CTLFLAG_RW,
	   &hammer2_limit_dio, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_invals, CTLFLAG_RW,
	   &hammer2_iod_invals, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");

long hammer2_check_icrc32;
long hammer2_check_xxhash64;
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, check_icrc32, CTLFLAG_RW,
	   &hammer2_check_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, check_xxhash64, CTLFLAG_RW,
	   &hammer2_check_xxhash64, 0, "");

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct vnode *, struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_dev_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;

	int error;

	error = 0;

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO cache large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_limit_dio = nbuf * 2;
	if (hammer2_limit_dio > 100000)
		hammer2_limit_dio = 100000;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
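	 *
	 * Illustrative consumer pattern (a sketch, not the actual XOP code;
	 * a recycled object comes back exactly as it was when put):
	 *
	 *	xop = objcache_get(cache_xops, M_WAITOK);
	 *	... use the object; only the first allocation is zeroed ...
	 *	... reset the FIFO indices before returning it ...
	 *	objcache_put(cache_xops, xop);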
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);


	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid, hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0], ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create(&pmp->minode, "HAMMER2-inodes");
		kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
		lockinit(&pmp->lock, "pfslk", 0, 0);
		lockinit(&pmp->lock_nlink, "h2nlink", 0, 0);
		spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		spin_init(&pmp->xop_spin, "h2xop");
		spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->sideq);
		TAILQ_INIT(&pmp->lru_list);
		spin_init(&pmp->list_spin, "hm2pfsalloc_list");

		/*
		 * Distribute backend operations to threads
		 */
		for (i = 0; i < HAMMER2_XOPGROUPS; ++i)
			hammer2_xop_group_init(pmp, &pmp->xop_groups[i]);

		/*
		 * Save the last media transaction id for the flusher.  Set
Set 404 * initial 405 */ 406 if (ripdata) 407 pmp->pfs_clid = ripdata->meta.pfs_clid; 408 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry); 409 410 /* 411 * The synchronization thread may start too early, make 412 * sure it stays frozen until we are ready to let it go. 413 * XXX 414 */ 415 /* 416 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN | 417 HAMMER2_THREAD_REMASTER; 418 */ 419 } 420 421 /* 422 * Create the PFS's root inode and any missing XOP helper threads. 423 */ 424 if ((iroot = pmp->iroot) == NULL) { 425 iroot = hammer2_inode_get(pmp, NULL, NULL, -1); 426 if (ripdata) 427 iroot->meta = ripdata->meta; 428 pmp->iroot = iroot; 429 hammer2_inode_ref(iroot); 430 hammer2_inode_unlock(iroot); 431 } 432 433 /* 434 * Stop here if no chain is passed in. 435 */ 436 if (chain == NULL) 437 goto done; 438 439 /* 440 * When a chain is passed in we must add it to the PFS's root 441 * inode, update pmp->pfs_types[], and update the syncronization 442 * threads. 443 * 444 * When forcing local mode, mark the PFS as a MASTER regardless. 445 * 446 * At the moment empty spots can develop due to removals or failures. 447 * Ultimately we want to re-fill these spots but doing so might 448 * confused running code. XXX 449 */ 450 hammer2_inode_ref(iroot); 451 hammer2_mtx_ex(&iroot->lock); 452 j = iroot->cluster.nchains; 453 454 if (j == HAMMER2_MAXCLUSTER) { 455 kprintf("hammer2_mount: cluster full!\n"); 456 /* XXX fatal error? */ 457 } else { 458 KKASSERT(chain->pmp == NULL); 459 chain->pmp = pmp; 460 hammer2_chain_ref(chain); 461 iroot->cluster.array[j].chain = chain; 462 if (force_local) 463 pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER; 464 else 465 pmp->pfs_types[j] = ripdata->meta.pfs_type; 466 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2); 467 pmp->pfs_hmps[j] = chain->hmp; 468 469 /* 470 * If the PFS is already mounted we must account 471 * for the mount_count here. 472 */ 473 if (pmp->mp) 474 ++chain->hmp->mount_count; 475 476 /* 477 * May have to fixup dirty chain tracking. Previous 478 * pmp was NULL so nothing to undo. 479 */ 480 if (chain->flags & HAMMER2_CHAIN_MODIFIED) 481 hammer2_pfs_memory_inc(pmp); 482 ++j; 483 } 484 iroot->cluster.nchains = j; 485 486 /* 487 * Update nmasters from any PFS inode which is part of the cluster. 488 * It is possible that this will result in a value which is too 489 * high. MASTER PFSs are authoritative for pfs_nmasters and will 490 * override this value later on. 491 * 492 * (This informs us of masters that might not currently be 493 * discoverable by this mount). 494 */ 495 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) { 496 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters; 497 } 498 499 /* 500 * Count visible masters. Masters are usually added with 501 * ripdata->meta.pfs_nmasters set to 1. This detects when there 502 * are more (XXX and must update the master inodes). 503 */ 504 count = 0; 505 for (i = 0; i < iroot->cluster.nchains; ++i) { 506 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) 507 ++count; 508 } 509 if (pmp->pfs_nmasters < count) 510 pmp->pfs_nmasters = count; 511 512 /* 513 * Create missing synchronization and support threads. 514 * 515 * Single-node masters (including snapshots) have nothing to 516 * synchronize and do not require this thread. 517 * 518 * Multi-node masters or any number of soft masters, slaves, copy, 519 * or other PFS types need the thread. 520 * 521 * Each thread is responsible for its particular cluster index. 
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
			hammer2_thr_delete(&pmp->xop_groups[j].thrs[clindex]);
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
#if REPORT_REFS_ERRORS
		if (pmp->iroot->refs != 1)
			kprintf("PMP->IROOT %p REFS WRONG %d\n",
				pmp->iroot, pmp->iroot->refs);
#else
		KKASSERT(pmp->iroot->refs == 1);
#endif
		/* ref for pmp->iroot */
		hammer2_inode_drop(pmp->iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
	}

	/*
	 * Free remaining pmp resources
	 */
	kmalloc_destroy(&pmp->mmsg);
	kmalloc_destroy(&pmp->minode);

	kfree(pmp, M_HAMMER2);
}

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int didfreeze;
	int i;
	int j;

again:
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
		hammer2_inode_run_sideq(pmp, 1);
		hammer2_bioq_sync(pmp);
		hammer2_trans_done(pmp);
		if (hmp->spmp == pmp) {
			hmp->spmp = NULL;
			hmp->vchain.pmp = NULL;
			hmp->fchain.pmp = NULL;
		}

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i != HAMMER2_MAXCLUSTER) {
			/*
			 * Make sure all synchronization threads are locked
			 * down.
			 */
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_freeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}

			/*
			 * Lock the inode and clean out matching chains.
			 * Note that we cannot use hammer2_inode_lock_*()
			 * here because that would attempt to validate the
			 * cluster that we are in the middle of ripping
			 * apart.
			 *
			 * WARNING! We are working directly on the inode's
			 *	    embedded cluster.
			 */
			hammer2_mtx_ex(&iroot->lock);

			/*
			 * Remove the chain from matching elements of the PFS.
			 */
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] != hmp)
					continue;
				hammer2_thr_delete(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
				rchain = iroot->cluster.array[i].chain;
				iroot->cluster.array[i].chain = NULL;
				pmp->pfs_types[i] = 0;
				if (pmp->pfs_names[i]) {
					kfree(pmp->pfs_names[i], M_HAMMER2);
					pmp->pfs_names[i] = NULL;
				}
				if (rchain) {
					hammer2_chain_drop(rchain);
					/* focus hint */
					if (iroot->cluster.focus == rchain)
						iroot->cluster.focus = NULL;
				}
				pmp->pfs_hmps[i] = NULL;
			}
			hammer2_mtx_unlock(&iroot->lock);
			didfreeze = 1;	/* remaster, unfreeze down below */
		} else {
			didfreeze = 0;
		}

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		if (didfreeze) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_remaster(&pmp->sync_thrs[i]);
				hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;
	const hammer2_inode_data_t *ripdata;
	hammer2_blockref_t bref;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		kprintf("hammer2_mount: root '%s'\n", devstr);
	} else {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);

		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
	}

	/*
	 * Extract device and label, automatically mount @BOOT, @ROOT, or @DATA
	 * if no label specified, based on the partition id.  Error out if no
	 * label or device (with partition id) is specified.  This is strictly
	 * a convenience to match the default label created by newfs_hammer2,
	 * our preference is that a label always be specified.
	 *
	 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
	 *	 that does not specify a device, as long as some H2 label
	 *	 has already been mounted from that device.  This makes
	 *	 mounting snapshots a lot easier.
	 */
	dev = devstr;
	label = strchr(devstr, '@');
	if (label && ((label + 1) - dev) > done)
		return (EINVAL);
	if (label == NULL || label[1] == 0) {
		char slice;

		if (label == NULL)
			label = devstr + strlen(devstr);
		slice = label[-1];
		switch(slice) {
		case 'a':
			label = "BOOT";
			break;
		case 'd':
			label = "ROOT";
			break;
		default:
			label = "DATA";
			break;
		}
	} else {
		*label = '\0';
		label++;
	}

	kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n",
		dev, label, (mp->mnt_flag & MNT_RDONLY));

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			devvp = hmp->devvp;
			error = hammer2_remount(hmp, mp, path,
						devvp, cred);
			if (error)
				break;
		}

		return error;
	}

	/*
	 * HMP device mount
	 *
	 * If a path is specified and dev is not an empty string, lookup the
	 * name and verify that it refers to a block device.
	 *
	 * If a path is specified and dev is an empty string we fall through
	 * and locate the label in the hmp search.
	 */
	if (path && *dev != 0) {
		error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
		nlookup_done(&nd);
	} else if (path == NULL) {
		/* root mount */
		cdev_t cdev = kgetdiskbyname(dev);
		error = bdevvp(cdev, &devvp);
		if (error)
			kprintf("hammer2: cannot find '%s'\n", dev);
	} else {
		/*
		 * We will locate the hmp using the label in the hmp loop.
		 */
		error = 0;
	}

	/*
	 * Make sure it's a block device.  Do not check to see if it is
	 * already mounted until we determine that it's a fresh H2 device.
	 */
	if (error == 0 && devvp) {
		vn_isdisk(devvp, &error);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (devvp) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->devvp == devvp)
				break;
			if (devvp->v_rdev &&
			    hmp->devvp->v_rdev == devvp->v_rdev) {
				break;
			}
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL)
			error = vfs_mountedon(devvp);
	} else if (error == 0) {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL)
			error = ENOENT;
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xid_t xid;

		if (error == 0 && vcount(devvp) > 0) {
			kprintf("Primary device already has references\n");
			error = EBUSY;
		}

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
					     (ronly ? FREAD : FREAD | FWRITE),
					     FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		spin_init(&hmp->io_spin, "hm2mount_io");
		spin_init(&hmp->list_spin, "hm2mount_list");
		TAILQ_INIT(&hmp->flushq);

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;

		hammer2_chain_core_init(&hmp->vchain);
		/* hmp->vchain.u.xxx is left NULL */

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);

		hammer2_chain_core_init(&hmp->fchain);
		/* hmp->fchain.u.xxx is left NULL */

		/*
		 * Install the volume header and initialize fields from
		 * voldata.
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}

		/*
		 * Really important to get these right or flush will get
		 * confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL);
		kprintf("alloc spmp %p tid %016jx\n",
			hmp->spmp, hmp->voldata.mirror_tid);
		spmp = hmp->spmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		xid = 0;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory keyspace
		 * represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &hammer2_chain_rdata(schain)->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
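		 * (The code below unlocks both the inode and the cluster
		 * again once the new iroot has been installed and
		 * referenced.)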
		 */
		cluster = hammer2_cluster_from_chain(schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = NULL;
		spmp->iroot = hammer2_inode_get(spmp, NULL, cluster, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(cluster);
		hammer2_cluster_drop(cluster);
		schain = NULL;
		/* leave spmp->iroot with one ref */

		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			error = hammer2_recovery(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		if (error)
			kprintf("hammer2_mount: PFS label I/O error\n");
		else
			kprintf("hammer2_mount: PFS label not found\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EINVAL;
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		bref = chain->bref;
		pmp = hammer2_pfsalloc(NULL, ripdata,
				       bref.modify_tid, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->bref.type != HAMMER2_BREF_TYPE_INODE)
			continue;
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else {
			ripdata = &chain->data->ipdata;
			bref = chain->bref;

			pmp = hammer2_pfsalloc(chain, ripdata,
					       bref.modify_tid, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct vnode *devvp, struct ucred *cred)
{
	int error;

	if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = hammer2_recovery(hmp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
			hmp->ronly = 0;
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
	} else {
		error = 0;
	}
	return error;
}

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
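	 * Each chain in the iroot cluster may be backed by a different
	 * device (hmp), so the loop below bumps the count on every
	 * backing device individually.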
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
		kprintf("hammer2_mount hmp=%p ++mount_count=%d\n",
			rchain->hmp, rchain->hmp->mount_count);
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Mount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	struct vnode *devvp;
	int dumpcnt;
	int ronly;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have
	 * to disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp);
	hammer2_dev_exlock(hmp);	/* XXX order */

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFS's might still
	 * have some dirty chains on them.
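	 *
	 * The freemap (fchain) is flushed before the volume topology
	 * (vchain) below, each with FLUSH_TOP | FLUSH_ALL.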
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP | HAMMER2_FLUSH_ALL);
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP | HAMMER2_FLUSH_ALL);
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over "
			"after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if ((devvp = hmp->devvp) != NULL) {
		ronly = hmp->ronly;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		kprintf("hammer2_unmount(A): devvp %s rbdirty %p ronly=%d\n",
			hmp->devrepname, RB_ROOT(&devvp->v_rbdirty_tree),
			ronly);
		vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
		kprintf("hammer2_unmount(B): devvp %s rbdirty %p\n",
			hmp->devrepname, RB_ROOT(&devvp->v_rbdirty_tree));
		hmp->devvp = NULL;
		VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL);
		vn_unlock(devvp);
		vrele(devvp);
		devvp = NULL;
	}

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v');
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f');
	hammer2_dev_unlock(hmp);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_cleanup(hmp, &hmp->iotree);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy(&hmp->mchain);
	kfree(hmp, M_HAMMER2);
}

int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	hammer2_xop_lookup_t *xop;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	int error;

	inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

	error = 0;
	pmp = MPTOPMP(mp);

	/*
	 * Easy if we already have it cached
	 */
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* from lookup */

		return error;
	}

	/*
	 * Otherwise we have to find the inode
	 */
	xop = hammer2_xop_alloc(pmp->iroot, 0);
	xop->lhc = inum;
	hammer2_xop_start(&xop->head, hammer2_xop_lookup);
	error = hammer2_xop_collect(&xop->head, 0);

	if (error == 0) {
		if (hammer2_cluster_rdata(&xop->head.cluster) == NULL) {
			kprintf("vget: no collect error but also no rdata\n");
			kprintf("xop %p\n", xop);
			while ((hammer2_debug & 0x80000) == 0) {
				tsleep(xop, PCATCH, "wait", hz * 10);
			}
			ip = NULL;
		} else {
			ip = hammer2_inode_get(pmp, NULL, &xop->head.cluster, -1);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	if (ip) {
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
	} else {
		*vpp = NULL;
		error = ENOENT;
	}
	return (error);
}

static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		*vpp = NULL;
		return EINVAL;
	}

	error = 0;
	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

	while (pmp->inode_tid == 0) {
		hammer2_xop_ipcluster_t *xop;
		hammer2_inode_meta_t *meta;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, hammer2_xop_ipcluster);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			meta = &xop->head.cluster.focus->data->ipdata.meta;
			pmp->iroot->meta = *meta;
			pmp->inode_tid = meta->pfs_inum + 1;
			if (pmp->inode_tid < HAMMER2_INODE_START)
				pmp->inode_tid = HAMMER2_INODE_START;
			pmp->modify_tid =
				xop->head.cluster.focus->bref.modify_tid + 1;
			kprintf("PFS: Starting inode %jd\n",
				(intmax_t)pmp->inode_tid);
			kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
				pmp->inode_tid, pmp->modify_tid);
			wakeup(&pmp->iroot);

			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

			/*
			 * Prime the mount info.
			 */
			hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
			break;
		}

		/*
		 * Loop, try again
		 */
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->iroot);
		error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
		hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
		if (error == EINTR)
			break;
	}

	if (error) {
		hammer2_inode_unlock(pmp->iroot);
		*vpp = NULL;
	} else {
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock(pmp->iroot);
		*vpp = vp;
	}

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	mp->mnt_stat.f_files = 0;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = 0;
	mp->mnt_stat.f_bfree = 0;
	mp->mnt_stat.f_bavail = 0;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		mp->mnt_stat.f_files = bref.embed.stats.inode_count;
		mp->mnt_stat.f_ffree = 0;
		mp->mnt_stat.f_blocks = hmp->voldata.allocator_size /
					mp->mnt_vstat.f_bsize;
		mp->mnt_stat.f_bfree = hmp->voldata.allocator_free /
					mp->mnt_vstat.f_bsize;
		mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			mp->mnt_stat.f_blocks -= adj;
			mp->mnt_stat.f_bfree -= adj;
			mp->mnt_stat.f_bavail -= adj;
		}

		*sbp = mp->mnt_stat;
	}
	return (0);
}

static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	mp->mnt_vstat.f_bsize = 0;
	mp->mnt_vstat.f_files = 0;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = 0;
	mp->mnt_vstat.f_bfree = 0;
	mp->mnt_vstat.f_bavail = 0;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
		mp->mnt_vstat.f_files = bref.embed.stats.inode_count;
		mp->mnt_vstat.f_ffree = 0;
		mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size /
					 mp->mnt_vstat.f_bsize;
		mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free /
					 mp->mnt_vstat.f_bsize;
		mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			mp->mnt_vstat.f_blocks -= adj;
			mp->mnt_vstat.f_bfree -= adj;
			mp->mnt_vstat.f_bavail -= adj;
		}

		*sbp = mp->mnt_vstat;
	}
	return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  After a crash, the fresh mount must do an incremental
 * scan from the last committed transaction id and make sure that all
 * related blocks have been marked allocated.
 *
 * The super-root topology and each PFS have their own transaction id
 * domains, so we must track PFS boundary transitions.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	hammer2_tid_t mtid;
	int depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;

	hammer2_trans_init(hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2 mount \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf(" no recovery needed\n");
	} else {
		kprintf(" freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error |= hammer2_recovery_scan(hmp, parent, &info,
					       hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
	}
	hammer2_trans_done(hmp->spmp);
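
	/*
	 * 'error' accumulates the result of the initial scan plus any
	 * deferred sub-scans processed from the list above.
	 */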
	return error;
}

static
int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int tmp_error;
	int rup_error;
	int error;
	int first;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &hammer2_chain_rdata(parent)->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
		break;
	default:
		return HAMMER2_ERROR_BADBREF;
	}

	/*
	 * Defer operation if depth limit reached or if we are crossing a
	 * PFS boundary.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return(0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 *
	 * error	Cumulative error this level only
	 * rup_error	Cumulative error for recursion
	 * tmp_error	Specific non-cumulative recursion error
	 */
	chain = NULL;
	first = 1;
	rup_error = 0;
	error = 0;

	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref,
					    &first,
					    HAMMER2_LOOKUP_NODATA);

		/*
		 * Problem during scan or EOF
		 */
		if (error)
			break;

		/*
		 * If this is a leaf
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						    HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			tmp_error = hammer2_recovery_scan(hmp, chain,
							  info, sync_tid);
			--info->depth;
		} else {
			tmp_error = 0;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
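		 * A PFS boundary is recognized by HAMMER2_BREF_FLAG_PFSROOT
		 * being set in the blockref flags.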
		 */
		if (tmp_error == 0 &&
		    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		}
		rup_error |= tmp_error;
	}
	return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}

/*
 * Sync a mount point; this is called on a per-mount basis from the
 * filesystem syncer process periodically and whenever a user issues
 * a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	hammer2_xop_flush_t *xop;
	struct hammer2_sync_info info;
	hammer2_inode_t *iroot;
	hammer2_pfs_t *pmp;
	int flags;
	int error;

	pmp = MPTOPMP(mp);
	iroot = pmp->iroot;
	KKASSERT(iroot);
	KKASSERT(iroot->pmp == pmp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 *
	 * The reclamation code interlocks with the sync list's token
	 * (by removing the vnode from the scan list) before unlocking
	 * the inode, giving us time to ref the inode.
	 */
	/*flags = VMSC_GETVP;*/
	flags = 0;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	/*
	 * Preflush the vnodes using a normal transaction before interlocking
	 * with a flush transaction.  We do this to try to run as much of
	 * the compression as possible outside the flush transaction.
	 *
	 * For efficiency, do an asynchronous pass first and then follow it
	 * with a synchronous pass on all related buffer cache buffers.
	 */
	hammer2_trans_init(pmp, 0);
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);
	hammer2_trans_done(pmp);

	/*
	 * Start our flush transaction.  This does not return until all
	 * concurrent transactions have completed and will prevent any
	 * new transactions from running concurrently, except for the
	 * buffer cache transactions.
	 *
	 * (1) vfsync() all dirty vnodes via vfsyncscan().
	 *
	 * (2) Flush any remaining dirty inodes (the sideq), including any
	 *     which may have been created during or raced against the
	 *     vfsync().  To catch all cases this must be done after the
	 *     vfsync().
	 *
	 * (3) Wait for any pending BIO I/O to complete (hammer2_bioq_sync()).
	 *
	 * NOTE! It is still possible for the paging code to push pages
	 *	 out via a UIO_NOCOPY hammer2_vop_write() during the main
	 *	 flush.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);
	hammer2_inode_run_sideq(pmp, 1);
	hammer2_bioq_sync(pmp);

	/*
	 * Use the XOP interface to concurrently flush all nodes to
	 * synchronize the PFSROOT subtopology to the media.  A standard
	 * end-of-scan ENOENT error indicates cluster sufficiency.
	 *
	 * Note that this flush will not be visible on crash recovery until
	 * we flush the super-root topology in the next loop.
	 *
	 * XXX For now wait for all flushes to complete.
	 */
	if (iroot) {
		xop = hammer2_xop_alloc(iroot, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
		error = hammer2_xop_collect(&xop->head,
					    HAMMER2_XOP_COLLECT_WAITALL);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		else
			error = hammer2_error_to_errno(error);
	} else {
		error = 0;
	}
	hammer2_trans_done(pmp);

	return (error);
}

/*
 * Sync passes.
 *
 * Note that we ignore the transaction mtid we got above.  Instead,
 * each vfsync below will ultimately get its own via TRANS_BUFCACHE
 * transactions.
 */
static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	int error;

	/*
	 * Degenerate cases.  Note that ip == NULL typically means the
	 * syncer vnode itself and we don't want to vclrisdirty() in that
	 * situation.
	 */
	ip = VTOI(vp);
	if (ip == NULL) {
		return(0);
	}
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return(0);
	}

	/*
	 * VOP_FSYNC will start a new transaction so replicate some code
	 * here to do it inline (see hammer2_vop_fsync()).
	 *
	 * WARNING: The vfsync interacts with the buffer cache and might
	 *	    block, we can't hold the inode lock at that time.
	 *	    However, we MUST ref ip before blocking to ensure that
	 *	    it isn't ripped out from under us (since we do not
	 *	    hold a lock on the vnode).
	 */
	hammer2_inode_ref(ip);
	if ((ip->flags & HAMMER2_INODE_MODIFIED) ||
	    !RB_EMPTY(&vp->v_rbdirty_tree)) {
		vfsync(vp, info->waitfor, 1, NULL, NULL);
		if (ip->flags & (HAMMER2_INODE_RESIZED |
				 HAMMER2_INODE_MODIFIED)) {
			hammer2_inode_lock(ip, 0);
			if (ip->flags & (HAMMER2_INODE_RESIZED |
					 HAMMER2_INODE_MODIFIED)) {
				hammer2_inode_chain_sync(ip);
			}
			hammer2_inode_unlock(ip);
		}
	}
	if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrisdirty(vp);
	}

	hammer2_inode_drop(ip);
#if 1
	error = 0;
	if (error)
		info->error = error;
#endif
	return(0);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = 0;
	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	hammer2_tid_t inum;
	int error;

	pmp = MPTOPMP(mp);
	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	if (error)
		kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error);
	return error;
}
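
/*
 * Check an NFS export request: look the client up in this PFS's export
 * list and return its export flags and anonymous credential, or EACCES
 * if the client is not listed.
 */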
static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return error;
}

/*
 * Support code for hammer2_vfs_mount().  Read, verify, and install the
 * volume header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX check iCRCs.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
static
int
hammer2_install_volume_header(hammer2_dev_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field, so we don't know precisely how large the
	 * filesystem is; depend on the OS to return an error if we go
	 * beyond the block device's EOF.
	 */
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem "
				"detected\n");
			brelse(bp);
			bp = NULL;
			continue;
		}

		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return (error);
}

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
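 *
 * The count of writes in progress is maintained in pmp->count_lwinprog
 * by hammer2_lwinprog_ref()/hammer2_lwinprog_drop() below.  Writers
 * throttle via hammer2_lwinprog_wait() and are woken as the count
 * drains (to roughly 2/3 of hammer2_flush_pipe, or to zero).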
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
}

void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}

/*
 * Manage excessive memory resource use for chain and related
 * structures.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	uint32_t count;
	uint32_t limit;
#if 0
	static int zzticks;
#endif

	/*
	 * Atomic check condition and wait.  Also do an early speedup of
	 * the syncer to try to avoid hitting the wait.
	 */
	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		cpu_ccfence();
		count = waiting & HAMMER2_DIRTYCHAIN_MASK;

		limit = pmp->mp->mnt_nvnodelistsize / 10;
		if (limit < hammer2_limit_dirty_chains)
			limit = hammer2_limit_dirty_chains;
		if (limit < 1000)
			limit = 1000;

#if 0
		if ((int)(ticks - zzticks) > hz) {
			zzticks = ticks;
			kprintf("count %ld %ld\n", count, limit);
		}
#endif

		/*
		 * Block if there are too many dirty chains present, wait
		 * for the flush to clean some out.
		 */
		if (count > limit) {
			tsleep_interlock(&pmp->inmem_dirty_chains, 0);
			if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
					      waiting,
				       waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
				speedup_syncer(pmp->mp);
				tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
				       "chnmem", hz);
			}
			continue;	/* loop on success or fail */
		}

		/*
		 * Try to start an early flush before we are forced to block.
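		 * The syncer is sped up once the dirty chain count exceeds
		 * roughly 70% of the computed limit.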
		 */
		if (count > limit * 7 / 10)
			speedup_syncer(pmp->mp);
		break;
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp) {
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
	}
}

void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
{
	uint32_t waiting;

	if (pmp == NULL)
		return;

	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		cpu_ccfence();
		if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
				      waiting,
				      (waiting - 1) &
				       ~HAMMER2_DIRTYCHAIN_WAITING)) {
			break;
		}
	}

	if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
		wakeup(&pmp->inmem_dirty_chains);
}

/*
 * Returns 0 if the filesystem has tons of free space
 * Returns 1 if the filesystem has less than 10% remaining
 * Returns 2 if the filesystem has less than 2%/5% (user/root) remaining.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_off_t free_reserved;
	hammer2_off_t free_nominal;
	int i;

	pmp = ip->pmp;

	if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
		free_reserved = HAMMER2_SEGSIZE;
		free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
		for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
			hmp = pmp->pfs_hmps[i];
			if (hmp == NULL)
				continue;
			if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
			    pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
				continue;

			if (free_nominal > hmp->voldata.allocator_free)
				free_nominal = hmp->voldata.allocator_free;
			if (free_reserved < hmp->free_reserved)
				free_reserved = hmp->free_reserved;
		}

		/*
		 * SMP races ok
		 */
		pmp->free_reserved = free_reserved;
		pmp->free_nominal = free_nominal;
		pmp->free_ticks = ticks;
	} else {
		free_reserved = pmp->free_reserved;
		free_nominal = pmp->free_nominal;
	}
	if (cred && cred->cr_uid != 0) {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved) {
			return 2;
		}
	} else {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved / 2) {
			return 2;
		}
	}
	if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
		return 1;
	return 0;
}

/*
 * Debugging
 */
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
		tab, tab, "", pfx,
		chain, chain->bref.type,
		chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s    [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		  chain->data) ?
		  (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent)
		kprintf("\n%*.*s    p=%p [pflags %08x prefs %d",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree)
			hammer2_dump_chain(scan, tab + 4, countp, 'a');
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}