/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/uuid.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>

#include <sys/proc.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>

#include "hammer2.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {
	int error;
	int waitfor;
	int pass;
};

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;

int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_debug;
int hammer2_xopgroups;
long hammer2_debug_inode;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_cluster_write = 0;		/* physical write clustering */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
int hammer2_worker_rmask = 3;
long hammer2_chain_allocs;
long hammer2_limit_dirty_chains;
long hammer2_limit_dirty_inodes;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
static long hammer2_iod_inode_creates;
static long hammer2_iod_inode_deletes;

long hammer2_process_icrc32;
long hammer2_process_xxhash64;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer2_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, worker_rmask, CTLFLAG_RW,
	   &hammer2_worker_rmask, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
	   &hammer2_limit_dirty_inodes, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RW,
	   &hammer2_iod_inode_creates, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RW,
	   &hammer2_iod_inode_deletes, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RW,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RW,
	   &hammer2_process_xxhash64, 0, "");
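
/*
 * NOTE: The tunables and counters above are exported under the vfs.hammer2
 *	 sysctl tree created by the SYSCTL_NODE above.  The CTLFLAG_RW ones
 *	 can be inspected or adjusted at runtime with sysctl(8), for example
 *	 (illustrative only):
 *
 *		sysctl vfs.hammer2.debug=1
 *		sysctl vfs.hammer2.dedup_enable=0
 */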

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_vfs_modifying(struct mount *mp);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_flags	= 0,
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp,
	.vfs_modifying	= hammer2_vfs_modifying
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;

	int error;

	error = 0;
	kmalloc_raise_limit(M_HAMMER2, 0);	/* unlimited */

	/*
	 * hammer2_xopgroups must be even and is most optimal if
	 * 2 x ncpus so strategy functions can be queued to the same
	 * cpu.
	 */
	hammer2_xopgroups = HAMMER2_XOPGROUPS_MIN;
	if (hammer2_xopgroups < ncpus * 2)
		hammer2_xopgroups = ncpus * 2;

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO case large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
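	/*
	 * Three object caches are created below and torn down again in
	 * hammer2_vfs_uninit(): 64KB decompression buffers, 32KB
	 * compression buffers, and zeroed hammer2_xop_t structures
	 * (sizes taken from the margs setup above).
	 */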
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);


	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
	if (hammer2_limit_dirty_chains < 1000)
		hammer2_limit_dirty_chains = 1000;

	hammer2_limit_dirty_inodes = maxvnodes / 25;
	if (hammer2_limit_dirty_inodes < 100)
		hammer2_limit_dirty_inodes = 100;
	if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
		hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid, hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0], ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
				   sizeof(struct hammer2_inode));
		lockinit(&pmp->lock, "pfslk", 0, 0);
		lockinit(&pmp->lock_nlink, "h2nlink", 0, 0);
		spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		spin_init(&pmp->xop_spin, "h2xop");
		spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->syncq);
		TAILQ_INIT(&pmp->depq);
		TAILQ_INIT(&pmp->lru_list);
		spin_init(&pmp->list_spin, "h2pfsalloc_list");

		/*
		 * Save the last media transaction id for the flusher.  Set
		 * initial
		 */
		if (ripdata) {
			pmp->pfs_clid = ripdata->meta.pfs_clid;
			TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
		} else {
			pmp->flags |= HAMMER2_PMPF_SPMP;
			TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
		}

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode and any missing XOP helper threads.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, 1, -1);
		if (ripdata)
			iroot->meta = ripdata->meta;
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_pfsalloc: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;
		hammer2_spin_ex(&pmp->inum_spin);
		pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
		hammer2_spin_unex(&pmp->inum_spin);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 *  discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		if (pmp->xop_groups) {
			for (j = 0; j < hammer2_xopgroups; ++j) {
				hammer2_thr_delete(
					&pmp->xop_groups[j].thrs[clindex]);
			}
		}
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int chains_still_present = 0;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	if (pmp->flags & HAMMER2_PMPF_SPMP)
		TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
	else
		TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	hammer2_spin_ex(&pmp->lru_spin);
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU);
		atomic_add_int(&pmp->lru_count, -1);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU);
		TAILQ_REMOVE(&pmp->lru_list, chain, lru_node);
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&pmp->lru_spin);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
		hammer2_spin_ex(&pmp->lru_spin);
	}
	hammer2_spin_unex(&pmp->lru_spin);

	/*
	 * Clean up iroot
	 */
	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xopgroups; ++j)
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
			}
			chain = iroot->cluster.array[i].chain;
			if (chain && !RB_EMPTY(&chain->core.rbtree)) {
				kprintf("hammer2: Warning pmp %p still "
					"has active chains\n", pmp);
				chains_still_present = 1;
			}
		}
#if REPORT_REFS_ERRORS
		if (iroot->refs != 1)
			kprintf("PMP->IROOT %p REFS WRONG %d\n",
				iroot, iroot->refs);
#else
		KKASSERT(iroot->refs == 1);
#endif
		/* ref for iroot */
		hammer2_inode_drop(iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Free remaining pmp resources
	 */
	if (chains_still_present) {
		kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
	} else {
		kmalloc_destroy_obj(&pmp->minode);
		kfree(pmp, M_HAMMER2);
	}
}

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int i;
	int j;
	struct hammer2_pfslist *wlist;

	if (which == 0)
		wlist = &hammer2_pfslist;
	else
		wlist = &hammer2_spmplist;
again:
	TAILQ_FOREACH(pmp, wlist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i == HAMMER2_MAXCLUSTER)
			continue;

		hammer2_vfs_sync_pmp(pmp, MNT_WAIT);

		/*
		 * Make sure all synchronization threads are locked
		 * down.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xopgroups; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xopgroups; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}

		/*
		 * Lock the inode and clean out matching chains.
		 * Note that we cannot use hammer2_inode_lock_*()
		 * here because that would attempt to validate the
		 * cluster that we are in the middle of ripping
		 * apart.
		 *
		 * WARNING! We are working directly on the inode's
		 *	    embedded cluster.
		 */
		hammer2_mtx_ex(&iroot->lock);

		/*
		 * Remove the chain from matching elements of the PFS.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xopgroups; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			rchain = iroot->cluster.array[i].chain;
			iroot->cluster.array[i].chain = NULL;
			pmp->pfs_types[i] = 0;
			if (pmp->pfs_names[i]) {
				kfree(pmp->pfs_names[i], M_HAMMER2);
				pmp->pfs_names[i] = NULL;
			}
			if (rchain) {
				hammer2_chain_drop(rchain);
				/* focus hint */
				if (iroot->cluster.focus == rchain)
					iroot->cluster.focus = NULL;
			}
			pmp->pfs_hmps[i] = NULL;
		}
		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			/*
			 * If this was the hmp's spmp, we need to clean
			 * a little more stuff out.
			 */
			if (hmp->spmp == pmp) {
				hmp->spmp = NULL;
				hmp->vchain.pmp = NULL;
				hmp->fchain.pmp = NULL;
			}

			/*
			 * Free the pmp and restart the loop
			 */
			KKASSERT(TAILQ_EMPTY(&pmp->syncq));
			KKASSERT(TAILQ_EMPTY(&pmp->depq));
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_remaster(&pmp->sync_thrs[i]);
			hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xopgroups; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp, *hmp_tmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	const hammer2_inode_data_t *ripdata;
	hammer2_blockref_t bref;
	hammer2_devvp_list_t devvpl;
	hammer2_devvp_t *e, *e_tmp;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	bzero(&info, sizeof(info));

	if (path) {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		hammer2_cluster_t *cluster;

		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			error = hammer2_remount(hmp, mp, path, cred);
			if (error)
				break;
		}

		return error;
	}

	if (path == NULL) {
		/*
		 * Root mount
		 */
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		done = strlen(devstr) + 1;
		kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr);
	} else {
		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
		kprintf("hammer2_mount: devstr=\"%s\"\n", devstr);
	}

	/*
	 * Extract device and label, automatically mount @BOOT, @ROOT, or
	 * @DATA if no label is specified, based on the partition id.  Error
	 * out if no label or device (with partition id) is specified.  This
	 * is strictly a convenience to match the default label created by
	 * newfs_hammer2; our preference is that a label always be specified.
	 *
	 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
	 *	 that does not specify a device, as long as some H2 label
	 *	 has already been mounted from that device.  This makes
	 *	 mounting snapshots a lot easier.
	 */
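	/*
	 * Illustrative forms only (device names below are examples, not
	 * requirements):
	 *
	 *	mount -t hammer2 /dev/ad0s1d@ROOT /mnt	explicit label
	 *	mount -t hammer2 /dev/ad0s1d /mnt	label defaults by
	 *						partition id ('d'->ROOT)
	 *	mount -t hammer2 @SNAPSHOT /mnt		device inferred from an
	 *						already-mounted label
	 */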
	dev = devstr;
	label = strchr(devstr, '@');
	if (label && ((label + 1) - dev) > done) {
		kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done);
		return (EINVAL);
	}
	if (label == NULL || label[1] == 0) {
		char slice;

		if (label == NULL)
			label = devstr + strlen(devstr);
		else
			*label = '\0';		/* clean up trailing @ */

		slice = label[-1];
		switch(slice) {
		case 'a':
			label = "BOOT";
			break;
		case 'd':
			label = "ROOT";
			break;
		default:
			label = "DATA";
			break;
		}
	} else {
		*label = '\0';
		label++;
	}

	kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n",
		dev, label, ronly);

	/*
	 * Initialize all device vnodes.
	 */
	TAILQ_INIT(&devvpl);
	error = hammer2_init_devvp(dev, path == NULL, &devvpl);
	if (error) {
		kprintf("hammer2: failed to initialize devvp in %s\n", dev);
		hammer2_cleanup_devvp(&devvpl);
		return error;
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&devvpl)) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
			TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
				int devvp_found = 0;
				TAILQ_FOREACH(e, &devvpl, entry) {
					KKASSERT(e->devvp);
					if (e_tmp->devvp == e->devvp)
						devvp_found = 1;
					if (e_tmp->devvp->v_rdev &&
					    e_tmp->devvp->v_rdev ==
					    e->devvp->v_rdev)
						devvp_found = 1;
				}
				if (!devvp_found)
					goto next_hmp;
			}
			hmp = hmp_tmp;
			kprintf("hammer2_mount: hmp=%p matched\n", hmp);
			break;
		next_hmp:
			continue;
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL) {
			TAILQ_FOREACH(e, &devvpl, entry) {
				struct vnode *devvp = e->devvp;
				KKASSERT(devvp);
				error = vfs_mountedon(devvp);
				if (error) {
					kprintf("hammer2_mount: %s mounted %d\n",
						e->path, error);
					hammer2_cleanup_devvp(&devvpl);
					lockmgr(&hammer2_mntlk, LK_RELEASE);
					return error;
				}
			}
		}
	} else {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL) {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return ENOENT;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xid_t xid;
		hammer2_xop_head_t xop;

		/*
		 * Now open the device
		 */
		KKASSERT(!TAILQ_EMPTY(&devvpl));
		if (error == 0) {
			error = hammer2_open_devvp(&devvpl, ronly);
			if (error) {
				hammer2_close_devvp(&devvpl, ronly);
				hammer2_cleanup_devvp(&devvpl);
				lockmgr(&hammer2_mntlk, LK_RELEASE);
				return error;
			}
		}

		/*
		 * Construct volumes and link with device vnodes.
		 */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->devvp = NULL;
		error = hammer2_init_volumes(mp, &devvpl, hmp->volumes,
					     &hmp->voldata, &hmp->devvp);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			kfree(hmp, M_HAMMER2);
			return error;
		}
		if (!hmp->devvp) {
			kprintf("hammer2: failed to initialize root volume\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
		hmp->ronly = ronly;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
				   sizeof(struct hammer2_chain));
		kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		spin_init(&hmp->io_spin, "h2mount_io");
		spin_init(&hmp->list_spin, "h2mount_list");

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup. vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hammer2_chain_core_init(&hmp->vchain);

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level radix tree
		 * so it does not interfere with the volume's topology
		 * radix tree.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hammer2_chain_core_init(&hmp->fchain);

		/*
		 * Initialize volume header related fields.
		 */
		KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
			 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
		hmp->volhdrno = error;
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		/*
		 * Must use hmp instead of volume header for these two
		 * in order to handle volume versions transparently.
		 */
		if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
			hmp->nvolumes = hmp->voldata.nvolumes;
			hmp->total_size = hmp->voldata.total_size;
		} else {
			hmp->nvolumes = 1;
			hmp->total_size = hmp->voldata.volu_size;
		}
		KKASSERT(hmp->nvolumes > 0);

		/*
		 * Move devvpl entries to hmp.
		 */
		TAILQ_INIT(&hmp->devvpl);
		while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
			TAILQ_REMOVE(&devvpl, e, entry);
			TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
		}
		KKASSERT(TAILQ_EMPTY(&devvpl));
		KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));

		/*
		 * Really important to get these right or the flush and
		 * teardown code will get confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL);
		spmp = hmp->spmp;
		spmp->pfs_hmps[0] = hmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		xid = 0;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
					      HAMMER2_SROOT_KEY,
					      HAMMER2_SROOT_KEY,
					      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &schain->data->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		hammer2_dummy_xop_from_chain(&xop, schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = NULL;
		spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(&xop.cluster);
		hammer2_chain_drop(schain);
		/* do not call hammer2_cluster_drop() on an embedded cluster */
		schain = NULL;	/* now invalid */
		/* leave spmp->iroot with one ref */

		if (!hmp->ronly) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curthread, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2_mount: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		if (error) {
			kprintf("hammer2_mount: PFS label I/O error\n");
			return EINVAL;
		} else {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			return ENOENT;
		}
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		bref = chain->bref;
		pmp = hammer2_pfsalloc(NULL, ripdata,
				       bref.modify_tid, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			kprintf("Non inode chain type %d under super-root\n",
				chain->bref.type);
		} else {
			ripdata = &chain->data->ipdata;
			bref = chain->bref;
			pmp = hammer2_pfsalloc(chain, ripdata,
					       bref.modify_tid, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct ucred *cred)
{
	hammer2_volume_t *vol;
	struct vnode *devvp;
	int i, error, result = 0;

	if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
		return 0;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = &hmp->volumes[i];
		devvp = vol->dev->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = 0;
		if (vol->id == HAMMER2_ROOT_VOLUME) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
		result |= error;
	}
	if (result == 0) {
		kprintf("hammer2: enable read/write\n");
		hmp->ronly = 0;
	}

	return result;
}

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Mount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int dumpcnt;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have to
	 * disconnect the mount, adjust mount_count, and locate devices that
	 * might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp, 0);

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFS's might still
	 * have some dirty chains on them.
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);

	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_voldata_modify(hmp);
		hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->fchain);

	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	hammer2_pfsfree_scan(hmp, 1);

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if (!TAILQ_EMPTY(&hmp->devvpl)) {
		hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
		hammer2_cleanup_devvp(&hmp->devvpl);
	}
	KKASSERT(TAILQ_EMPTY(&hmp->devvpl));

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
1892 */ 1893 dumpcnt = 50; 1894 hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1); 1895 dumpcnt = 50; 1896 hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1); 1897 1898 hammer2_chain_drop(&hmp->vchain); 1899 1900 hammer2_io_cleanup(hmp, &hmp->iotree); 1901 if (hmp->iofree_count) { 1902 kprintf("io_cleanup: %d I/O's left hanging\n", 1903 hmp->iofree_count); 1904 } 1905 1906 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); 1907 kmalloc_destroy_obj(&hmp->mchain); 1908 kmalloc_destroy(&hmp->mmsg); 1909 kfree(hmp, M_HAMMER2); 1910 } 1911 1912 int 1913 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 1914 ino_t ino, struct vnode **vpp) 1915 { 1916 hammer2_xop_lookup_t *xop; 1917 hammer2_pfs_t *pmp; 1918 hammer2_inode_t *ip; 1919 hammer2_tid_t inum; 1920 int error; 1921 1922 inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK; 1923 1924 error = 0; 1925 pmp = MPTOPMP(mp); 1926 1927 /* 1928 * Easy if we already have it cached 1929 */ 1930 ip = hammer2_inode_lookup(pmp, inum); 1931 if (ip) { 1932 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 1933 *vpp = hammer2_igetv(ip, &error); 1934 hammer2_inode_unlock(ip); 1935 hammer2_inode_drop(ip); /* from lookup */ 1936 1937 return error; 1938 } 1939 1940 /* 1941 * Otherwise we have to find the inode 1942 */ 1943 xop = hammer2_xop_alloc(pmp->iroot, 0); 1944 xop->lhc = inum; 1945 hammer2_xop_start(&xop->head, &hammer2_lookup_desc); 1946 error = hammer2_xop_collect(&xop->head, 0); 1947 1948 if (error == 0) 1949 ip = hammer2_inode_get(pmp, &xop->head, -1, -1); 1950 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1951 1952 if (ip) { 1953 *vpp = hammer2_igetv(ip, &error); 1954 hammer2_inode_unlock(ip); 1955 } else { 1956 *vpp = NULL; 1957 error = ENOENT; 1958 } 1959 return (error); 1960 } 1961 1962 static 1963 int 1964 hammer2_vfs_root(struct mount *mp, struct vnode **vpp) 1965 { 1966 hammer2_pfs_t *pmp; 1967 struct vnode *vp; 1968 int error; 1969 1970 pmp = MPTOPMP(mp); 1971 if (pmp->iroot == NULL) { 1972 kprintf("hammer2 (%s): no root inode\n", 1973 mp->mnt_stat.f_mntfromname); 1974 *vpp = NULL; 1975 return EINVAL; 1976 } 1977 1978 error = 0; 1979 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 1980 1981 while (pmp->inode_tid == 0) { 1982 hammer2_xop_ipcluster_t *xop; 1983 const hammer2_inode_meta_t *meta; 1984 1985 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING); 1986 hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc); 1987 error = hammer2_xop_collect(&xop->head, 0); 1988 1989 if (error == 0) { 1990 meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta; 1991 pmp->iroot->meta = *meta; 1992 pmp->inode_tid = meta->pfs_inum + 1; 1993 hammer2_xop_pdata(&xop->head); 1994 /* meta invalid */ 1995 1996 if (pmp->inode_tid < HAMMER2_INODE_START) 1997 pmp->inode_tid = HAMMER2_INODE_START; 1998 pmp->modify_tid = 1999 xop->head.cluster.focus->bref.modify_tid + 1; 2000 #if 0 2001 kprintf("PFS: Starting inode %jd\n", 2002 (intmax_t)pmp->inode_tid); 2003 kprintf("PMP focus good set nextino=%ld mod=%016jx\n", 2004 pmp->inode_tid, pmp->modify_tid); 2005 #endif 2006 wakeup(&pmp->iroot); 2007 2008 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2009 2010 /* 2011 * Prime the mount info. 
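 *
 * hammer2_vfs_statfs() is called with a NULL cred here, so the
 * root-reserve adjustment is skipped; this just seeds mp->mnt_stat
 * from the volume header before the first user statfs() arrives.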
2012 */ 2013 hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL); 2014 break; 2015 } 2016 2017 /* 2018 * Loop, try again 2019 */ 2020 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2021 hammer2_inode_unlock(pmp->iroot); 2022 error = tsleep(&pmp->iroot, PCATCH, "h2root", hz); 2023 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 2024 if (error == EINTR) 2025 break; 2026 } 2027 2028 if (error) { 2029 hammer2_inode_unlock(pmp->iroot); 2030 *vpp = NULL; 2031 } else { 2032 vp = hammer2_igetv(pmp->iroot, &error); 2033 hammer2_inode_unlock(pmp->iroot); 2034 *vpp = vp; 2035 } 2036 2037 return (error); 2038 } 2039 2040 /* 2041 * Filesystem status 2042 * 2043 * XXX incorporate ipdata->meta.inode_quota and data_quota 2044 */ 2045 static 2046 int 2047 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 2048 { 2049 hammer2_pfs_t *pmp; 2050 hammer2_dev_t *hmp; 2051 hammer2_blockref_t bref; 2052 struct statfs tmp; 2053 int i; 2054 2055 /* 2056 * NOTE: iroot might not have validated the cluster yet. 2057 */ 2058 pmp = MPTOPMP(mp); 2059 2060 bzero(&tmp, sizeof(tmp)); 2061 2062 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2063 hmp = pmp->pfs_hmps[i]; 2064 if (hmp == NULL) 2065 continue; 2066 if (pmp->iroot->cluster.array[i].chain) 2067 bref = pmp->iroot->cluster.array[i].chain->bref; 2068 else 2069 bzero(&bref, sizeof(bref)); 2070 2071 tmp.f_files = bref.embed.stats.inode_count; 2072 tmp.f_ffree = 0; 2073 tmp.f_blocks = hmp->voldata.allocator_size / 2074 mp->mnt_vstat.f_bsize; 2075 tmp.f_bfree = hmp->voldata.allocator_free / 2076 mp->mnt_vstat.f_bsize; 2077 tmp.f_bavail = tmp.f_bfree; 2078 2079 if (cred && cred->cr_uid != 0) { 2080 uint64_t adj; 2081 2082 /* 5% */ 2083 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2084 tmp.f_blocks -= adj; 2085 tmp.f_bfree -= adj; 2086 tmp.f_bavail -= adj; 2087 } 2088 2089 mp->mnt_stat.f_blocks = tmp.f_blocks; 2090 mp->mnt_stat.f_bfree = tmp.f_bfree; 2091 mp->mnt_stat.f_bavail = tmp.f_bavail; 2092 mp->mnt_stat.f_files = tmp.f_files; 2093 mp->mnt_stat.f_ffree = tmp.f_ffree; 2094 2095 *sbp = mp->mnt_stat; 2096 } 2097 return (0); 2098 } 2099 2100 static 2101 int 2102 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 2103 { 2104 hammer2_pfs_t *pmp; 2105 hammer2_dev_t *hmp; 2106 hammer2_blockref_t bref; 2107 struct statvfs tmp; 2108 int i; 2109 2110 /* 2111 * NOTE: iroot might not have validated the cluster yet. 
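 *
 * For each backing device the math below is simply:
 *
 *      f_blocks = voldata.allocator_size / f_bsize
 *      f_bfree  = voldata.allocator_free / f_bsize
 *      f_bavail = f_bfree
 *
 * and for non-root credentials roughly 5% (hmp->free_reserved, scaled
 * to blocks) is subtracted from each field so ordinary users see the
 * reserve as already consumed.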
2112 */ 2113 pmp = MPTOPMP(mp); 2114 bzero(&tmp, sizeof(tmp)); 2115 2116 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2117 hmp = pmp->pfs_hmps[i]; 2118 if (hmp == NULL) 2119 continue; 2120 if (pmp->iroot->cluster.array[i].chain) 2121 bref = pmp->iroot->cluster.array[i].chain->bref; 2122 else 2123 bzero(&bref, sizeof(bref)); 2124 2125 tmp.f_files = bref.embed.stats.inode_count; 2126 tmp.f_ffree = 0; 2127 tmp.f_blocks = hmp->voldata.allocator_size / 2128 mp->mnt_vstat.f_bsize; 2129 tmp.f_bfree = hmp->voldata.allocator_free / 2130 mp->mnt_vstat.f_bsize; 2131 tmp.f_bavail = tmp.f_bfree; 2132 2133 if (cred && cred->cr_uid != 0) { 2134 uint64_t adj; 2135 2136 /* 5% */ 2137 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2138 tmp.f_blocks -= adj; 2139 tmp.f_bfree -= adj; 2140 tmp.f_bavail -= adj; 2141 } 2142 2143 mp->mnt_vstat.f_blocks = tmp.f_blocks; 2144 mp->mnt_vstat.f_bfree = tmp.f_bfree; 2145 mp->mnt_vstat.f_bavail = tmp.f_bavail; 2146 mp->mnt_vstat.f_files = tmp.f_files; 2147 mp->mnt_vstat.f_ffree = tmp.f_ffree; 2148 2149 *sbp = mp->mnt_vstat; 2150 } 2151 return (0); 2152 } 2153 2154 /* 2155 * Mount-time recovery (RW mounts) 2156 * 2157 * Updates to the free block table are allowed to lag flushes by one 2158 * transaction. In case of a crash, then on a fresh mount we must do an 2159 * incremental scan of the last committed transaction id and make sure that 2160 * all related blocks have been marked allocated. 2161 */ 2162 struct hammer2_recovery_elm { 2163 TAILQ_ENTRY(hammer2_recovery_elm) entry; 2164 hammer2_chain_t *chain; 2165 hammer2_tid_t sync_tid; 2166 }; 2167 2168 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm); 2169 2170 struct hammer2_recovery_info { 2171 struct hammer2_recovery_list list; 2172 hammer2_tid_t mtid; 2173 int depth; 2174 }; 2175 2176 static int hammer2_recovery_scan(hammer2_dev_t *hmp, 2177 hammer2_chain_t *parent, 2178 struct hammer2_recovery_info *info, 2179 hammer2_tid_t sync_tid); 2180 2181 #define HAMMER2_RECOVERY_MAXDEPTH 10 2182 2183 static 2184 int 2185 hammer2_recovery(hammer2_dev_t *hmp) 2186 { 2187 struct hammer2_recovery_info info; 2188 struct hammer2_recovery_elm *elm; 2189 hammer2_chain_t *parent; 2190 hammer2_tid_t sync_tid; 2191 hammer2_tid_t mirror_tid; 2192 int error; 2193 2194 hammer2_trans_init(hmp->spmp, 0); 2195 2196 sync_tid = hmp->voldata.freemap_tid; 2197 mirror_tid = hmp->voldata.mirror_tid; 2198 2199 kprintf("hammer2_mount: \"%s\": ", hmp->devrepname); 2200 if (sync_tid >= mirror_tid) { 2201 kprintf("no recovery needed\n"); 2202 } else { 2203 kprintf("freemap recovery %016jx-%016jx\n", 2204 sync_tid + 1, mirror_tid); 2205 } 2206 2207 TAILQ_INIT(&info.list); 2208 info.depth = 0; 2209 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 2210 error = hammer2_recovery_scan(hmp, parent, &info, sync_tid); 2211 hammer2_chain_lookup_done(parent); 2212 2213 while ((elm = TAILQ_FIRST(&info.list)) != NULL) { 2214 TAILQ_REMOVE(&info.list, elm, entry); 2215 parent = elm->chain; 2216 sync_tid = elm->sync_tid; 2217 kfree(elm, M_HAMMER2); 2218 2219 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2220 error |= hammer2_recovery_scan(hmp, parent, &info, 2221 hmp->voldata.freemap_tid); 2222 hammer2_chain_unlock(parent); 2223 hammer2_chain_drop(parent); /* drop elm->chain ref */ 2224 } 2225 2226 hammer2_trans_done(hmp->spmp, 0); 2227 2228 return error; 2229 } 2230 2231 static 2232 int 2233 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent, 2234 struct hammer2_recovery_info *info, 2235 hammer2_tid_t sync_tid) 2236 { 2237 const 
hammer2_inode_data_t *ripdata; 2238 hammer2_chain_t *chain; 2239 hammer2_blockref_t bref; 2240 int tmp_error; 2241 int rup_error; 2242 int error; 2243 int first; 2244 2245 /* 2246 * Adjust freemap to ensure that the block(s) are marked allocated. 2247 */ 2248 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) { 2249 hammer2_freemap_adjust(hmp, &parent->bref, 2250 HAMMER2_FREEMAP_DORECOVER); 2251 } 2252 2253 /* 2254 * Check type for recursive scan 2255 */ 2256 switch(parent->bref.type) { 2257 case HAMMER2_BREF_TYPE_VOLUME: 2258 /* data already instantiated */ 2259 break; 2260 case HAMMER2_BREF_TYPE_INODE: 2261 /* 2262 * Must instantiate data for DIRECTDATA test and also 2263 * for recursion. 2264 */ 2265 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2266 ripdata = &parent->data->ipdata; 2267 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) { 2268 /* not applicable to recovery scan */ 2269 hammer2_chain_unlock(parent); 2270 return 0; 2271 } 2272 hammer2_chain_unlock(parent); 2273 break; 2274 case HAMMER2_BREF_TYPE_INDIRECT: 2275 /* 2276 * Must instantiate data for recursion 2277 */ 2278 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2279 hammer2_chain_unlock(parent); 2280 break; 2281 case HAMMER2_BREF_TYPE_DIRENT: 2282 case HAMMER2_BREF_TYPE_DATA: 2283 case HAMMER2_BREF_TYPE_FREEMAP: 2284 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 2285 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 2286 /* not applicable to recovery scan */ 2287 return 0; 2288 break; 2289 default: 2290 return HAMMER2_ERROR_BADBREF; 2291 } 2292 2293 /* 2294 * Defer operation if depth limit reached. 2295 */ 2296 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) { 2297 struct hammer2_recovery_elm *elm; 2298 2299 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK); 2300 elm->chain = parent; 2301 elm->sync_tid = sync_tid; 2302 hammer2_chain_ref(parent); 2303 TAILQ_INSERT_TAIL(&info->list, elm, entry); 2304 /* unlocked by caller */ 2305 2306 return(0); 2307 } 2308 2309 2310 /* 2311 * Recursive scan of the last flushed transaction only. We are 2312 * doing this without pmp assignments so don't leave the chains 2313 * hanging around after we are done with them. 2314 * 2315 * error Cumulative error this level only 2316 * rup_error Cumulative error for recursion 2317 * tmp_error Specific non-cumulative recursion error 2318 */ 2319 chain = NULL; 2320 first = 1; 2321 rup_error = 0; 2322 error = 0; 2323 2324 for (;;) { 2325 error |= hammer2_chain_scan(parent, &chain, &bref, 2326 &first, 2327 HAMMER2_LOOKUP_NODATA); 2328 2329 /* 2330 * Problem during scan or EOF 2331 */ 2332 if (error) 2333 break; 2334 2335 /* 2336 * If this is a leaf 2337 */ 2338 if (chain == NULL) { 2339 if (bref.mirror_tid > sync_tid) { 2340 hammer2_freemap_adjust(hmp, &bref, 2341 HAMMER2_FREEMAP_DORECOVER); 2342 } 2343 continue; 2344 } 2345 2346 /* 2347 * This may or may not be a recursive node. 2348 */ 2349 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 2350 if (bref.mirror_tid > sync_tid) { 2351 ++info->depth; 2352 tmp_error = hammer2_recovery_scan(hmp, chain, 2353 info, sync_tid); 2354 --info->depth; 2355 } else { 2356 tmp_error = 0; 2357 } 2358 2359 /* 2360 * Flush the recovery at the PFS boundary to stage it for 2361 * the final flush of the super-root topology. 
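 *
 * In short, the loop below only recurses into children whose
 * bref.mirror_tid is newer than the freemap sync_tid (re-marking
 * their blocks allocated via hammer2_freemap_adjust()), and any PFS
 * root it encounters with ONFLUSH set is flushed immediately so the
 * final super-root flush will pick it up.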
2362 */ 2363 if (tmp_error == 0 && 2364 (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) && 2365 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) { 2366 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2367 HAMMER2_FLUSH_ALL); 2368 } 2369 rup_error |= tmp_error; 2370 } 2371 return ((error | rup_error) & ~HAMMER2_ERROR_EOF); 2372 } 2373 2374 /* 2375 * This fixes up an error introduced in earlier H2 implementations where 2376 * moving a PFS inode into an indirect block wound up causing the 2377 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared. 2378 */ 2379 static 2380 int 2381 hammer2_fixup_pfses(hammer2_dev_t *hmp) 2382 { 2383 const hammer2_inode_data_t *ripdata; 2384 hammer2_chain_t *parent; 2385 hammer2_chain_t *chain; 2386 hammer2_key_t key_next; 2387 hammer2_pfs_t *spmp; 2388 int error; 2389 2390 error = 0; 2391 2392 /* 2393 * Lookup mount point under the media-localized super-root. 2394 * 2395 * cluster->pmp will incorrectly point to spmp and must be fixed 2396 * up later on. 2397 */ 2398 spmp = hmp->spmp; 2399 hammer2_inode_lock(spmp->iroot, 0); 2400 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 2401 chain = hammer2_chain_lookup(&parent, &key_next, 2402 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX, 2403 &error, 0); 2404 while (chain) { 2405 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) 2406 continue; 2407 if (chain->error) { 2408 kprintf("I/O error scanning PFS labels\n"); 2409 error |= chain->error; 2410 } else if ((chain->bref.flags & 2411 HAMMER2_BREF_FLAG_PFSROOT) == 0) { 2412 int error2; 2413 2414 ripdata = &chain->data->ipdata; 2415 hammer2_trans_init(hmp->spmp, 0); 2416 error2 = hammer2_chain_modify(chain, 2417 chain->bref.modify_tid, 2418 0, 0); 2419 if (error2 == 0) { 2420 kprintf("hammer2: Correct mis-flagged PFS %s\n", 2421 ripdata->filename); 2422 chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT; 2423 } else { 2424 error |= error2; 2425 } 2426 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2427 HAMMER2_FLUSH_ALL); 2428 hammer2_trans_done(hmp->spmp, 0); 2429 } 2430 chain = hammer2_chain_next(&parent, chain, &key_next, 2431 key_next, HAMMER2_KEY_MAX, 2432 &error, 0); 2433 } 2434 if (parent) { 2435 hammer2_chain_unlock(parent); 2436 hammer2_chain_drop(parent); 2437 } 2438 hammer2_inode_unlock(spmp->iroot); 2439 2440 return error; 2441 } 2442 2443 /* 2444 * Sync a mount point; this is called periodically on a per-mount basis from 2445 * the filesystem syncer, and whenever a user issues a sync. 2446 */ 2447 int 2448 hammer2_vfs_sync(struct mount *mp, int waitfor) 2449 { 2450 int error; 2451 2452 error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor); 2453 2454 return error; 2455 } 2456 2457 /* 2458 * Because frontend operations lock vnodes before we get a chance to 2459 * lock the related inode, we can't just acquire a vnode lock without 2460 * risking a deadlock. The frontend may be holding a vnode lock while 2461 * also blocked on our SYNCQ flag while trying to get the inode lock. 2462 * 2463 * To deal with this situation we can check the vnode lock situation 2464 * after locking the inode and perform a work-around. 2465 */ 2466 int 2467 hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor) 2468 { 2469 struct mount *mp; 2470 /*hammer2_xop_flush_t *xop;*/ 2471 /*struct hammer2_sync_info info;*/ 2472 hammer2_inode_t *ip; 2473 hammer2_depend_t *depend; 2474 hammer2_depend_t *depend_next; 2475 struct vnode *vp; 2476 uint32_t pass2; 2477 int error; 2478 int wakecount; 2479 int dorestart; 2480 2481 mp = pmp->mp; 2482 2483 /* 2484 * Move all inodes on sideq to syncq. This will clear sideq. 
2485 * This should represent all flushable inodes. These inodes 2486 * will already have refs due to being on syncq or sideq. We 2487 * must do this all at once with the spinlock held to ensure that 2488 * all inode dependencies are part of the same flush. 2489 * 2490 * We should be able to do this asynchronously from frontend 2491 * operations because we will be locking the inodes later on 2492 * to actually flush them, and that will partition any frontend 2493 * op using the same inode. Either it has already locked the 2494 * inode and we will block, or it has not yet locked the inode 2495 * and it will block until we are finished flushing that inode. 2496 * 2497 * When restarting, only move the inodes flagged as PASS2 from 2498 * SIDEQ to SYNCQ. PASS2 propagation by inode_lock4() and 2499 * inode_depend() are atomic with the spin-lock. 2500 */ 2501 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH); 2502 #ifdef HAMMER2_DEBUG_SYNC 2503 kprintf("FILESYSTEM SYNC BOUNDARY\n"); 2504 #endif 2505 dorestart = 0; 2506 2507 /* 2508 * Move inodes from depq to syncq, releasing the related 2509 * depend structures. 2510 */ 2511 restart: 2512 #ifdef HAMMER2_DEBUG_SYNC 2513 kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart); 2514 #endif 2515 hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/); 2516 hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN); 2517 2518 /* 2519 * Move inodes from depq to syncq. When restarting, only depq's 2520 * marked pass2 are moved. 2521 */ 2522 hammer2_spin_ex(&pmp->list_spin); 2523 depend_next = TAILQ_FIRST(&pmp->depq); 2524 wakecount = 0; 2525 2526 while ((depend = depend_next) != NULL) { 2527 depend_next = TAILQ_NEXT(depend, entry); 2528 if (dorestart && depend->pass2 == 0) 2529 continue; 2530 TAILQ_FOREACH(ip, &depend->sideq, entry) { 2531 KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ); 2532 atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ); 2533 atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ); 2534 ip->depend = NULL; 2535 } 2536 2537 /* 2538 * NOTE: pmp->sideq_count includes both sideq and syncq 2539 */ 2540 TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry); 2541 2542 depend->count = 0; 2543 depend->pass2 = 0; 2544 TAILQ_REMOVE(&pmp->depq, depend, entry); 2545 } 2546 2547 hammer2_spin_unex(&pmp->list_spin); 2548 hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/ 2549 HAMMER2_TRANS_WAITING); 2550 dorestart = 0; 2551 2552 /* 2553 * sideq_count may have dropped enough to allow us to unstall 2554 * the frontend. 2555 */ 2556 hammer2_pfs_memory_wakeup(pmp, 0); 2557 2558 /* 2559 * Now run through all inodes on syncq. 2560 * 2561 * Flush transactions only interlock with other flush transactions. 2562 * Any conflicting frontend operations will block on the inode, but 2563 * may hold a vnode lock while doing so. 2564 */ 2565 hammer2_spin_ex(&pmp->list_spin); 2566 while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) { 2567 /* 2568 * Remove the inode from the SYNCQ, transfer the syncq ref 2569 * to us. We must clear SYNCQ to allow any potential 2570 * front-end deadlock to proceed. We must set PASS2 so 2571 * the dependency code knows what to do. 
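 *
 * The hand-off is a single atomic_cmpset_int() on ip->flags
 * (HAMMER2_INODE_ prefixes omitted below), retried from the top of
 * the loop if it races:
 *
 *      old = ip->flags                 (SYNCQ is set)
 *      new = (old & ~(SYNCQ | SYNCQ_WAKEUP)) | SYNCQ_PASS2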
2572 */ 2573 pass2 = ip->flags; 2574 cpu_ccfence(); 2575 if (atomic_cmpset_int(&ip->flags, 2576 pass2, 2577 (pass2 & ~(HAMMER2_INODE_SYNCQ | 2578 HAMMER2_INODE_SYNCQ_WAKEUP)) | 2579 HAMMER2_INODE_SYNCQ_PASS2) == 0) { 2580 continue; 2581 } 2582 TAILQ_REMOVE(&pmp->syncq, ip, entry); 2583 --pmp->sideq_count; 2584 hammer2_spin_unex(&pmp->list_spin); 2585 2586 /* 2587 * Tickle anyone waiting on ip->flags or the hysteresis 2588 * on the dirty inode count. 2589 */ 2590 if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP) 2591 wakeup(&ip->flags); 2592 if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) { 2593 wakecount = 0; 2594 hammer2_pfs_memory_wakeup(pmp, 0); 2595 } 2596 2597 /* 2598 * Relock the inode, and we inherit a ref from the above. 2599 * We will check for a race after we acquire the vnode. 2600 */ 2601 hammer2_mtx_ex(&ip->lock); 2602 2603 /* 2604 * We need the vp in order to vfsync() dirty buffers, so if 2605 * one isn't attached we can skip it. 2606 * 2607 * Ordering the inode lock and then the vnode lock has the 2608 * potential to deadlock. If we had left SYNCQ set that could 2609 * also deadlock us against the frontend even if we don't hold 2610 * any locks, but the latter is not a problem now since we 2611 * cleared it. igetv will temporarily release the inode lock 2612 * in a safe manner to work-around the deadlock. 2613 * 2614 * Unfortunately it is still possible to deadlock when the 2615 * frontend obtains multiple inode locks, because all the 2616 * related vnodes are already locked (nor can the vnode locks 2617 * be released and reacquired without messing up RECLAIM and 2618 * INACTIVE sequencing). 2619 * 2620 * The solution for now is to move the vp back onto SIDEQ 2621 * and set dorestart, which will restart the flush after we 2622 * exhaust the current SYNCQ. Note that additional 2623 * dependencies may build up, so we definitely need to move 2624 * the whole SIDEQ back to SYNCQ when we restart. 2625 */ 2626 vp = ip->vp; 2627 if (vp) { 2628 if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) { 2629 /* 2630 * Failed to get the vnode, requeue the inode 2631 * (PASS2 is already set so it will be found 2632 * again on the restart). 2633 * 2634 * Then unlock, possibly sleep, and retry 2635 * later. We sleep if PASS2 was *previously* 2636 * set, before we set it again above. 2637 */ 2638 vp = NULL; 2639 dorestart = 1; 2640 #ifdef HAMMER2_DEBUG_SYNC 2641 kprintf("inum %ld (sync delayed by vnode)\n", 2642 (long)ip->meta.inum); 2643 #endif 2644 hammer2_inode_delayed_sideq(ip); 2645 2646 hammer2_mtx_unlock(&ip->lock); 2647 hammer2_inode_drop(ip); 2648 2649 if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) { 2650 tsleep(&dorestart, 0, "h2syndel", 2); 2651 } 2652 hammer2_spin_ex(&pmp->list_spin); 2653 continue; 2654 } 2655 } else { 2656 vp = NULL; 2657 } 2658 2659 /* 2660 * If the inode wound up on a SIDEQ again it will already be 2661 * prepped for another PASS2. In this situation if we flush 2662 * it now we will just wind up flushing it again in the same 2663 * syncer run, so we might as well not flush it now. 2664 */ 2665 if (ip->flags & HAMMER2_INODE_SIDEQ) { 2666 hammer2_mtx_unlock(&ip->lock); 2667 hammer2_inode_drop(ip); 2668 if (vp) 2669 vput(vp); 2670 dorestart = 1; 2671 hammer2_spin_ex(&pmp->list_spin); 2672 continue; 2673 } 2674 2675 /* 2676 * Ok we have the inode exclusively locked and if vp is 2677 * not NULL that will also be exclusively locked. Do the 2678 * meat of the flush. 2679 * 2680 * vp token needed for v_rbdirty_tree check / vclrisdirty 2681 * sequencing. 
Though we hold the vnode exclusively so
2682 * we shouldn't need to hold the token also in this case.
2683 */
2684 if (vp) {
2685 vfsync(vp, MNT_WAIT, 1, NULL, NULL);
2686 bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */
2687 }
2688
2689 /*
2690 * If the inode has not yet been inserted into the tree
2691 * we must do so. Then sync and flush it. The flush should
2692 * update the parent.
2693 */
2694 if (ip->flags & HAMMER2_INODE_DELETING) {
2695 #ifdef HAMMER2_DEBUG_SYNC
2696 kprintf("inum %ld destroy\n", (long)ip->meta.inum);
2697 #endif
2698 hammer2_inode_chain_des(ip);
2699 atomic_add_long(&hammer2_iod_inode_deletes, 1);
2700 } else if (ip->flags & HAMMER2_INODE_CREATING) {
2701 #ifdef HAMMER2_DEBUG_SYNC
2702 kprintf("inum %ld insert\n", (long)ip->meta.inum);
2703 #endif
2704 hammer2_inode_chain_ins(ip);
2705 atomic_add_long(&hammer2_iod_inode_creates, 1);
2706 }
2707 #ifdef HAMMER2_DEBUG_SYNC
2708 kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
2709 #endif
2710
2711 /*
2712 * Because I kinda messed up the design and index the inodes
2713 * under the root inode, alongside the directory entries,
2714 * we can't flush the inode index under the iroot until the
2715 * end. If we do it now we might miss effects created by
2716 * other inodes on the SYNCQ.
2717 *
2718 * Do a normal (non-FSSYNC) flush instead, which allows the
2719 * vnode code to work the same. We don't want to force iroot
2720 * back onto the SIDEQ, and we also don't want the flush code
2721 * to update pfs_iroot_blocksets until the final flush later.
2722 *
2723 * XXX at the moment this will likely result in a double-flush
2724 * of the iroot chain.
2725 */
2726 hammer2_inode_chain_sync(ip);
2727 if (ip == pmp->iroot) {
2728 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
2729 } else {
2730 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
2731 HAMMER2_XOP_FSSYNC);
2732 }
2733 if (vp) {
2734 lwkt_gettoken(&vp->v_token);
2735 if ((ip->flags & (HAMMER2_INODE_MODIFIED |
2736 HAMMER2_INODE_RESIZED |
2737 HAMMER2_INODE_DIRTYDATA)) == 0 &&
2738 RB_EMPTY(&vp->v_rbdirty_tree) &&
2739 !bio_track_active(&vp->v_track_write)) {
2740 vclrisdirty(vp);
2741 } else {
2742 hammer2_inode_delayed_sideq(ip);
2743 }
2744 lwkt_reltoken(&vp->v_token);
2745 vput(vp);
2746 vp = NULL; /* safety */
2747 }
2748 atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
2749 hammer2_inode_unlock(ip); /* unlock+drop */
2750 /* ip pointer invalid */
2751
2752 /*
2753 * If the inode got dirtied after we dropped our locks,
2754 * it will have already been moved back to the SIDEQ.
2755 */
2756 hammer2_spin_ex(&pmp->list_spin);
2757 }
2758 hammer2_spin_unex(&pmp->list_spin);
2759 hammer2_pfs_memory_wakeup(pmp, 0);
2760
2761 if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
2762 #ifdef HAMMER2_DEBUG_SYNC
2763 kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
2764 /*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
2765 #endif
2766 dorestart = 1;
2767 goto restart;
2768 }
2769 #ifdef HAMMER2_DEBUG_SYNC
2770 kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
2771 /*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
2772 #endif
2773
2774 /*
2775 * We have to flush the PFS root last, even if it does not appear to
2776 * be dirty, because all the inodes in the PFS are indexed under it.
2777 * The normal flushing of iroot above would only occur if directory
2778 * entries under the root were changed.
2779 *
2780 * Specifying VOLHDR will cause an additional flush of hmp->spmp
2781 * for the media making up the cluster.
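 *
 * The resulting final sequence on iroot is roughly:
 *
 *      hammer2_inode_chain_sync(iroot);
 *      hammer2_inode_chain_flush(iroot, HAMMER2_XOP_INODE_STOP |
 *                                       HAMMER2_XOP_FSSYNC |
 *                                       HAMMER2_XOP_VOLHDR);
 *
 * which syncs the PFS root chain and then updates the volume
 * header(s) on the backing media.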
2782 */ 2783 if ((ip = pmp->iroot) != NULL) { 2784 hammer2_inode_ref(ip); 2785 hammer2_mtx_ex(&ip->lock); 2786 hammer2_inode_chain_sync(ip); 2787 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP | 2788 HAMMER2_XOP_FSSYNC | 2789 HAMMER2_XOP_VOLHDR); 2790 hammer2_inode_unlock(ip); /* unlock+drop */ 2791 } 2792 #ifdef HAMMER2_DEBUG_SYNC 2793 kprintf("FILESYSTEM SYNC STAGE 2 DONE\n"); 2794 #endif 2795 2796 /* 2797 * device bioq sync 2798 */ 2799 hammer2_bioq_sync(pmp); 2800 2801 #if 0 2802 /* 2803 * Generally speaking we now want to flush the media topology from 2804 * the iroot through to the inodes. The flush stops at any inode 2805 * boundary, which allows the frontend to continue running concurrent 2806 * modifying operations on inodes (including kernel flushes of 2807 * buffers) without interfering with the main sync. 2808 * 2809 * Use the XOP interface to concurrently flush all nodes to 2810 * synchronize the PFSROOT subtopology to the media. A standard 2811 * end-of-scan ENOENT error indicates cluster sufficiency. 2812 * 2813 * Note that this flush will not be visible on crash recovery until 2814 * we flush the super-root topology in the next loop. 2815 * 2816 * XXX For now wait for all flushes to complete. 2817 */ 2818 if (mp && (ip = pmp->iroot) != NULL) { 2819 /* 2820 * If unmounting try to flush everything including any 2821 * sub-trees under inodes, just in case there is dangling 2822 * modified data, as a safety. Otherwise just flush up to 2823 * the inodes in this stage. 2824 */ 2825 kprintf("MP & IROOT\n"); 2826 #ifdef HAMMER2_DEBUG_SYNC 2827 kprintf("FILESYSTEM SYNC STAGE 3 IROOT BEGIN\n"); 2828 #endif 2829 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 2830 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2831 HAMMER2_XOP_VOLHDR | 2832 HAMMER2_XOP_FSSYNC | 2833 HAMMER2_XOP_INODE_STOP); 2834 } else { 2835 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2836 HAMMER2_XOP_INODE_STOP | 2837 HAMMER2_XOP_VOLHDR | 2838 HAMMER2_XOP_FSSYNC | 2839 HAMMER2_XOP_INODE_STOP); 2840 } 2841 hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc); 2842 error = hammer2_xop_collect(&xop->head, 2843 HAMMER2_XOP_COLLECT_WAITALL); 2844 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2845 #ifdef HAMMER2_DEBUG_SYNC 2846 kprintf("FILESYSTEM SYNC STAGE 3 IROOT END\n"); 2847 #endif 2848 if (error == HAMMER2_ERROR_ENOENT) 2849 error = 0; 2850 else 2851 error = hammer2_error_to_errno(error); 2852 } else { 2853 error = 0; 2854 } 2855 #endif 2856 error = 0; /* XXX */ 2857 hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH); 2858 2859 return (error); 2860 } 2861 2862 static 2863 int 2864 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp) 2865 { 2866 hammer2_inode_t *ip; 2867 2868 KKASSERT(MAXFIDSZ >= 16); 2869 ip = VTOI(vp); 2870 fhp->fid_len = offsetof(struct fid, fid_data[16]); 2871 fhp->fid_ext = 0; 2872 ((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum; 2873 ((hammer2_tid_t *)fhp->fid_data)[1] = 0; 2874 2875 return 0; 2876 } 2877 2878 static 2879 int 2880 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 2881 struct fid *fhp, struct vnode **vpp) 2882 { 2883 hammer2_pfs_t *pmp; 2884 hammer2_tid_t inum; 2885 int error; 2886 2887 pmp = MPTOPMP(mp); 2888 inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK; 2889 if (vpp) { 2890 if (inum == 1) 2891 error = hammer2_vfs_root(mp, vpp); 2892 else 2893 error = hammer2_vfs_vget(mp, NULL, inum, vpp); 2894 } else { 2895 error = 0; 2896 } 2897 if (error) 2898 kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error); 2899 return error; 
2900 } 2901 2902 static 2903 int 2904 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 2905 int *exflagsp, struct ucred **credanonp) 2906 { 2907 hammer2_pfs_t *pmp; 2908 struct netcred *np; 2909 int error; 2910 2911 pmp = MPTOPMP(mp); 2912 np = vfs_export_lookup(mp, &pmp->export, nam); 2913 if (np) { 2914 *exflagsp = np->netc_exflags; 2915 *credanonp = &np->netc_anon; 2916 error = 0; 2917 } else { 2918 error = EACCES; 2919 } 2920 return error; 2921 } 2922 2923 /* 2924 * This handles hysteresis on regular file flushes. Because the BIOs are 2925 * routed to a thread it is possible for an excessive number to build up 2926 * and cause long front-end stalls long before the runningbuffspace limit 2927 * is hit, so we implement hammer2_flush_pipe to control the 2928 * hysteresis. 2929 * 2930 * This is a particular problem when compression is used. 2931 */ 2932 void 2933 hammer2_lwinprog_ref(hammer2_pfs_t *pmp) 2934 { 2935 atomic_add_int(&pmp->count_lwinprog, 1); 2936 } 2937 2938 void 2939 hammer2_lwinprog_drop(hammer2_pfs_t *pmp) 2940 { 2941 int lwinprog; 2942 2943 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1); 2944 if ((lwinprog & HAMMER2_LWINPROG_WAITING) && 2945 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) { 2946 atomic_clear_int(&pmp->count_lwinprog, 2947 HAMMER2_LWINPROG_WAITING); 2948 wakeup(&pmp->count_lwinprog); 2949 } 2950 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) && 2951 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) { 2952 atomic_clear_int(&pmp->count_lwinprog, 2953 HAMMER2_LWINPROG_WAITING0); 2954 wakeup(&pmp->count_lwinprog); 2955 } 2956 } 2957 2958 void 2959 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe) 2960 { 2961 int lwinprog; 2962 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING : 2963 HAMMER2_LWINPROG_WAITING0; 2964 2965 for (;;) { 2966 lwinprog = pmp->count_lwinprog; 2967 cpu_ccfence(); 2968 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 2969 break; 2970 tsleep_interlock(&pmp->count_lwinprog, 0); 2971 atomic_set_int(&pmp->count_lwinprog, lwflag); 2972 lwinprog = pmp->count_lwinprog; 2973 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 2974 break; 2975 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz); 2976 } 2977 } 2978 2979 /* 2980 * It is possible for an excessive number of dirty chains or dirty inodes 2981 * to build up. When this occurs we start an asynchronous filesystem sync. 2982 * If the level continues to build up, we stall, waiting for it to drop, 2983 * with some hysteresis. 2984 * 2985 * This relies on the kernel calling hammer2_vfs_modifying() prior to 2986 * obtaining any vnode locks before making a modifying VOP call. 2987 */ 2988 static int 2989 hammer2_vfs_modifying(struct mount *mp) 2990 { 2991 if (mp->mnt_flag & MNT_RDONLY) 2992 return EROFS; 2993 hammer2_pfs_memory_wait(MPTOPMP(mp)); 2994 2995 return 0; 2996 } 2997 2998 /* 2999 * Initiate an asynchronous filesystem sync and, with hysteresis, 3000 * stall if the internal data structure count becomes too bloated. 
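 *
 * Roughly: the syncer is kicked once the dirty chain or dirty inode
 * count exceeds 1/2 of its limit, the caller stalls once a limit is
 * actually reached, and hammer2_pfs_memory_wakeup() lifts the stall
 * again once the counts fall back to about 2/3 of the limits.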
3001 */ 3002 void 3003 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp) 3004 { 3005 uint32_t waiting; 3006 int pcatch; 3007 int error; 3008 3009 if (pmp == NULL || pmp->mp == NULL) 3010 return; 3011 3012 for (;;) { 3013 waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK; 3014 cpu_ccfence(); 3015 3016 /* 3017 * Start the syncer running at 1/2 the limit 3018 */ 3019 if (waiting > hammer2_limit_dirty_chains / 2 || 3020 pmp->sideq_count > hammer2_limit_dirty_inodes / 2) { 3021 trigger_syncer(pmp->mp); 3022 } 3023 3024 /* 3025 * Stall at the limit waiting for the counts to drop. 3026 * This code will typically be woken up once the count 3027 * drops below 3/4 the limit, or in one second. 3028 */ 3029 if (waiting < hammer2_limit_dirty_chains && 3030 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3031 break; 3032 } 3033 3034 pcatch = curthread->td_proc ? PCATCH : 0; 3035 3036 tsleep_interlock(&pmp->inmem_dirty_chains, pcatch); 3037 atomic_set_int(&pmp->inmem_dirty_chains, 3038 HAMMER2_DIRTYCHAIN_WAITING); 3039 if (waiting < hammer2_limit_dirty_chains && 3040 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3041 break; 3042 } 3043 trigger_syncer(pmp->mp); 3044 error = tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED | pcatch, 3045 "h2memw", hz); 3046 if (error == ERESTART) 3047 break; 3048 } 3049 } 3050 3051 /* 3052 * Wake up any stalled frontend ops waiting, with hysteresis, using 3053 * 2/3 of the limit. 3054 */ 3055 void 3056 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count) 3057 { 3058 uint32_t waiting; 3059 3060 if (pmp) { 3061 waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count); 3062 /* don't need --waiting to test flag */ 3063 3064 if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) && 3065 (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <= 3066 hammer2_limit_dirty_chains * 2 / 3 && 3067 pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) { 3068 atomic_clear_int(&pmp->inmem_dirty_chains, 3069 HAMMER2_DIRTYCHAIN_WAITING); 3070 wakeup(&pmp->inmem_dirty_chains); 3071 } 3072 } 3073 } 3074 3075 void 3076 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp) 3077 { 3078 if (pmp) { 3079 atomic_add_int(&pmp->inmem_dirty_chains, 1); 3080 } 3081 } 3082 3083 /* 3084 * Volume header data locks 3085 */ 3086 void 3087 hammer2_voldata_lock(hammer2_dev_t *hmp) 3088 { 3089 lockmgr(&hmp->vollk, LK_EXCLUSIVE); 3090 } 3091 3092 void 3093 hammer2_voldata_unlock(hammer2_dev_t *hmp) 3094 { 3095 lockmgr(&hmp->vollk, LK_RELEASE); 3096 } 3097 3098 void 3099 hammer2_voldata_modify(hammer2_dev_t *hmp) 3100 { 3101 if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) { 3102 atomic_add_long(&hammer2_count_modified_chains, 1); 3103 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED); 3104 hammer2_pfs_memory_inc(hmp->vchain.pmp); 3105 } 3106 } 3107 3108 /* 3109 * Returns 0 if the filesystem has tons of free space 3110 * Returns 1 if the filesystem has less than 10% remaining 3111 * Returns 2 if the filesystem has less than 2%/5% (user/root) remaining. 
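 *
 * With free_nominal being the smallest allocator_free among the
 * MASTER/SOFT_MASTER devices and free_reserved the largest per-device
 * reserve, the checks below are approximately:
 *
 *      user:  (free_nominal - bytes) < free_reserved      -> 2
 *      root:  (free_nominal - bytes) < free_reserved / 2  -> 2
 *      any:   (free_nominal - bytes) < free_reserved * 2  -> 1
 *      else                                               -> 0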
3112 */ 3113 int 3114 hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred) 3115 { 3116 hammer2_pfs_t *pmp; 3117 hammer2_dev_t *hmp; 3118 hammer2_off_t free_reserved; 3119 hammer2_off_t free_nominal; 3120 int i; 3121 3122 pmp = ip->pmp; 3123 3124 if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) { 3125 free_reserved = HAMMER2_SEGSIZE; 3126 free_nominal = 0x7FFFFFFFFFFFFFFFLLU; 3127 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 3128 hmp = pmp->pfs_hmps[i]; 3129 if (hmp == NULL) 3130 continue; 3131 if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER && 3132 pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER) 3133 continue; 3134 3135 if (free_nominal > hmp->voldata.allocator_free) 3136 free_nominal = hmp->voldata.allocator_free; 3137 if (free_reserved < hmp->free_reserved) 3138 free_reserved = hmp->free_reserved; 3139 } 3140 3141 /* 3142 * SMP races ok 3143 */ 3144 pmp->free_reserved = free_reserved; 3145 pmp->free_nominal = free_nominal; 3146 pmp->free_ticks = ticks; 3147 } else { 3148 free_reserved = pmp->free_reserved; 3149 free_nominal = pmp->free_nominal; 3150 } 3151 if (cred && cred->cr_uid != 0) { 3152 if ((int64_t)(free_nominal - bytes) < 3153 (int64_t)free_reserved) { 3154 return 2; 3155 } 3156 } else { 3157 if ((int64_t)(free_nominal - bytes) < 3158 (int64_t)free_reserved / 2) { 3159 return 2; 3160 } 3161 } 3162 if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2) 3163 return 1; 3164 return 0; 3165 } 3166 3167 /* 3168 * Debugging 3169 */ 3170 void 3171 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int bi, int *countp, 3172 char pfx, u_int flags) 3173 { 3174 hammer2_chain_t *scan; 3175 hammer2_chain_t *parent; 3176 3177 --*countp; 3178 if (*countp == 0) { 3179 kprintf("%*.*s...\n", tab, tab, ""); 3180 return; 3181 } 3182 if (*countp < 0) 3183 return; 3184 kprintf("%*.*s%c-chain %p %s.%-3d %016jx %016jx/%-2d mir=%016jx\n", 3185 tab, tab, "", pfx, chain, 3186 hammer2_bref_type_str(chain->bref.type), bi, 3187 chain->bref.data_off, chain->bref.key, chain->bref.keybits, 3188 chain->bref.mirror_tid); 3189 3190 kprintf("%*.*s [%08x] (%s) refs=%d", 3191 tab, tab, "", 3192 chain->flags, 3193 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE && 3194 chain->data) ? (char *)chain->data->ipdata.filename : "?"), 3195 chain->refs); 3196 3197 parent = chain->parent; 3198 if (parent) 3199 kprintf("\n%*.*s p=%p [pflags %08x prefs %d]", 3200 tab, tab, "", 3201 parent, parent->flags, parent->refs); 3202 if (RB_EMPTY(&chain->core.rbtree)) { 3203 kprintf("\n"); 3204 } else { 3205 int bi = 0; 3206 kprintf(" {\n"); 3207 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) { 3208 if ((scan->flags & flags) || flags == (u_int)-1) { 3209 hammer2_dump_chain(scan, tab + 4, bi, countp, 3210 'a', flags); 3211 } 3212 bi++; 3213 } 3214 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data) 3215 kprintf("%*.*s}(%s)\n", tab, tab, "", 3216 chain->data->ipdata.filename); 3217 else 3218 kprintf("%*.*s}\n", tab, tab, ""); 3219 } 3220 } 3221
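#if 0
/*
 * Illustrative sketch only (never compiled): the NFS file handle
 * produced by hammer2_vfs_vptofh() stores two 64-bit words in
 * fid_data.  Only the first word is meaningful at the moment and
 * hammer2_vfs_fhtovp() masks it with HAMMER2_DIRHASH_USERMSK,
 * treating inum 1 as the PFS root.  A decoder would look roughly
 * like this (the helper name is hypothetical):
 */
static hammer2_tid_t
hammer2_fid_to_inum(const struct fid *fhp)
{
	return (((const hammer2_tid_t *)fhp->fid_data)[0] &
		HAMMER2_DIRHASH_USERMSK);
}
#endif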