1 /* 2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 */ 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/nlookup.h> 39 #include <sys/vnode.h> 40 #include <sys/mount.h> 41 #include <sys/fcntl.h> 42 #include <sys/buf.h> 43 #include <sys/uuid.h> 44 #include <sys/vfsops.h> 45 #include <sys/sysctl.h> 46 #include <sys/socket.h> 47 #include <sys/objcache.h> 48 49 #include <sys/proc.h> 50 #include <sys/mountctl.h> 51 #include <sys/dirent.h> 52 #include <sys/uio.h> 53 54 #include "hammer2.h" 55 #include "hammer2_disk.h" 56 #include "hammer2_mount.h" 57 #include "hammer2_lz4.h" 58 59 #include "zlib/hammer2_zlib.h" 60 61 MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); 62 63 struct hammer2_sync_info { 64 int error; 65 int waitfor; 66 int pass; 67 }; 68 69 TAILQ_HEAD(hammer2_mntlist, hammer2_dev); 70 static struct hammer2_mntlist hammer2_mntlist; 71 72 struct hammer2_pfslist hammer2_pfslist; 73 struct hammer2_pfslist hammer2_spmplist; 74 struct lock hammer2_mntlk; 75 76 int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT; 77 int hammer2_debug; 78 int hammer2_xopgroups; 79 long hammer2_debug_inode; 80 int hammer2_cluster_meta_read = 1; /* physical read-ahead */ 81 int hammer2_cluster_data_read = 4; /* physical read-ahead */ 82 int hammer2_cluster_write = 0; /* physical write clustering */ 83 int hammer2_dedup_enable = 1; 84 int hammer2_always_compress = 0; /* always try to compress */ 85 int hammer2_flush_pipe = 100; 86 int hammer2_dio_count; 87 int hammer2_dio_limit = 256; 88 int hammer2_bulkfree_tps = 5000; 89 int hammer2_worker_rmask = 3; 90 long hammer2_chain_allocs; 91 long hammer2_limit_dirty_chains; 92 long hammer2_limit_dirty_inodes; 93 long hammer2_count_modified_chains; 94 long hammer2_iod_file_read; 95 long hammer2_iod_meta_read; 96 long hammer2_iod_indr_read; 97 long hammer2_iod_fmap_read; 98 long hammer2_iod_volu_read; 99 long hammer2_iod_file_write; 100 long hammer2_iod_file_wembed; 101 long hammer2_iod_file_wzero; 102 long hammer2_iod_file_wdedup; 103 long hammer2_iod_meta_write; 104 long hammer2_iod_indr_write; 105 long hammer2_iod_fmap_write; 106 long hammer2_iod_volu_write; 107 static long hammer2_iod_inode_creates; 108 static long hammer2_iod_inode_deletes; 109 110 long hammer2_process_icrc32; 111 long hammer2_process_xxhash64; 112 113 MALLOC_DECLARE(M_HAMMER2_CBUFFER); 114 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer", 115 "Buffer used for compression."); 116 117 MALLOC_DECLARE(M_HAMMER2_DEBUFFER); 118 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer", 119 "Buffer used for decompression."); 120 121 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem"); 122 123 SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD, 124 &hammer2_supported_version, 0, ""); 125 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW, 126 &hammer2_debug, 0, ""); 127 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW, 128 &hammer2_debug_inode, 0, ""); 129 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW, 130 &hammer2_cluster_meta_read, 0, ""); 131 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW, 132 &hammer2_cluster_data_read, 0, ""); 133 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW, 134 &hammer2_cluster_write, 0, ""); 135 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW, 136 &hammer2_dedup_enable, 0, ""); 137 SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW, 138 &hammer2_always_compress, 0, ""); 139 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW, 140 
&hammer2_flush_pipe, 0, ""); 141 SYSCTL_INT(_vfs_hammer2, OID_AUTO, worker_rmask, CTLFLAG_RW, 142 &hammer2_worker_rmask, 0, ""); 143 SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW, 144 &hammer2_bulkfree_tps, 0, ""); 145 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW, 146 &hammer2_chain_allocs, 0, ""); 147 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW, 148 &hammer2_limit_dirty_chains, 0, ""); 149 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW, 150 &hammer2_limit_dirty_inodes, 0, ""); 151 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW, 152 &hammer2_count_modified_chains, 0, ""); 153 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD, 154 &hammer2_dio_count, 0, ""); 155 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW, 156 &hammer2_dio_limit, 0, ""); 157 158 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW, 159 &hammer2_iod_file_read, 0, ""); 160 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW, 161 &hammer2_iod_meta_read, 0, ""); 162 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW, 163 &hammer2_iod_indr_read, 0, ""); 164 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW, 165 &hammer2_iod_fmap_read, 0, ""); 166 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW, 167 &hammer2_iod_volu_read, 0, ""); 168 169 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW, 170 &hammer2_iod_file_write, 0, ""); 171 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW, 172 &hammer2_iod_file_wembed, 0, ""); 173 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW, 174 &hammer2_iod_file_wzero, 0, ""); 175 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW, 176 &hammer2_iod_file_wdedup, 0, ""); 177 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW, 178 &hammer2_iod_meta_write, 0, ""); 179 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW, 180 &hammer2_iod_indr_write, 0, ""); 181 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW, 182 &hammer2_iod_fmap_write, 0, ""); 183 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW, 184 &hammer2_iod_volu_write, 0, ""); 185 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RW, 186 &hammer2_iod_inode_creates, 0, ""); 187 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RW, 188 &hammer2_iod_inode_deletes, 0, ""); 189 190 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RW, 191 &hammer2_process_icrc32, 0, ""); 192 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RW, 193 &hammer2_process_xxhash64, 0, ""); 194 195 static int hammer2_vfs_init(struct vfsconf *conf); 196 static int hammer2_vfs_uninit(struct vfsconf *vfsp); 197 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 198 struct ucred *cred); 199 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *, 200 struct ucred *); 201 static int hammer2_recovery(hammer2_dev_t *hmp); 202 static int hammer2_vfs_unmount(struct mount *mp, int mntflags); 203 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp); 204 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, 205 struct ucred *cred); 206 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, 207 struct ucred *cred); 208 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 209 struct fid *fhp, struct vnode **vpp); 210 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp); 
211 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 212 int *exflagsp, struct ucred **credanonp); 213 static int hammer2_vfs_modifying(struct mount *mp); 214 215 static void hammer2_update_pmps(hammer2_dev_t *hmp); 216 217 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp); 218 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, 219 hammer2_dev_t *hmp); 220 static int hammer2_fixup_pfses(hammer2_dev_t *hmp); 221 222 /* 223 * HAMMER2 vfs operations. 224 */ 225 static struct vfsops hammer2_vfsops = { 226 .vfs_flags = 0, 227 .vfs_init = hammer2_vfs_init, 228 .vfs_uninit = hammer2_vfs_uninit, 229 .vfs_sync = hammer2_vfs_sync, 230 .vfs_mount = hammer2_vfs_mount, 231 .vfs_unmount = hammer2_vfs_unmount, 232 .vfs_root = hammer2_vfs_root, 233 .vfs_statfs = hammer2_vfs_statfs, 234 .vfs_statvfs = hammer2_vfs_statvfs, 235 .vfs_vget = hammer2_vfs_vget, 236 .vfs_vptofh = hammer2_vfs_vptofh, 237 .vfs_fhtovp = hammer2_vfs_fhtovp, 238 .vfs_checkexp = hammer2_vfs_checkexp, 239 .vfs_modifying = hammer2_vfs_modifying 240 }; 241 242 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", ""); 243 244 VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE); 245 MODULE_VERSION(hammer2, 1); 246 247 static 248 int 249 hammer2_vfs_init(struct vfsconf *conf) 250 { 251 static struct objcache_malloc_args margs_read; 252 static struct objcache_malloc_args margs_write; 253 static struct objcache_malloc_args margs_vop; 254 255 int error; 256 257 error = 0; 258 kmalloc_raise_limit(M_HAMMER2, 0); /* unlimited */ 259 260 /* 261 * hammer2_xopgroups must be even and is most optimal if 262 * 2 x ncpus so strategy functions can be queued to the same 263 * cpu. 264 */ 265 hammer2_xopgroups = HAMMER2_XOPGROUPS_MIN; 266 if (hammer2_xopgroups < ncpus * 2) 267 hammer2_xopgroups = ncpus * 2; 268 269 /* 270 * A large DIO cache is needed to retain dedup enablement masks. 271 * The bulkfree code clears related masks as part of the disk block 272 * recycling algorithm, preventing it from being used for a later 273 * dedup. 274 * 275 * NOTE: A large buffer cache can actually interfere with dedup 276 * operation because we dedup based on media physical buffers 277 * and not logical buffers. Try to make the DIO case large 278 * enough to avoid this problem, but also cap it. 279 */ 280 hammer2_dio_limit = nbuf * 2; 281 if (hammer2_dio_limit > 100000) 282 hammer2_dio_limit = 100000; 283 284 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref)) 285 error = EINVAL; 286 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data)) 287 error = EINVAL; 288 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data)) 289 error = EINVAL; 290 291 if (error) 292 kprintf("HAMMER2 structure size mismatch; cannot continue.\n"); 293 294 margs_read.objsize = 65536; 295 margs_read.mtype = M_HAMMER2_DEBUFFER; 296 297 margs_write.objsize = 32768; 298 margs_write.mtype = M_HAMMER2_CBUFFER; 299 300 margs_vop.objsize = sizeof(hammer2_xop_t); 301 margs_vop.mtype = M_HAMMER2; 302 303 /* 304 * Note thaht for the XOPS cache we want backing store allocations 305 * to use M_ZERO. This is not allowed in objcache_get() (to avoid 306 * confusion), so use the backing store function that does it. This 307 * means that initial XOPS objects are zerod but REUSED objects are 308 * not. So we are responsible for cleaning the object up sufficiently 309 * for our needs before objcache_put()ing it back (typically just the 310 * FIFO indices). 
311 */ 312 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc, 313 0, 1, NULL, NULL, NULL, 314 objcache_malloc_alloc, 315 objcache_malloc_free, 316 &margs_read); 317 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc, 318 0, 1, NULL, NULL, NULL, 319 objcache_malloc_alloc, 320 objcache_malloc_free, 321 &margs_write); 322 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc, 323 0, 1, NULL, NULL, NULL, 324 objcache_malloc_alloc_zero, 325 objcache_malloc_free, 326 &margs_vop); 327 328 329 lockinit(&hammer2_mntlk, "mntlk", 0, 0); 330 TAILQ_INIT(&hammer2_mntlist); 331 TAILQ_INIT(&hammer2_pfslist); 332 TAILQ_INIT(&hammer2_spmplist); 333 334 hammer2_limit_dirty_chains = maxvnodes / 10; 335 if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS) 336 hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS; 337 if (hammer2_limit_dirty_chains < 1000) 338 hammer2_limit_dirty_chains = 1000; 339 340 hammer2_limit_dirty_inodes = maxvnodes / 25; 341 if (hammer2_limit_dirty_inodes < 100) 342 hammer2_limit_dirty_inodes = 100; 343 if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES) 344 hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES; 345 346 return (error); 347 } 348 349 static 350 int 351 hammer2_vfs_uninit(struct vfsconf *vfsp __unused) 352 { 353 objcache_destroy(cache_buffer_read); 354 objcache_destroy(cache_buffer_write); 355 objcache_destroy(cache_xops); 356 return 0; 357 } 358 359 /* 360 * Core PFS allocator. Used to allocate or reference the pmp structure 361 * for PFS cluster mounts and the spmp structure for media (hmp) structures. 362 * The pmp can be passed in or loaded by this function using the chain and 363 * inode data. 364 * 365 * pmp->modify_tid tracks new modify_tid transaction ids for front-end 366 * transactions. Note that synchronization does not use this field. 367 * (typically frontend operations and synchronization cannot run on the 368 * same PFS node at the same time). 369 * 370 * XXX check locking 371 */ 372 hammer2_pfs_t * 373 hammer2_pfsalloc(hammer2_chain_t *chain, 374 const hammer2_inode_data_t *ripdata, 375 hammer2_tid_t modify_tid, hammer2_dev_t *force_local) 376 { 377 hammer2_pfs_t *pmp; 378 hammer2_inode_t *iroot; 379 int count; 380 int i; 381 int j; 382 383 pmp = NULL; 384 385 /* 386 * Locate or create the PFS based on the cluster id. If ripdata 387 * is NULL this is a spmp which is unique and is always allocated. 388 * 389 * If the device is mounted in local mode all PFSs are considered 390 * independent and not part of any cluster (for debugging only). 
391 */ 392 if (ripdata) { 393 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 394 if (force_local != pmp->force_local) 395 continue; 396 if (force_local == NULL && 397 bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid, 398 sizeof(pmp->pfs_clid)) == 0) { 399 break; 400 } else if (force_local && pmp->pfs_names[0] && 401 strcmp(pmp->pfs_names[0], ripdata->filename) == 0) { 402 break; 403 } 404 } 405 } 406 407 if (pmp == NULL) { 408 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO); 409 pmp->force_local = force_local; 410 hammer2_trans_manage_init(pmp); 411 kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes", 412 sizeof(struct hammer2_inode)); 413 lockinit(&pmp->lock, "pfslk", 0, 0); 414 lockinit(&pmp->lock_nlink, "h2nlink", 0, 0); 415 spin_init(&pmp->inum_spin, "hm2pfsalloc_inum"); 416 spin_init(&pmp->xop_spin, "h2xop"); 417 spin_init(&pmp->lru_spin, "h2lru"); 418 RB_INIT(&pmp->inum_tree); 419 TAILQ_INIT(&pmp->syncq); 420 TAILQ_INIT(&pmp->depq); 421 TAILQ_INIT(&pmp->lru_list); 422 spin_init(&pmp->list_spin, "h2pfsalloc_list"); 423 424 /* 425 * Save the last media transaction id for the flusher. Set 426 * initial 427 */ 428 if (ripdata) { 429 pmp->pfs_clid = ripdata->meta.pfs_clid; 430 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry); 431 } else { 432 pmp->flags |= HAMMER2_PMPF_SPMP; 433 TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry); 434 } 435 436 /* 437 * The synchronization thread may start too early, make 438 * sure it stays frozen until we are ready to let it go. 439 * XXX 440 */ 441 /* 442 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN | 443 HAMMER2_THREAD_REMASTER; 444 */ 445 } 446 447 /* 448 * Create the PFS's root inode and any missing XOP helper threads. 449 */ 450 if ((iroot = pmp->iroot) == NULL) { 451 iroot = hammer2_inode_get(pmp, NULL, 1, -1); 452 if (ripdata) 453 iroot->meta = ripdata->meta; 454 pmp->iroot = iroot; 455 hammer2_inode_ref(iroot); 456 hammer2_inode_unlock(iroot); 457 } 458 459 /* 460 * Stop here if no chain is passed in. 461 */ 462 if (chain == NULL) 463 goto done; 464 465 /* 466 * When a chain is passed in we must add it to the PFS's root 467 * inode, update pmp->pfs_types[], and update the syncronization 468 * threads. 469 * 470 * When forcing local mode, mark the PFS as a MASTER regardless. 471 * 472 * At the moment empty spots can develop due to removals or failures. 473 * Ultimately we want to re-fill these spots but doing so might 474 * confused running code. XXX 475 */ 476 hammer2_inode_ref(iroot); 477 hammer2_mtx_ex(&iroot->lock); 478 j = iroot->cluster.nchains; 479 480 if (j == HAMMER2_MAXCLUSTER) { 481 kprintf("hammer2_pfsalloc: cluster full!\n"); 482 /* XXX fatal error? */ 483 } else { 484 KKASSERT(chain->pmp == NULL); 485 chain->pmp = pmp; 486 hammer2_chain_ref(chain); 487 iroot->cluster.array[j].chain = chain; 488 if (force_local) 489 pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER; 490 else 491 pmp->pfs_types[j] = ripdata->meta.pfs_type; 492 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2); 493 pmp->pfs_hmps[j] = chain->hmp; 494 hammer2_spin_ex(&pmp->inum_spin); 495 pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset; 496 hammer2_spin_unex(&pmp->inum_spin); 497 498 /* 499 * If the PFS is already mounted we must account 500 * for the mount_count here. 501 */ 502 if (pmp->mp) 503 ++chain->hmp->mount_count; 504 505 /* 506 * May have to fixup dirty chain tracking. Previous 507 * pmp was NULL so nothing to undo. 
508 */ 509 if (chain->flags & HAMMER2_CHAIN_MODIFIED) 510 hammer2_pfs_memory_inc(pmp); 511 ++j; 512 } 513 iroot->cluster.nchains = j; 514 515 /* 516 * Update nmasters from any PFS inode which is part of the cluster. 517 * It is possible that this will result in a value which is too 518 * high. MASTER PFSs are authoritative for pfs_nmasters and will 519 * override this value later on. 520 * 521 * (This informs us of masters that might not currently be 522 * discoverable by this mount). 523 */ 524 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) { 525 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters; 526 } 527 528 /* 529 * Count visible masters. Masters are usually added with 530 * ripdata->meta.pfs_nmasters set to 1. This detects when there 531 * are more (XXX and must update the master inodes). 532 */ 533 count = 0; 534 for (i = 0; i < iroot->cluster.nchains; ++i) { 535 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) 536 ++count; 537 } 538 if (pmp->pfs_nmasters < count) 539 pmp->pfs_nmasters = count; 540 541 /* 542 * Create missing synchronization and support threads. 543 * 544 * Single-node masters (including snapshots) have nothing to 545 * synchronize and do not require this thread. 546 * 547 * Multi-node masters or any number of soft masters, slaves, copy, 548 * or other PFS types need the thread. 549 * 550 * Each thread is responsible for its particular cluster index. 551 * We use independent threads so stalls or mismatches related to 552 * any given target do not affect other targets. 553 */ 554 for (i = 0; i < iroot->cluster.nchains; ++i) { 555 /* 556 * Single-node masters (including snapshots) have nothing 557 * to synchronize and will make direct xops support calls, 558 * thus they do not require this thread. 559 * 560 * Note that there can be thousands of snapshots. We do not 561 * want to create thousands of threads. 562 */ 563 if (pmp->pfs_nmasters <= 1 && 564 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) { 565 continue; 566 } 567 568 /* 569 * Sync support thread 570 */ 571 if (pmp->sync_thrs[i].td == NULL) { 572 hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL, 573 "h2nod", i, -1, 574 hammer2_primary_sync_thread); 575 } 576 } 577 578 /* 579 * Create missing Xop threads 580 * 581 * NOTE: We create helper threads for all mounted PFSs or any 582 * PFSs with 2+ nodes (so the sync thread can update them, 583 * even if not mounted). 584 */ 585 if (pmp->mp || iroot->cluster.nchains >= 2) 586 hammer2_xop_helper_create(pmp); 587 588 hammer2_mtx_unlock(&iroot->lock); 589 hammer2_inode_drop(iroot); 590 done: 591 return pmp; 592 } 593 594 /* 595 * Deallocate an element of a probed PFS. If destroying and this is a 596 * MASTER, adjust nmasters. 597 * 598 * This function does not physically destroy the PFS element in its device 599 * under the super-root (see hammer2_ioctl_pfs_delete()). 600 */ 601 void 602 hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying) 603 { 604 hammer2_inode_t *iroot; 605 hammer2_chain_t *chain; 606 int j; 607 608 /* 609 * Cleanup our reference on iroot. iroot is (should) not be needed 610 * by the flush code. 611 */ 612 iroot = pmp->iroot; 613 if (iroot) { 614 /* 615 * Stop synchronizing 616 * 617 * XXX flush after acquiring the iroot lock. 618 * XXX clean out the cluster index from all inode structures. 619 */ 620 hammer2_thr_delete(&pmp->sync_thrs[clindex]); 621 622 /* 623 * Remove the cluster index from the group. If destroying 624 * the PFS and this is a master, adjust pfs_nmasters. 
625 */ 626 hammer2_mtx_ex(&iroot->lock); 627 chain = iroot->cluster.array[clindex].chain; 628 iroot->cluster.array[clindex].chain = NULL; 629 630 switch(pmp->pfs_types[clindex]) { 631 case HAMMER2_PFSTYPE_MASTER: 632 if (destroying && pmp->pfs_nmasters > 0) 633 --pmp->pfs_nmasters; 634 /* XXX adjust ripdata->meta.pfs_nmasters */ 635 break; 636 default: 637 break; 638 } 639 pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE; 640 641 hammer2_mtx_unlock(&iroot->lock); 642 643 /* 644 * Release the chain. 645 */ 646 if (chain) { 647 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 648 hammer2_chain_drop(chain); 649 } 650 651 /* 652 * Terminate all XOP threads for the cluster index. 653 */ 654 if (pmp->xop_groups) { 655 for (j = 0; j < hammer2_xopgroups; ++j) { 656 hammer2_thr_delete( 657 &pmp->xop_groups[j].thrs[clindex]); 658 } 659 } 660 } 661 } 662 663 /* 664 * Destroy a PFS, typically only occurs after the last mount on a device 665 * has gone away. 666 */ 667 static void 668 hammer2_pfsfree(hammer2_pfs_t *pmp) 669 { 670 hammer2_inode_t *iroot; 671 hammer2_chain_t *chain; 672 int chains_still_present = 0; 673 int i; 674 int j; 675 676 /* 677 * Cleanup our reference on iroot. iroot is (should) not be needed 678 * by the flush code. 679 */ 680 if (pmp->flags & HAMMER2_PMPF_SPMP) 681 TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry); 682 else 683 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry); 684 685 /* 686 * Cleanup chains remaining on LRU list. 687 */ 688 hammer2_spin_ex(&pmp->lru_spin); 689 while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) { 690 KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU); 691 atomic_add_int(&pmp->lru_count, -1); 692 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU); 693 TAILQ_REMOVE(&pmp->lru_list, chain, lru_node); 694 hammer2_chain_ref(chain); 695 hammer2_spin_unex(&pmp->lru_spin); 696 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 697 hammer2_chain_drop(chain); 698 hammer2_spin_ex(&pmp->lru_spin); 699 } 700 hammer2_spin_unex(&pmp->lru_spin); 701 702 /* 703 * Clean up iroot 704 */ 705 iroot = pmp->iroot; 706 if (iroot) { 707 for (i = 0; i < iroot->cluster.nchains; ++i) { 708 hammer2_thr_delete(&pmp->sync_thrs[i]); 709 if (pmp->xop_groups) { 710 for (j = 0; j < hammer2_xopgroups; ++j) 711 hammer2_thr_delete( 712 &pmp->xop_groups[j].thrs[i]); 713 } 714 chain = iroot->cluster.array[i].chain; 715 if (chain && !RB_EMPTY(&chain->core.rbtree)) { 716 kprintf("hammer2: Warning pmp %p still " 717 "has active chains\n", pmp); 718 chains_still_present = 1; 719 } 720 } 721 KASSERT(iroot->refs == 1, 722 ("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs)); 723 724 /* ref for iroot */ 725 hammer2_inode_drop(iroot); 726 pmp->iroot = NULL; 727 } 728 729 /* 730 * Free remaining pmp resources 731 */ 732 if (chains_still_present) { 733 kprintf("hammer2: cannot free pmp %p, still in use\n", pmp); 734 } else { 735 kmalloc_destroy_obj(&pmp->minode); 736 kfree(pmp, M_HAMMER2); 737 } 738 } 739 740 /* 741 * Remove all references to hmp from the pfs list. Any PFS which becomes 742 * empty is terminated and freed. 743 * 744 * XXX inefficient. 
745 */ 746 static void 747 hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which) 748 { 749 hammer2_pfs_t *pmp; 750 hammer2_inode_t *iroot; 751 hammer2_chain_t *rchain; 752 int i; 753 int j; 754 struct hammer2_pfslist *wlist; 755 756 if (which == 0) 757 wlist = &hammer2_pfslist; 758 else 759 wlist = &hammer2_spmplist; 760 again: 761 TAILQ_FOREACH(pmp, wlist, mntentry) { 762 if ((iroot = pmp->iroot) == NULL) 763 continue; 764 765 /* 766 * Determine if this PFS is affected. If it is we must 767 * freeze all management threads and lock its iroot. 768 * 769 * Freezing a management thread forces it idle, operations 770 * in-progress will be aborted and it will have to start 771 * over again when unfrozen, or exit if told to exit. 772 */ 773 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 774 if (pmp->pfs_hmps[i] == hmp) 775 break; 776 } 777 if (i == HAMMER2_MAXCLUSTER) 778 continue; 779 780 hammer2_vfs_sync_pmp(pmp, MNT_WAIT); 781 782 /* 783 * Make sure all synchronization threads are locked 784 * down. 785 */ 786 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 787 if (pmp->pfs_hmps[i] == NULL) 788 continue; 789 hammer2_thr_freeze_async(&pmp->sync_thrs[i]); 790 if (pmp->xop_groups) { 791 for (j = 0; j < hammer2_xopgroups; ++j) { 792 hammer2_thr_freeze_async( 793 &pmp->xop_groups[j].thrs[i]); 794 } 795 } 796 } 797 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 798 if (pmp->pfs_hmps[i] == NULL) 799 continue; 800 hammer2_thr_freeze(&pmp->sync_thrs[i]); 801 if (pmp->xop_groups) { 802 for (j = 0; j < hammer2_xopgroups; ++j) { 803 hammer2_thr_freeze( 804 &pmp->xop_groups[j].thrs[i]); 805 } 806 } 807 } 808 809 /* 810 * Lock the inode and clean out matching chains. 811 * Note that we cannot use hammer2_inode_lock_*() 812 * here because that would attempt to validate the 813 * cluster that we are in the middle of ripping 814 * apart. 815 * 816 * WARNING! We are working directly on the inodes 817 * embedded cluster. 818 */ 819 hammer2_mtx_ex(&iroot->lock); 820 821 /* 822 * Remove the chain from matching elements of the PFS. 823 */ 824 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 825 if (pmp->pfs_hmps[i] != hmp) 826 continue; 827 hammer2_thr_delete(&pmp->sync_thrs[i]); 828 if (pmp->xop_groups) { 829 for (j = 0; j < hammer2_xopgroups; ++j) { 830 hammer2_thr_delete( 831 &pmp->xop_groups[j].thrs[i]); 832 } 833 } 834 rchain = iroot->cluster.array[i].chain; 835 iroot->cluster.array[i].chain = NULL; 836 pmp->pfs_types[i] = 0; 837 if (pmp->pfs_names[i]) { 838 kfree(pmp->pfs_names[i], M_HAMMER2); 839 pmp->pfs_names[i] = NULL; 840 } 841 if (rchain) { 842 hammer2_chain_drop(rchain); 843 /* focus hint */ 844 if (iroot->cluster.focus == rchain) 845 iroot->cluster.focus = NULL; 846 } 847 pmp->pfs_hmps[i] = NULL; 848 } 849 hammer2_mtx_unlock(&iroot->lock); 850 851 /* 852 * Cleanup trailing chains. Gaps may remain. 853 */ 854 for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) { 855 if (pmp->pfs_hmps[i]) 856 break; 857 } 858 iroot->cluster.nchains = i + 1; 859 860 /* 861 * If the PMP has no elements remaining we can destroy it. 862 * (this will transition management threads from frozen->exit). 863 */ 864 if (iroot->cluster.nchains == 0) { 865 /* 866 * If this was the hmp's spmp, we need to clean 867 * a little more stuff out. 
868 */ 869 if (hmp->spmp == pmp) { 870 hmp->spmp = NULL; 871 hmp->vchain.pmp = NULL; 872 hmp->fchain.pmp = NULL; 873 } 874 875 /* 876 * Free the pmp and restart the loop 877 */ 878 KKASSERT(TAILQ_EMPTY(&pmp->syncq)); 879 KKASSERT(TAILQ_EMPTY(&pmp->depq)); 880 hammer2_pfsfree(pmp); 881 goto again; 882 } 883 884 /* 885 * If elements still remain we need to set the REMASTER 886 * flag and unfreeze it. 887 */ 888 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 889 if (pmp->pfs_hmps[i] == NULL) 890 continue; 891 hammer2_thr_remaster(&pmp->sync_thrs[i]); 892 hammer2_thr_unfreeze(&pmp->sync_thrs[i]); 893 if (pmp->xop_groups) { 894 for (j = 0; j < hammer2_xopgroups; ++j) { 895 hammer2_thr_remaster( 896 &pmp->xop_groups[j].thrs[i]); 897 hammer2_thr_unfreeze( 898 &pmp->xop_groups[j].thrs[i]); 899 } 900 } 901 } 902 } 903 } 904 905 /* 906 * Mount or remount HAMMER2 fileystem from physical media 907 * 908 * mountroot 909 * mp mount point structure 910 * path NULL 911 * data <unused> 912 * cred <unused> 913 * 914 * mount 915 * mp mount point structure 916 * path path to mount point 917 * data pointer to argument structure in user space 918 * volume volume path (device@LABEL form) 919 * hflags user mount flags 920 * cred user credentials 921 * 922 * RETURNS: 0 Success 923 * !0 error number 924 */ 925 static 926 int 927 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 928 struct ucred *cred) 929 { 930 struct hammer2_mount_info info; 931 hammer2_pfs_t *pmp; 932 hammer2_pfs_t *spmp; 933 hammer2_dev_t *hmp, *hmp_tmp; 934 hammer2_dev_t *force_local; 935 hammer2_key_t key_next; 936 hammer2_key_t key_dummy; 937 hammer2_key_t lhc; 938 hammer2_chain_t *parent; 939 hammer2_chain_t *chain; 940 const hammer2_inode_data_t *ripdata; 941 hammer2_blockref_t bref; 942 hammer2_devvp_list_t devvpl; 943 hammer2_devvp_t *e, *e_tmp; 944 struct file *fp; 945 char devstr[MNAMELEN]; 946 size_t size; 947 size_t done; 948 char *dev; 949 char *label; 950 int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 951 int error; 952 int i; 953 954 hmp = NULL; 955 pmp = NULL; 956 dev = NULL; 957 label = NULL; 958 bzero(&info, sizeof(info)); 959 960 if (path) { 961 /* 962 * Non-root mount or updating a mount 963 */ 964 error = copyin(data, &info, sizeof(info)); 965 if (error) 966 return (error); 967 } 968 969 if (mp->mnt_flag & MNT_UPDATE) { 970 /* 971 * Update mount. Note that pmp->iroot->cluster is 972 * an inode-embedded cluster and thus cannot be 973 * directly locked. 974 * 975 * XXX HAMMER2 needs to implement NFS export via 976 * mountctl. 977 */ 978 hammer2_cluster_t *cluster; 979 980 pmp = MPTOPMP(mp); 981 pmp->hflags = info.hflags; 982 cluster = &pmp->iroot->cluster; 983 for (i = 0; i < cluster->nchains; ++i) { 984 if (cluster->array[i].chain == NULL) 985 continue; 986 hmp = cluster->array[i].chain->hmp; 987 error = hammer2_remount(hmp, mp, path, cred); 988 if (error) 989 break; 990 } 991 992 return error; 993 } 994 995 if (path == NULL) { 996 /* 997 * Root mount 998 */ 999 info.cluster_fd = -1; 1000 ksnprintf(devstr, sizeof(devstr), "%s", 1001 mp->mnt_stat.f_mntfromname); 1002 done = strlen(devstr) + 1; 1003 kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr); 1004 } else { 1005 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done); 1006 if (error) 1007 return (error); 1008 kprintf("hammer2_mount: devstr=\"%s\"\n", devstr); 1009 } 1010 1011 /* 1012 * Extract device and label, automatically mount @BOOT, @ROOT, or @DATA 1013 * if no label specified, based on the partition id. 
Error out if no 1014 * label or device (with partition id) is specified. This is strictly 1015 * a convenience to match the default label created by newfs_hammer2, 1016 * our preference is that a label always be specified. 1017 * 1018 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command 1019 * that does not specify a device, as long as some H2 label 1020 * has already been mounted from that device. This makes 1021 * mounting snapshots a lot easier. 1022 */ 1023 dev = devstr; 1024 label = strchr(devstr, '@'); 1025 if (label && ((label + 1) - dev) > done) { 1026 kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done); 1027 return (EINVAL); 1028 } 1029 if (label == NULL || label[1] == 0) { 1030 char slice; 1031 1032 if (label == NULL) 1033 label = devstr + strlen(devstr); 1034 else 1035 *label = '\0'; /* clean up trailing @ */ 1036 1037 slice = label[-1]; 1038 switch(slice) { 1039 case 'a': 1040 label = "BOOT"; 1041 break; 1042 case 'd': 1043 label = "ROOT"; 1044 break; 1045 default: 1046 label = "DATA"; 1047 break; 1048 } 1049 } else { 1050 *label = '\0'; 1051 label++; 1052 } 1053 1054 kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n", 1055 dev, label, ronly); 1056 1057 /* 1058 * Initialize all device vnodes. 1059 */ 1060 TAILQ_INIT(&devvpl); 1061 error = hammer2_init_devvp(dev, path == NULL, &devvpl); 1062 if (error) { 1063 kprintf("hammer2: failed to initialize devvp in %s\n", dev); 1064 hammer2_cleanup_devvp(&devvpl); 1065 return error; 1066 } 1067 1068 /* 1069 * Determine if the device has already been mounted. After this 1070 * check hmp will be non-NULL if we are doing the second or more 1071 * hammer2 mounts from the same device. 1072 */ 1073 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 1074 if (!TAILQ_EMPTY(&devvpl)) { 1075 /* 1076 * Match the device. Due to the way devfs works, 1077 * we may not be able to directly match the vnode pointer, 1078 * so also check to see if the underlying device matches. 1079 */ 1080 TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) { 1081 TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) { 1082 int devvp_found = 0; 1083 TAILQ_FOREACH(e, &devvpl, entry) { 1084 KKASSERT(e->devvp); 1085 if (e_tmp->devvp == e->devvp) 1086 devvp_found = 1; 1087 if (e_tmp->devvp->v_rdev && 1088 e_tmp->devvp->v_rdev == e->devvp->v_rdev) 1089 devvp_found = 1; 1090 } 1091 if (!devvp_found) 1092 goto next_hmp; 1093 } 1094 hmp = hmp_tmp; 1095 kprintf("hammer2_mount: hmp=%p matched\n", hmp); 1096 break; 1097 next_hmp: 1098 continue; 1099 } 1100 1101 /* 1102 * If no match this may be a fresh H2 mount, make sure 1103 * the device is not mounted on anything else. 1104 */ 1105 if (hmp == NULL) { 1106 TAILQ_FOREACH(e, &devvpl, entry) { 1107 struct vnode *devvp = e->devvp; 1108 KKASSERT(devvp); 1109 error = vfs_mountedon(devvp); 1110 if (error) { 1111 kprintf("hammer2_mount: %s mounted %d\n", 1112 e->path, error); 1113 hammer2_cleanup_devvp(&devvpl); 1114 lockmgr(&hammer2_mntlk, LK_RELEASE); 1115 return error; 1116 } 1117 } 1118 } 1119 } else { 1120 /* 1121 * Match the label to a pmp already probed. 
1122 */ 1123 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 1124 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 1125 if (pmp->pfs_names[i] && 1126 strcmp(pmp->pfs_names[i], label) == 0) { 1127 hmp = pmp->pfs_hmps[i]; 1128 break; 1129 } 1130 } 1131 if (hmp) 1132 break; 1133 } 1134 if (hmp == NULL) { 1135 kprintf("hammer2_mount: PFS label \"%s\" not found\n", 1136 label); 1137 hammer2_cleanup_devvp(&devvpl); 1138 lockmgr(&hammer2_mntlk, LK_RELEASE); 1139 return ENOENT; 1140 } 1141 } 1142 1143 /* 1144 * Open the device if this isn't a secondary mount and construct 1145 * the H2 device mount (hmp). 1146 */ 1147 if (hmp == NULL) { 1148 hammer2_chain_t *schain; 1149 hammer2_xop_head_t xop; 1150 1151 /* 1152 * Now open the device 1153 */ 1154 KKASSERT(!TAILQ_EMPTY(&devvpl)); 1155 if (error == 0) { 1156 error = hammer2_open_devvp(&devvpl, ronly); 1157 if (error) { 1158 hammer2_close_devvp(&devvpl, ronly); 1159 hammer2_cleanup_devvp(&devvpl); 1160 lockmgr(&hammer2_mntlk, LK_RELEASE); 1161 return error; 1162 } 1163 } 1164 1165 /* 1166 * Construct volumes and link with device vnodes. 1167 */ 1168 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO); 1169 hmp->devvp = NULL; 1170 error = hammer2_init_volumes(mp, &devvpl, hmp->volumes, 1171 &hmp->voldata, &hmp->devvp); 1172 if (error) { 1173 hammer2_close_devvp(&devvpl, ronly); 1174 hammer2_cleanup_devvp(&devvpl); 1175 lockmgr(&hammer2_mntlk, LK_RELEASE); 1176 kfree(hmp, M_HAMMER2); 1177 return error; 1178 } 1179 if (!hmp->devvp) { 1180 kprintf("hammer2: failed to initialize root volume\n"); 1181 hammer2_unmount_helper(mp, NULL, hmp); 1182 lockmgr(&hammer2_mntlk, LK_RELEASE); 1183 hammer2_vfs_unmount(mp, MNT_FORCE); 1184 return EINVAL; 1185 } 1186 1187 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev); 1188 hmp->ronly = ronly; 1189 hmp->hflags = info.hflags & HMNT2_DEVFLAGS; 1190 kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains", 1191 sizeof(struct hammer2_chain)); 1192 kmalloc_create_obj(&hmp->mio, "HAMMER2-dio", 1193 sizeof(struct hammer2_io)); 1194 kmalloc_create(&hmp->mmsg, "HAMMER2-msg"); 1195 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry); 1196 RB_INIT(&hmp->iotree); 1197 spin_init(&hmp->io_spin, "h2mount_io"); 1198 spin_init(&hmp->list_spin, "h2mount_list"); 1199 1200 lockinit(&hmp->vollk, "h2vol", 0, 0); 1201 lockinit(&hmp->bulklk, "h2bulk", 0, 0); 1202 lockinit(&hmp->bflock, "h2bflk", 0, 0); 1203 1204 /* 1205 * vchain setup. vchain.data is embedded. 1206 * vchain.refs is initialized and will never drop to 0. 1207 * 1208 * NOTE! voldata is not yet loaded. 1209 */ 1210 hmp->vchain.hmp = hmp; 1211 hmp->vchain.refs = 1; 1212 hmp->vchain.data = (void *)&hmp->voldata; 1213 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME; 1214 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 1215 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 1216 hammer2_chain_core_init(&hmp->vchain); 1217 1218 /* 1219 * fchain setup. fchain.data is embedded. 1220 * fchain.refs is initialized and will never drop to 0. 1221 * 1222 * The data is not used but needs to be initialized to 1223 * pass assertion muster. We use this chain primarily 1224 * as a placeholder for the freemap's top-level radix tree 1225 * so it does not interfere with the volume's topology 1226 * radix tree. 
1227 */ 1228 hmp->fchain.hmp = hmp; 1229 hmp->fchain.refs = 1; 1230 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset; 1231 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP; 1232 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 1233 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 1234 hmp->fchain.bref.methods = 1235 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) | 1236 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE); 1237 hammer2_chain_core_init(&hmp->fchain); 1238 1239 /* 1240 * Initialize volume header related fields. 1241 */ 1242 KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO || 1243 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO); 1244 hmp->volhdrno = error; 1245 hmp->volsync = hmp->voldata; 1246 hmp->free_reserved = hmp->voldata.allocator_size / 20; 1247 /* 1248 * Must use hmp instead of volume header for these two 1249 * in order to handle volume versions transparently. 1250 */ 1251 if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) { 1252 hmp->nvolumes = hmp->voldata.nvolumes; 1253 hmp->total_size = hmp->voldata.total_size; 1254 } else { 1255 hmp->nvolumes = 1; 1256 hmp->total_size = hmp->voldata.volu_size; 1257 } 1258 KKASSERT(hmp->nvolumes > 0); 1259 1260 /* 1261 * Move devvpl entries to hmp. 1262 */ 1263 TAILQ_INIT(&hmp->devvpl); 1264 while ((e = TAILQ_FIRST(&devvpl)) != NULL) { 1265 TAILQ_REMOVE(&devvpl, e, entry); 1266 TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry); 1267 } 1268 KKASSERT(TAILQ_EMPTY(&devvpl)); 1269 KKASSERT(!TAILQ_EMPTY(&hmp->devvpl)); 1270 1271 /* 1272 * Really important to get these right or the flush and 1273 * teardown code will get confused. 1274 */ 1275 hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL); 1276 spmp = hmp->spmp; 1277 spmp->pfs_hmps[0] = hmp; 1278 1279 /* 1280 * Dummy-up vchain and fchain's modify_tid. mirror_tid 1281 * is inherited from the volume header. 1282 */ 1283 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 1284 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid; 1285 hmp->vchain.pmp = spmp; 1286 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 1287 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid; 1288 hmp->fchain.pmp = spmp; 1289 1290 /* 1291 * First locate the super-root inode, which is key 0 1292 * relative to the volume header's blockset. 1293 * 1294 * Then locate the root inode by scanning the directory keyspace 1295 * represented by the label. 1296 */ 1297 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 1298 schain = hammer2_chain_lookup(&parent, &key_dummy, 1299 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 1300 &error, 0); 1301 hammer2_chain_lookup_done(parent); 1302 if (schain == NULL) { 1303 kprintf("hammer2_mount: invalid super-root\n"); 1304 hammer2_unmount_helper(mp, NULL, hmp); 1305 lockmgr(&hammer2_mntlk, LK_RELEASE); 1306 hammer2_vfs_unmount(mp, MNT_FORCE); 1307 return EINVAL; 1308 } 1309 if (schain->error) { 1310 kprintf("hammer2_mount: error %s reading super-root\n", 1311 hammer2_error_str(schain->error)); 1312 hammer2_chain_unlock(schain); 1313 hammer2_chain_drop(schain); 1314 schain = NULL; 1315 hammer2_unmount_helper(mp, NULL, hmp); 1316 lockmgr(&hammer2_mntlk, LK_RELEASE); 1317 hammer2_vfs_unmount(mp, MNT_FORCE); 1318 return EINVAL; 1319 } 1320 1321 /* 1322 * The super-root always uses an inode_tid of 1 when 1323 * creating PFSs. 1324 */ 1325 spmp->inode_tid = 1; 1326 spmp->modify_tid = schain->bref.modify_tid + 1; 1327 1328 /* 1329 * Sanity-check schain's pmp and finish initialization. 
1330 * Any chain belonging to the super-root topology should 1331 * have a NULL pmp (not even set to spmp). 1332 */ 1333 ripdata = &schain->data->ipdata; 1334 KKASSERT(schain->pmp == NULL); 1335 spmp->pfs_clid = ripdata->meta.pfs_clid; 1336 1337 /* 1338 * Replace the dummy spmp->iroot with a real one. It's 1339 * easier to just do a wholesale replacement than to try 1340 * to update the chain and fixup the iroot fields. 1341 * 1342 * The returned inode is locked with the supplied cluster. 1343 */ 1344 hammer2_dummy_xop_from_chain(&xop, schain); 1345 hammer2_inode_drop(spmp->iroot); 1346 spmp->iroot = NULL; 1347 spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1); 1348 spmp->spmp_hmp = hmp; 1349 spmp->pfs_types[0] = ripdata->meta.pfs_type; 1350 spmp->pfs_hmps[0] = hmp; 1351 hammer2_inode_ref(spmp->iroot); 1352 hammer2_inode_unlock(spmp->iroot); 1353 hammer2_cluster_unlock(&xop.cluster); 1354 hammer2_chain_drop(schain); 1355 /* do not call hammer2_cluster_drop() on an embedded cluster */ 1356 schain = NULL; /* now invalid */ 1357 /* leave spmp->iroot with one ref */ 1358 1359 if (!hmp->ronly) { 1360 error = hammer2_recovery(hmp); 1361 if (error == 0) 1362 error |= hammer2_fixup_pfses(hmp); 1363 /* XXX do something with error */ 1364 } 1365 hammer2_update_pmps(hmp); 1366 hammer2_iocom_init(hmp); 1367 hammer2_bulkfree_init(hmp); 1368 1369 /* 1370 * Ref the cluster management messaging descriptor. The mount 1371 * program deals with the other end of the communications pipe. 1372 * 1373 * Root mounts typically do not supply one. 1374 */ 1375 if (info.cluster_fd >= 0) { 1376 fp = holdfp(curthread, info.cluster_fd, -1); 1377 if (fp) { 1378 hammer2_cluster_reconnect(hmp, fp); 1379 } else { 1380 kprintf("hammer2_mount: bad cluster_fd!\n"); 1381 } 1382 } 1383 } else { 1384 spmp = hmp->spmp; 1385 if (info.hflags & HMNT2_DEVFLAGS) { 1386 kprintf("hammer2_mount: Warning: mount flags pertaining " 1387 "to the whole device may only be specified " 1388 "on the first mount of the device: %08x\n", 1389 info.hflags & HMNT2_DEVFLAGS); 1390 } 1391 } 1392 1393 /* 1394 * Force local mount (disassociate all PFSs from their clusters). 1395 * Used primarily for debugging. 1396 */ 1397 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL; 1398 1399 /* 1400 * Lookup the mount point under the media-localized super-root. 1401 * Scanning hammer2_pfslist doesn't help us because it represents 1402 * PFS cluster ids which can aggregate several named PFSs together. 1403 * 1404 * cluster->pmp will incorrectly point to spmp and must be fixed 1405 * up later on. 1406 */ 1407 hammer2_inode_lock(spmp->iroot, 0); 1408 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 1409 lhc = hammer2_dirhash(label, strlen(label)); 1410 chain = hammer2_chain_lookup(&parent, &key_next, 1411 lhc, lhc + HAMMER2_DIRHASH_LOMASK, 1412 &error, 0); 1413 while (chain) { 1414 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && 1415 strcmp(label, chain->data->ipdata.filename) == 0) { 1416 break; 1417 } 1418 chain = hammer2_chain_next(&parent, chain, &key_next, 1419 key_next, 1420 lhc + HAMMER2_DIRHASH_LOMASK, 1421 &error, 0); 1422 } 1423 if (parent) { 1424 hammer2_chain_unlock(parent); 1425 hammer2_chain_drop(parent); 1426 } 1427 hammer2_inode_unlock(spmp->iroot); 1428 1429 /* 1430 * PFS could not be found? 
1431 */ 1432 if (chain == NULL) { 1433 hammer2_unmount_helper(mp, NULL, hmp); 1434 lockmgr(&hammer2_mntlk, LK_RELEASE); 1435 hammer2_vfs_unmount(mp, MNT_FORCE); 1436 1437 if (error) { 1438 kprintf("hammer2_mount: PFS label I/O error\n"); 1439 return EINVAL; 1440 } else { 1441 kprintf("hammer2_mount: PFS label \"%s\" not found\n", 1442 label); 1443 return ENOENT; 1444 } 1445 } 1446 1447 /* 1448 * Acquire the pmp structure (it should have already been allocated 1449 * via hammer2_update_pmps() so do not pass cluster in to add to 1450 * available chains). 1451 * 1452 * Check if the cluster has already been mounted. A cluster can 1453 * only be mounted once, use null mounts to mount additional copies. 1454 */ 1455 if (chain->error) { 1456 kprintf("hammer2_mount: PFS label I/O error\n"); 1457 } else { 1458 ripdata = &chain->data->ipdata; 1459 bref = chain->bref; 1460 pmp = hammer2_pfsalloc(NULL, ripdata, 1461 bref.modify_tid, force_local); 1462 } 1463 hammer2_chain_unlock(chain); 1464 hammer2_chain_drop(chain); 1465 1466 /* 1467 * Finish the mount 1468 */ 1469 kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp); 1470 1471 if (pmp->mp) { 1472 kprintf("hammer2_mount: PFS already mounted!\n"); 1473 hammer2_unmount_helper(mp, NULL, hmp); 1474 lockmgr(&hammer2_mntlk, LK_RELEASE); 1475 hammer2_vfs_unmount(mp, MNT_FORCE); 1476 1477 return EBUSY; 1478 } 1479 1480 pmp->hflags = info.hflags; 1481 mp->mnt_flag |= MNT_LOCAL; 1482 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */ 1483 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */ 1484 1485 /* 1486 * required mount structure initializations 1487 */ 1488 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE; 1489 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE; 1490 1491 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE; 1492 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 1493 1494 /* 1495 * Optional fields 1496 */ 1497 mp->mnt_iosize_max = MAXPHYS; 1498 1499 /* 1500 * Connect up mount pointers. 1501 */ 1502 hammer2_mount_helper(mp, pmp); 1503 lockmgr(&hammer2_mntlk, LK_RELEASE); 1504 1505 /* 1506 * Finish setup 1507 */ 1508 vfs_getnewfsid(mp); 1509 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops); 1510 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops); 1511 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops); 1512 1513 if (path) { 1514 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, 1515 MNAMELEN - 1, &size); 1516 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 1517 } /* else root mount, already in there */ 1518 1519 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname)); 1520 if (path) { 1521 copyinstr(path, mp->mnt_stat.f_mntonname, 1522 sizeof(mp->mnt_stat.f_mntonname) - 1, 1523 &size); 1524 } else { 1525 /* root mount */ 1526 mp->mnt_stat.f_mntonname[0] = '/'; 1527 } 1528 1529 /* 1530 * Initial statfs to prime mnt_stat. 1531 */ 1532 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred); 1533 1534 return 0; 1535 } 1536 1537 /* 1538 * Scan PFSs under the super-root and create hammer2_pfs structures. 1539 */ 1540 static 1541 void 1542 hammer2_update_pmps(hammer2_dev_t *hmp) 1543 { 1544 const hammer2_inode_data_t *ripdata; 1545 hammer2_chain_t *parent; 1546 hammer2_chain_t *chain; 1547 hammer2_blockref_t bref; 1548 hammer2_dev_t *force_local; 1549 hammer2_pfs_t *spmp; 1550 hammer2_pfs_t *pmp; 1551 hammer2_key_t key_next; 1552 int error; 1553 1554 /* 1555 * Force local mount (disassociate all PFSs from their clusters). 1556 * Used primarily for debugging. 
1557 */ 1558 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL; 1559 1560 /* 1561 * Lookup mount point under the media-localized super-root. 1562 * 1563 * cluster->pmp will incorrectly point to spmp and must be fixed 1564 * up later on. 1565 */ 1566 spmp = hmp->spmp; 1567 hammer2_inode_lock(spmp->iroot, 0); 1568 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 1569 chain = hammer2_chain_lookup(&parent, &key_next, 1570 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX, 1571 &error, 0); 1572 while (chain) { 1573 if (chain->error) { 1574 kprintf("I/O error scanning PFS labels\n"); 1575 } else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) { 1576 kprintf("Non inode chain type %d under super-root\n", 1577 chain->bref.type); 1578 } else { 1579 ripdata = &chain->data->ipdata; 1580 bref = chain->bref; 1581 pmp = hammer2_pfsalloc(chain, ripdata, 1582 bref.modify_tid, force_local); 1583 } 1584 chain = hammer2_chain_next(&parent, chain, &key_next, 1585 key_next, HAMMER2_KEY_MAX, 1586 &error, 0); 1587 } 1588 if (parent) { 1589 hammer2_chain_unlock(parent); 1590 hammer2_chain_drop(parent); 1591 } 1592 hammer2_inode_unlock(spmp->iroot); 1593 } 1594 1595 static 1596 int 1597 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused, 1598 struct ucred *cred) 1599 { 1600 hammer2_volume_t *vol; 1601 struct vnode *devvp; 1602 int i, error, result = 0; 1603 1604 if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR))) 1605 return 0; 1606 1607 for (i = 0; i < hmp->nvolumes; ++i) { 1608 vol = &hmp->volumes[i]; 1609 devvp = vol->dev->devvp; 1610 KKASSERT(devvp); 1611 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1612 VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL); 1613 vn_unlock(devvp); 1614 error = 0; 1615 if (vol->id == HAMMER2_ROOT_VOLUME) { 1616 error = hammer2_recovery(hmp); 1617 if (error == 0) 1618 error |= hammer2_fixup_pfses(hmp); 1619 } 1620 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1621 if (error == 0) { 1622 VOP_CLOSE(devvp, FREAD, NULL); 1623 } else { 1624 VOP_CLOSE(devvp, FREAD | FWRITE, NULL); 1625 } 1626 vn_unlock(devvp); 1627 result |= error; 1628 } 1629 if (result == 0) { 1630 kprintf("hammer2: enable read/write\n"); 1631 hmp->ronly = 0; 1632 } 1633 1634 return result; 1635 } 1636 1637 static 1638 int 1639 hammer2_vfs_unmount(struct mount *mp, int mntflags) 1640 { 1641 hammer2_pfs_t *pmp; 1642 int flags; 1643 int error = 0; 1644 1645 pmp = MPTOPMP(mp); 1646 1647 if (pmp == NULL) 1648 return(0); 1649 1650 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 1651 1652 /* 1653 * If mount initialization proceeded far enough we must flush 1654 * its vnodes and sync the underlying mount points. Three syncs 1655 * are required to fully flush the filesystem (freemap updates lag 1656 * by one flush, and one extra for safety). 1657 */ 1658 if (mntflags & MNT_FORCE) 1659 flags = FORCECLOSE; 1660 else 1661 flags = 0; 1662 if (pmp->iroot) { 1663 error = vflush(mp, 0, flags); 1664 if (error) 1665 goto failed; 1666 hammer2_vfs_sync(mp, MNT_WAIT); 1667 hammer2_vfs_sync(mp, MNT_WAIT); 1668 hammer2_vfs_sync(mp, MNT_WAIT); 1669 } 1670 1671 /* 1672 * Cleanup the frontend support XOPS threads 1673 */ 1674 hammer2_xop_helper_cleanup(pmp); 1675 1676 if (pmp->mp) 1677 hammer2_unmount_helper(mp, pmp, NULL); 1678 1679 error = 0; 1680 failed: 1681 lockmgr(&hammer2_mntlk, LK_RELEASE); 1682 1683 return (error); 1684 } 1685 1686 /* 1687 * Mount helper, hook the system mount into our PFS. 1688 * The mount lock is held. 1689 * 1690 * We must bump the mount_count on related devices for any 1691 * mounted PFSs. 
1692 */ 1693 static 1694 void 1695 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp) 1696 { 1697 hammer2_cluster_t *cluster; 1698 hammer2_chain_t *rchain; 1699 int i; 1700 1701 mp->mnt_data = (qaddr_t)pmp; 1702 pmp->mp = mp; 1703 1704 /* 1705 * After pmp->mp is set we have to adjust hmp->mount_count. 1706 */ 1707 cluster = &pmp->iroot->cluster; 1708 for (i = 0; i < cluster->nchains; ++i) { 1709 rchain = cluster->array[i].chain; 1710 if (rchain == NULL) 1711 continue; 1712 ++rchain->hmp->mount_count; 1713 } 1714 1715 /* 1716 * Create missing Xop threads 1717 */ 1718 hammer2_xop_helper_create(pmp); 1719 } 1720 1721 /* 1722 * Mount helper, unhook the system mount from our PFS. 1723 * The mount lock is held. 1724 * 1725 * If hmp is supplied a mount responsible for being the first to open 1726 * the block device failed and the block device and all PFSs using the 1727 * block device must be cleaned up. 1728 * 1729 * If pmp is supplied multiple devices might be backing the PFS and each 1730 * must be disconnected. This might not be the last PFS using some of the 1731 * underlying devices. Also, we have to adjust our hmp->mount_count 1732 * accounting for the devices backing the pmp which is now undergoing an 1733 * unmount. 1734 */ 1735 static 1736 void 1737 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp) 1738 { 1739 hammer2_cluster_t *cluster; 1740 hammer2_chain_t *rchain; 1741 int dumpcnt; 1742 int i; 1743 1744 /* 1745 * If no device supplied this is a high-level unmount and we have to 1746 * to disconnect the mount, adjust mount_count, and locate devices 1747 * that might now have no mounts. 1748 */ 1749 if (pmp) { 1750 KKASSERT(hmp == NULL); 1751 KKASSERT(MPTOPMP(mp) == pmp); 1752 pmp->mp = NULL; 1753 mp->mnt_data = NULL; 1754 1755 /* 1756 * After pmp->mp is cleared we have to account for 1757 * mount_count. 1758 */ 1759 cluster = &pmp->iroot->cluster; 1760 for (i = 0; i < cluster->nchains; ++i) { 1761 rchain = cluster->array[i].chain; 1762 if (rchain == NULL) 1763 continue; 1764 --rchain->hmp->mount_count; 1765 /* scrapping hmp now may invalidate the pmp */ 1766 } 1767 again: 1768 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 1769 if (hmp->mount_count == 0) { 1770 hammer2_unmount_helper(NULL, NULL, hmp); 1771 goto again; 1772 } 1773 } 1774 return; 1775 } 1776 1777 /* 1778 * Try to terminate the block device. We can't terminate it if 1779 * there are still PFSs referencing it. 1780 */ 1781 if (hmp->mount_count) 1782 return; 1783 1784 /* 1785 * Decomission the network before we start messing with the 1786 * device and PFS. 1787 */ 1788 hammer2_iocom_uninit(hmp); 1789 1790 hammer2_bulkfree_uninit(hmp); 1791 hammer2_pfsfree_scan(hmp, 0); 1792 1793 /* 1794 * Cycle the volume data lock as a safety (probably not needed any 1795 * more). To ensure everything is out we need to flush at least 1796 * three times. (1) The running of the sideq can dirty the 1797 * filesystem, (2) A normal flush can dirty the freemap, and 1798 * (3) ensure that the freemap is fully synchronized. 1799 * 1800 * The next mount's recovery scan can clean everything up but we want 1801 * to leave the filesystem in a 100% clean state on a normal unmount. 1802 */ 1803 #if 0 1804 hammer2_voldata_lock(hmp); 1805 hammer2_voldata_unlock(hmp); 1806 #endif 1807 1808 /* 1809 * Flush whatever is left. Unmounted but modified PFS's might still 1810 * have some dirty chains on them. 
1811 */ 1812 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS); 1813 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS); 1814 1815 if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 1816 hammer2_voldata_modify(hmp); 1817 hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP | 1818 HAMMER2_FLUSH_ALL); 1819 } 1820 hammer2_chain_unlock(&hmp->fchain); 1821 1822 if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 1823 hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP | 1824 HAMMER2_FLUSH_ALL); 1825 } 1826 hammer2_chain_unlock(&hmp->vchain); 1827 1828 if ((hmp->vchain.flags | hmp->fchain.flags) & 1829 HAMMER2_CHAIN_FLUSH_MASK) { 1830 kprintf("hammer2_unmount: chains left over after final sync\n"); 1831 kprintf(" vchain %08x\n", hmp->vchain.flags); 1832 kprintf(" fchain %08x\n", hmp->fchain.flags); 1833 1834 if (hammer2_debug & 0x0010) 1835 Debugger("entered debugger"); 1836 } 1837 1838 hammer2_pfsfree_scan(hmp, 1); 1839 1840 KKASSERT(hmp->spmp == NULL); 1841 1842 /* 1843 * Finish up with the device vnode 1844 */ 1845 if (!TAILQ_EMPTY(&hmp->devvpl)) { 1846 hammer2_close_devvp(&hmp->devvpl, hmp->ronly); 1847 hammer2_cleanup_devvp(&hmp->devvpl); 1848 } 1849 KKASSERT(TAILQ_EMPTY(&hmp->devvpl)); 1850 1851 /* 1852 * Clear vchain/fchain flags that might prevent final cleanup 1853 * of these chains. 1854 */ 1855 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) { 1856 atomic_add_long(&hammer2_count_modified_chains, -1); 1857 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED); 1858 hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1); 1859 } 1860 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) { 1861 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE); 1862 } 1863 1864 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) { 1865 atomic_add_long(&hammer2_count_modified_chains, -1); 1866 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED); 1867 hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1); 1868 } 1869 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) { 1870 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE); 1871 } 1872 1873 /* 1874 * Final drop of embedded freemap root chain to 1875 * clean up fchain.core (fchain structure is not 1876 * flagged ALLOCATED so it is cleaned out and then 1877 * left to rot). 1878 */ 1879 hammer2_chain_drop(&hmp->fchain); 1880 1881 /* 1882 * Final drop of embedded volume root chain to clean 1883 * up vchain.core (vchain structure is not flagged 1884 * ALLOCATED so it is cleaned out and then left to 1885 * rot). 
1886 */ 1887 dumpcnt = 50; 1888 hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1); 1889 dumpcnt = 50; 1890 hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1); 1891 1892 hammer2_chain_drop(&hmp->vchain); 1893 1894 hammer2_io_cleanup(hmp, &hmp->iotree); 1895 if (hmp->iofree_count) { 1896 kprintf("io_cleanup: %d I/O's left hanging\n", 1897 hmp->iofree_count); 1898 } 1899 1900 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); 1901 kmalloc_destroy_obj(&hmp->mchain); 1902 kmalloc_destroy_obj(&hmp->mio); 1903 kmalloc_destroy(&hmp->mmsg); 1904 kfree(hmp, M_HAMMER2); 1905 } 1906 1907 int 1908 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 1909 ino_t ino, struct vnode **vpp) 1910 { 1911 hammer2_xop_lookup_t *xop; 1912 hammer2_pfs_t *pmp; 1913 hammer2_inode_t *ip; 1914 hammer2_tid_t inum; 1915 int error; 1916 1917 inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK; 1918 1919 error = 0; 1920 pmp = MPTOPMP(mp); 1921 1922 /* 1923 * Easy if we already have it cached 1924 */ 1925 ip = hammer2_inode_lookup(pmp, inum); 1926 if (ip) { 1927 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 1928 *vpp = hammer2_igetv(ip, &error); 1929 hammer2_inode_unlock(ip); 1930 hammer2_inode_drop(ip); /* from lookup */ 1931 1932 return error; 1933 } 1934 1935 /* 1936 * Otherwise we have to find the inode 1937 */ 1938 xop = hammer2_xop_alloc(pmp->iroot, 0); 1939 xop->lhc = inum; 1940 hammer2_xop_start(&xop->head, &hammer2_lookup_desc); 1941 error = hammer2_xop_collect(&xop->head, 0); 1942 1943 if (error == 0) 1944 ip = hammer2_inode_get(pmp, &xop->head, -1, -1); 1945 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1946 1947 if (ip) { 1948 *vpp = hammer2_igetv(ip, &error); 1949 hammer2_inode_unlock(ip); 1950 } else { 1951 *vpp = NULL; 1952 error = ENOENT; 1953 } 1954 return (error); 1955 } 1956 1957 static 1958 int 1959 hammer2_vfs_root(struct mount *mp, struct vnode **vpp) 1960 { 1961 hammer2_pfs_t *pmp; 1962 struct vnode *vp; 1963 int error; 1964 1965 pmp = MPTOPMP(mp); 1966 if (pmp->iroot == NULL) { 1967 kprintf("hammer2 (%s): no root inode\n", 1968 mp->mnt_stat.f_mntfromname); 1969 *vpp = NULL; 1970 return EINVAL; 1971 } 1972 1973 error = 0; 1974 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 1975 1976 while (pmp->inode_tid == 0) { 1977 hammer2_xop_ipcluster_t *xop; 1978 const hammer2_inode_meta_t *meta; 1979 1980 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING); 1981 hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc); 1982 error = hammer2_xop_collect(&xop->head, 0); 1983 1984 if (error == 0) { 1985 meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta; 1986 pmp->iroot->meta = *meta; 1987 pmp->inode_tid = meta->pfs_inum + 1; 1988 hammer2_xop_pdata(&xop->head); 1989 /* meta invalid */ 1990 1991 if (pmp->inode_tid < HAMMER2_INODE_START) 1992 pmp->inode_tid = HAMMER2_INODE_START; 1993 pmp->modify_tid = 1994 xop->head.cluster.focus->bref.modify_tid + 1; 1995 #if 0 1996 kprintf("PFS: Starting inode %jd\n", 1997 (intmax_t)pmp->inode_tid); 1998 kprintf("PMP focus good set nextino=%ld mod=%016jx\n", 1999 pmp->inode_tid, pmp->modify_tid); 2000 #endif 2001 wakeup(&pmp->iroot); 2002 2003 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2004 2005 /* 2006 * Prime the mount info. 
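 * (The hammer2_vfs_statfs() call below copies current cluster
 * statistics into mp->mnt_stat so the mount carries sane numbers
 * right away.)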
2007 */ 2008 hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL); 2009 break; 2010 } 2011 2012 /* 2013 * Loop, try again 2014 */ 2015 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2016 hammer2_inode_unlock(pmp->iroot); 2017 error = tsleep(&pmp->iroot, PCATCH, "h2root", hz); 2018 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 2019 if (error == EINTR) 2020 break; 2021 } 2022 2023 if (error) { 2024 hammer2_inode_unlock(pmp->iroot); 2025 *vpp = NULL; 2026 } else { 2027 vp = hammer2_igetv(pmp->iroot, &error); 2028 hammer2_inode_unlock(pmp->iroot); 2029 *vpp = vp; 2030 } 2031 2032 return (error); 2033 } 2034 2035 /* 2036 * Filesystem status 2037 * 2038 * XXX incorporate ipdata->meta.inode_quota and data_quota 2039 */ 2040 static 2041 int 2042 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 2043 { 2044 hammer2_pfs_t *pmp; 2045 hammer2_dev_t *hmp; 2046 hammer2_blockref_t bref; 2047 struct statfs tmp; 2048 int i; 2049 2050 /* 2051 * NOTE: iroot might not have validated the cluster yet. 2052 */ 2053 pmp = MPTOPMP(mp); 2054 2055 bzero(&tmp, sizeof(tmp)); 2056 2057 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2058 hmp = pmp->pfs_hmps[i]; 2059 if (hmp == NULL) 2060 continue; 2061 if (pmp->iroot->cluster.array[i].chain) 2062 bref = pmp->iroot->cluster.array[i].chain->bref; 2063 else 2064 bzero(&bref, sizeof(bref)); 2065 2066 tmp.f_files = bref.embed.stats.inode_count; 2067 tmp.f_ffree = 0; 2068 tmp.f_blocks = hmp->voldata.allocator_size / 2069 mp->mnt_vstat.f_bsize; 2070 tmp.f_bfree = hmp->voldata.allocator_free / 2071 mp->mnt_vstat.f_bsize; 2072 tmp.f_bavail = tmp.f_bfree; 2073 2074 if (cred && cred->cr_uid != 0) { 2075 uint64_t adj; 2076 2077 /* 5% */ 2078 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2079 tmp.f_blocks -= adj; 2080 tmp.f_bfree -= adj; 2081 tmp.f_bavail -= adj; 2082 } 2083 2084 mp->mnt_stat.f_blocks = tmp.f_blocks; 2085 mp->mnt_stat.f_bfree = tmp.f_bfree; 2086 mp->mnt_stat.f_bavail = tmp.f_bavail; 2087 mp->mnt_stat.f_files = tmp.f_files; 2088 mp->mnt_stat.f_ffree = tmp.f_ffree; 2089 2090 *sbp = mp->mnt_stat; 2091 } 2092 return (0); 2093 } 2094 2095 static 2096 int 2097 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 2098 { 2099 hammer2_pfs_t *pmp; 2100 hammer2_dev_t *hmp; 2101 hammer2_blockref_t bref; 2102 struct statvfs tmp; 2103 int i; 2104 2105 /* 2106 * NOTE: iroot might not have validated the cluster yet. 
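 *
 * Chains which are not present yet show up as a zeroed bref below, so
 * the inode counts may read as zero until the cluster validates.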
2107 */ 2108 pmp = MPTOPMP(mp); 2109 bzero(&tmp, sizeof(tmp)); 2110 2111 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2112 hmp = pmp->pfs_hmps[i]; 2113 if (hmp == NULL) 2114 continue; 2115 if (pmp->iroot->cluster.array[i].chain) 2116 bref = pmp->iroot->cluster.array[i].chain->bref; 2117 else 2118 bzero(&bref, sizeof(bref)); 2119 2120 tmp.f_files = bref.embed.stats.inode_count; 2121 tmp.f_ffree = 0; 2122 tmp.f_blocks = hmp->voldata.allocator_size / 2123 mp->mnt_vstat.f_bsize; 2124 tmp.f_bfree = hmp->voldata.allocator_free / 2125 mp->mnt_vstat.f_bsize; 2126 tmp.f_bavail = tmp.f_bfree; 2127 2128 if (cred && cred->cr_uid != 0) { 2129 uint64_t adj; 2130 2131 /* 5% */ 2132 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2133 tmp.f_blocks -= adj; 2134 tmp.f_bfree -= adj; 2135 tmp.f_bavail -= adj; 2136 } 2137 2138 mp->mnt_vstat.f_blocks = tmp.f_blocks; 2139 mp->mnt_vstat.f_bfree = tmp.f_bfree; 2140 mp->mnt_vstat.f_bavail = tmp.f_bavail; 2141 mp->mnt_vstat.f_files = tmp.f_files; 2142 mp->mnt_vstat.f_ffree = tmp.f_ffree; 2143 2144 *sbp = mp->mnt_vstat; 2145 } 2146 return (0); 2147 } 2148 2149 /* 2150 * Mount-time recovery (RW mounts) 2151 * 2152 * Updates to the free block table are allowed to lag flushes by one 2153 * transaction. In case of a crash, then on a fresh mount we must do an 2154 * incremental scan of the last committed transaction id and make sure that 2155 * all related blocks have been marked allocated. 2156 */ 2157 struct hammer2_recovery_elm { 2158 TAILQ_ENTRY(hammer2_recovery_elm) entry; 2159 hammer2_chain_t *chain; 2160 hammer2_tid_t sync_tid; 2161 }; 2162 2163 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm); 2164 2165 struct hammer2_recovery_info { 2166 struct hammer2_recovery_list list; 2167 hammer2_tid_t mtid; 2168 int depth; 2169 }; 2170 2171 static int hammer2_recovery_scan(hammer2_dev_t *hmp, 2172 hammer2_chain_t *parent, 2173 struct hammer2_recovery_info *info, 2174 hammer2_tid_t sync_tid); 2175 2176 #define HAMMER2_RECOVERY_MAXDEPTH 10 2177 2178 static 2179 int 2180 hammer2_recovery(hammer2_dev_t *hmp) 2181 { 2182 struct hammer2_recovery_info info; 2183 struct hammer2_recovery_elm *elm; 2184 hammer2_chain_t *parent; 2185 hammer2_tid_t sync_tid; 2186 hammer2_tid_t mirror_tid; 2187 int error; 2188 2189 hammer2_trans_init(hmp->spmp, 0); 2190 2191 sync_tid = hmp->voldata.freemap_tid; 2192 mirror_tid = hmp->voldata.mirror_tid; 2193 2194 kprintf("hammer2_mount: \"%s\": ", hmp->devrepname); 2195 if (sync_tid >= mirror_tid) { 2196 kprintf("no recovery needed\n"); 2197 } else { 2198 kprintf("freemap recovery %016jx-%016jx\n", 2199 sync_tid + 1, mirror_tid); 2200 } 2201 2202 TAILQ_INIT(&info.list); 2203 info.depth = 0; 2204 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 2205 error = hammer2_recovery_scan(hmp, parent, &info, sync_tid); 2206 hammer2_chain_lookup_done(parent); 2207 2208 while ((elm = TAILQ_FIRST(&info.list)) != NULL) { 2209 TAILQ_REMOVE(&info.list, elm, entry); 2210 parent = elm->chain; 2211 sync_tid = elm->sync_tid; 2212 kfree(elm, M_HAMMER2); 2213 2214 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2215 error |= hammer2_recovery_scan(hmp, parent, &info, 2216 hmp->voldata.freemap_tid); 2217 hammer2_chain_unlock(parent); 2218 hammer2_chain_drop(parent); /* drop elm->chain ref */ 2219 } 2220 2221 hammer2_trans_done(hmp->spmp, 0); 2222 2223 return error; 2224 } 2225 2226 static 2227 int 2228 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent, 2229 struct hammer2_recovery_info *info, 2230 hammer2_tid_t sync_tid) 2231 { 2232 const 
hammer2_inode_data_t *ripdata; 2233 hammer2_chain_t *chain; 2234 hammer2_blockref_t bref; 2235 int tmp_error; 2236 int rup_error; 2237 int error; 2238 int first; 2239 2240 /* 2241 * Adjust freemap to ensure that the block(s) are marked allocated. 2242 */ 2243 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) { 2244 hammer2_freemap_adjust(hmp, &parent->bref, 2245 HAMMER2_FREEMAP_DORECOVER); 2246 } 2247 2248 /* 2249 * Check type for recursive scan 2250 */ 2251 switch(parent->bref.type) { 2252 case HAMMER2_BREF_TYPE_VOLUME: 2253 /* data already instantiated */ 2254 break; 2255 case HAMMER2_BREF_TYPE_INODE: 2256 /* 2257 * Must instantiate data for DIRECTDATA test and also 2258 * for recursion. 2259 */ 2260 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2261 ripdata = &parent->data->ipdata; 2262 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) { 2263 /* not applicable to recovery scan */ 2264 hammer2_chain_unlock(parent); 2265 return 0; 2266 } 2267 hammer2_chain_unlock(parent); 2268 break; 2269 case HAMMER2_BREF_TYPE_INDIRECT: 2270 /* 2271 * Must instantiate data for recursion 2272 */ 2273 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2274 hammer2_chain_unlock(parent); 2275 break; 2276 case HAMMER2_BREF_TYPE_DIRENT: 2277 case HAMMER2_BREF_TYPE_DATA: 2278 case HAMMER2_BREF_TYPE_FREEMAP: 2279 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 2280 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 2281 /* not applicable to recovery scan */ 2282 return 0; 2283 break; 2284 default: 2285 return HAMMER2_ERROR_BADBREF; 2286 } 2287 2288 /* 2289 * Defer operation if depth limit reached. 2290 */ 2291 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) { 2292 struct hammer2_recovery_elm *elm; 2293 2294 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK); 2295 elm->chain = parent; 2296 elm->sync_tid = sync_tid; 2297 hammer2_chain_ref(parent); 2298 TAILQ_INSERT_TAIL(&info->list, elm, entry); 2299 /* unlocked by caller */ 2300 2301 return(0); 2302 } 2303 2304 2305 /* 2306 * Recursive scan of the last flushed transaction only. We are 2307 * doing this without pmp assignments so don't leave the chains 2308 * hanging around after we are done with them. 2309 * 2310 * error Cumulative error this level only 2311 * rup_error Cumulative error for recursion 2312 * tmp_error Specific non-cumulative recursion error 2313 */ 2314 chain = NULL; 2315 first = 1; 2316 rup_error = 0; 2317 error = 0; 2318 2319 for (;;) { 2320 error |= hammer2_chain_scan(parent, &chain, &bref, 2321 &first, 2322 HAMMER2_LOOKUP_NODATA); 2323 2324 /* 2325 * Problem during scan or EOF 2326 */ 2327 if (error) 2328 break; 2329 2330 /* 2331 * If this is a leaf 2332 */ 2333 if (chain == NULL) { 2334 if (bref.mirror_tid > sync_tid) { 2335 hammer2_freemap_adjust(hmp, &bref, 2336 HAMMER2_FREEMAP_DORECOVER); 2337 } 2338 continue; 2339 } 2340 2341 /* 2342 * This may or may not be a recursive node. 2343 */ 2344 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 2345 if (bref.mirror_tid > sync_tid) { 2346 ++info->depth; 2347 tmp_error = hammer2_recovery_scan(hmp, chain, 2348 info, sync_tid); 2349 --info->depth; 2350 } else { 2351 tmp_error = 0; 2352 } 2353 2354 /* 2355 * Flush the recovery at the PFS boundary to stage it for 2356 * the final flush of the super-root topology. 
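 *
 * Only chains whose bref carries HAMMER2_BREF_FLAG_PFSROOT and which
 * are still flagged ONFLUSH are flushed here, and only if the recursion
 * below them completed without error.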
2357 */ 2358 if (tmp_error == 0 && 2359 (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) && 2360 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) { 2361 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2362 HAMMER2_FLUSH_ALL); 2363 } 2364 rup_error |= tmp_error; 2365 } 2366 return ((error | rup_error) & ~HAMMER2_ERROR_EOF); 2367 } 2368 2369 /* 2370 * This fixes up an error introduced in earlier H2 implementations where 2371 * moving a PFS inode into an indirect block wound up causing the 2372 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared. 2373 */ 2374 static 2375 int 2376 hammer2_fixup_pfses(hammer2_dev_t *hmp) 2377 { 2378 const hammer2_inode_data_t *ripdata; 2379 hammer2_chain_t *parent; 2380 hammer2_chain_t *chain; 2381 hammer2_key_t key_next; 2382 hammer2_pfs_t *spmp; 2383 int error; 2384 2385 error = 0; 2386 2387 /* 2388 * Lookup mount point under the media-localized super-root. 2389 * 2390 * cluster->pmp will incorrectly point to spmp and must be fixed 2391 * up later on. 2392 */ 2393 spmp = hmp->spmp; 2394 hammer2_inode_lock(spmp->iroot, 0); 2395 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 2396 chain = hammer2_chain_lookup(&parent, &key_next, 2397 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX, 2398 &error, 0); 2399 while (chain) { 2400 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) 2401 continue; 2402 if (chain->error) { 2403 kprintf("I/O error scanning PFS labels\n"); 2404 error |= chain->error; 2405 } else if ((chain->bref.flags & 2406 HAMMER2_BREF_FLAG_PFSROOT) == 0) { 2407 int error2; 2408 2409 ripdata = &chain->data->ipdata; 2410 hammer2_trans_init(hmp->spmp, 0); 2411 error2 = hammer2_chain_modify(chain, 2412 chain->bref.modify_tid, 2413 0, 0); 2414 if (error2 == 0) { 2415 kprintf("hammer2: Correct mis-flagged PFS %s\n", 2416 ripdata->filename); 2417 chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT; 2418 } else { 2419 error |= error2; 2420 } 2421 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2422 HAMMER2_FLUSH_ALL); 2423 hammer2_trans_done(hmp->spmp, 0); 2424 } 2425 chain = hammer2_chain_next(&parent, chain, &key_next, 2426 key_next, HAMMER2_KEY_MAX, 2427 &error, 0); 2428 } 2429 if (parent) { 2430 hammer2_chain_unlock(parent); 2431 hammer2_chain_drop(parent); 2432 } 2433 hammer2_inode_unlock(spmp->iroot); 2434 2435 return error; 2436 } 2437 2438 /* 2439 * Sync a mount point; this is called periodically on a per-mount basis from 2440 * the filesystem syncer, and whenever a user issues a sync. 2441 */ 2442 int 2443 hammer2_vfs_sync(struct mount *mp, int waitfor) 2444 { 2445 int error; 2446 2447 error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor); 2448 2449 return error; 2450 } 2451 2452 /* 2453 * Because frontend operations lock vnodes before we get a chance to 2454 * lock the related inode, we can't just acquire a vnode lock without 2455 * risking a deadlock. The frontend may be holding a vnode lock while 2456 * also blocked on our SYNCQ flag while trying to get the inode lock. 2457 * 2458 * To deal with this situation we can check the vnode lock situation 2459 * after locking the inode and perform a work-around. 2460 */ 2461 int 2462 hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor) 2463 { 2464 struct mount *mp; 2465 /*hammer2_xop_flush_t *xop;*/ 2466 /*struct hammer2_sync_info info;*/ 2467 hammer2_inode_t *ip; 2468 hammer2_depend_t *depend; 2469 hammer2_depend_t *depend_next; 2470 struct vnode *vp; 2471 uint32_t pass2; 2472 int error; 2473 int wakecount; 2474 int dorestart; 2475 2476 mp = pmp->mp; 2477 2478 /* 2479 * Move all inodes on sideq to syncq. This will clear sideq. 
2480 * This should represent all flushable inodes. These inodes 2481 * will already have refs due to being on syncq or sideq. We 2482 * must do this all at once with the spinlock held to ensure that 2483 * all inode dependencies are part of the same flush. 2484 * 2485 * We should be able to do this asynchronously from frontend 2486 * operations because we will be locking the inodes later on 2487 * to actually flush them, and that will partition any frontend 2488 * op using the same inode. Either it has already locked the 2489 * inode and we will block, or it has not yet locked the inode 2490 * and it will block until we are finished flushing that inode. 2491 * 2492 * When restarting, only move the inodes flagged as PASS2 from 2493 * SIDEQ to SYNCQ. PASS2 propagation by inode_lock4() and 2494 * inode_depend() are atomic with the spin-lock. 2495 */ 2496 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH); 2497 #ifdef HAMMER2_DEBUG_SYNC 2498 kprintf("FILESYSTEM SYNC BOUNDARY\n"); 2499 #endif 2500 dorestart = 0; 2501 2502 /* 2503 * Move inodes from depq to syncq, releasing the related 2504 * depend structures. 2505 */ 2506 restart: 2507 #ifdef HAMMER2_DEBUG_SYNC 2508 kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart); 2509 #endif 2510 hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/); 2511 hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN); 2512 2513 /* 2514 * Move inodes from depq to syncq. When restarting, only depq's 2515 * marked pass2 are moved. 2516 */ 2517 hammer2_spin_ex(&pmp->list_spin); 2518 depend_next = TAILQ_FIRST(&pmp->depq); 2519 wakecount = 0; 2520 2521 while ((depend = depend_next) != NULL) { 2522 depend_next = TAILQ_NEXT(depend, entry); 2523 if (dorestart && depend->pass2 == 0) 2524 continue; 2525 TAILQ_FOREACH(ip, &depend->sideq, entry) { 2526 KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ); 2527 atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ); 2528 atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ); 2529 ip->depend = NULL; 2530 } 2531 2532 /* 2533 * NOTE: pmp->sideq_count includes both sideq and syncq 2534 */ 2535 TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry); 2536 2537 depend->count = 0; 2538 depend->pass2 = 0; 2539 TAILQ_REMOVE(&pmp->depq, depend, entry); 2540 } 2541 2542 hammer2_spin_unex(&pmp->list_spin); 2543 hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/ 2544 HAMMER2_TRANS_WAITING); 2545 dorestart = 0; 2546 2547 /* 2548 * sideq_count may have dropped enough to allow us to unstall 2549 * the frontend. 2550 */ 2551 hammer2_pfs_memory_wakeup(pmp, 0); 2552 2553 /* 2554 * Now run through all inodes on syncq. 2555 * 2556 * Flush transactions only interlock with other flush transactions. 2557 * Any conflicting frontend operations will block on the inode, but 2558 * may hold a vnode lock while doing so. 2559 */ 2560 hammer2_spin_ex(&pmp->list_spin); 2561 while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) { 2562 /* 2563 * Remove the inode from the SYNCQ, transfer the syncq ref 2564 * to us. We must clear SYNCQ to allow any potential 2565 * front-end deadlock to proceed. We must set PASS2 so 2566 * the dependency code knows what to do. 
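 *
 * The cmpset below snapshots ip->flags, then atomically clears SYNCQ
 * and SYNCQ_WAKEUP while setting SYNCQ_PASS2 in a single operation.
 * If the flags changed underneath us the cmpset fails and we simply
 * retry the loop.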
2567 */ 2568 pass2 = ip->flags; 2569 cpu_ccfence(); 2570 if (atomic_cmpset_int(&ip->flags, 2571 pass2, 2572 (pass2 & ~(HAMMER2_INODE_SYNCQ | 2573 HAMMER2_INODE_SYNCQ_WAKEUP)) | 2574 HAMMER2_INODE_SYNCQ_PASS2) == 0) { 2575 continue; 2576 } 2577 TAILQ_REMOVE(&pmp->syncq, ip, entry); 2578 --pmp->sideq_count; 2579 hammer2_spin_unex(&pmp->list_spin); 2580 2581 /* 2582 * Tickle anyone waiting on ip->flags or the hysteresis 2583 * on the dirty inode count. 2584 */ 2585 if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP) 2586 wakeup(&ip->flags); 2587 if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) { 2588 wakecount = 0; 2589 hammer2_pfs_memory_wakeup(pmp, 0); 2590 } 2591 2592 /* 2593 * Relock the inode, and we inherit a ref from the above. 2594 * We will check for a race after we acquire the vnode. 2595 */ 2596 hammer2_mtx_ex(&ip->lock); 2597 2598 /* 2599 * We need the vp in order to vfsync() dirty buffers, so if 2600 * one isn't attached we can skip it. 2601 * 2602 * Ordering the inode lock and then the vnode lock has the 2603 * potential to deadlock. If we had left SYNCQ set that could 2604 * also deadlock us against the frontend even if we don't hold 2605 * any locks, but the latter is not a problem now since we 2606 * cleared it. igetv will temporarily release the inode lock 2607 * in a safe manner to work-around the deadlock. 2608 * 2609 * Unfortunately it is still possible to deadlock when the 2610 * frontend obtains multiple inode locks, because all the 2611 * related vnodes are already locked (nor can the vnode locks 2612 * be released and reacquired without messing up RECLAIM and 2613 * INACTIVE sequencing). 2614 * 2615 * The solution for now is to move the vp back onto SIDEQ 2616 * and set dorestart, which will restart the flush after we 2617 * exhaust the current SYNCQ. Note that additional 2618 * dependencies may build up, so we definitely need to move 2619 * the whole SIDEQ back to SYNCQ when we restart. 2620 */ 2621 vp = ip->vp; 2622 if (vp) { 2623 if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) { 2624 /* 2625 * Failed to get the vnode, requeue the inode 2626 * (PASS2 is already set so it will be found 2627 * again on the restart). 2628 * 2629 * Then unlock, possibly sleep, and retry 2630 * later. We sleep if PASS2 was *previously* 2631 * set, before we set it again above. 2632 */ 2633 vp = NULL; 2634 dorestart = 1; 2635 #ifdef HAMMER2_DEBUG_SYNC 2636 kprintf("inum %ld (sync delayed by vnode)\n", 2637 (long)ip->meta.inum); 2638 #endif 2639 hammer2_inode_delayed_sideq(ip); 2640 2641 hammer2_mtx_unlock(&ip->lock); 2642 hammer2_inode_drop(ip); 2643 2644 if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) { 2645 tsleep(&dorestart, 0, "h2syndel", 2); 2646 } 2647 hammer2_spin_ex(&pmp->list_spin); 2648 continue; 2649 } 2650 } else { 2651 vp = NULL; 2652 } 2653 2654 /* 2655 * If the inode wound up on a SIDEQ again it will already be 2656 * prepped for another PASS2. In this situation if we flush 2657 * it now we will just wind up flushing it again in the same 2658 * syncer run, so we might as well not flush it now. 2659 */ 2660 if (ip->flags & HAMMER2_INODE_SIDEQ) { 2661 hammer2_mtx_unlock(&ip->lock); 2662 hammer2_inode_drop(ip); 2663 if (vp) 2664 vput(vp); 2665 dorestart = 1; 2666 hammer2_spin_ex(&pmp->list_spin); 2667 continue; 2668 } 2669 2670 /* 2671 * Ok we have the inode exclusively locked and if vp is 2672 * not NULL that will also be exclusively locked. Do the 2673 * meat of the flush. 2674 * 2675 * vp token needed for v_rbdirty_tree check / vclrisdirty 2676 * sequencing. 
Though we hold the vnode exclusively so 2677 * we shouldn't need to hold the token also in this case. 2678 */ 2679 if (vp) { 2680 vfsync(vp, MNT_WAIT, 1, NULL, NULL); 2681 bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */ 2682 } 2683 2684 /* 2685 * If the inode has not yet been inserted into the tree 2686 * we must do so. Then sync and flush it. The flush should 2687 * update the parent. 2688 */ 2689 if (ip->flags & HAMMER2_INODE_DELETING) { 2690 #ifdef HAMMER2_DEBUG_SYNC 2691 kprintf("inum %ld destroy\n", (long)ip->meta.inum); 2692 #endif 2693 hammer2_inode_chain_des(ip); 2694 atomic_add_long(&hammer2_iod_inode_deletes, 1); 2695 } else if (ip->flags & HAMMER2_INODE_CREATING) { 2696 #ifdef HAMMER2_DEBUG_SYNC 2697 kprintf("inum %ld insert\n", (long)ip->meta.inum); 2698 #endif 2699 hammer2_inode_chain_ins(ip); 2700 atomic_add_long(&hammer2_iod_inode_creates, 1); 2701 } 2702 #ifdef HAMMER2_DEBUG_SYNC 2703 kprintf("inum %ld chain-sync\n", (long)ip->meta.inum); 2704 #endif 2705 2706 /* 2707 * Because I kinda messed up the design and index the inodes 2708 * under the root inode, alongside the directory entries, 2709 * we can't flush the inode index under the iroot until the 2710 * end. If we do it now we might miss effects created by 2711 * other inodes on the SYNCQ. 2712 * 2713 * Do a normal (non-FSSYNC) flush instead, which allows the 2714 * vnode code to work the same. We don't want to force iroot 2715 * back onto the SIDEQ, and we also don't want the flush code 2716 * to update pfs_iroot_blocksets until the final flush later. 2717 * 2718 * XXX at the moment this will likely result in a double-flush 2719 * of the iroot chain. 2720 */ 2721 hammer2_inode_chain_sync(ip); 2722 if (ip == pmp->iroot) { 2723 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP); 2724 } else { 2725 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP | 2726 HAMMER2_XOP_FSSYNC); 2727 } 2728 if (vp) { 2729 lwkt_gettoken(&vp->v_token); 2730 if ((ip->flags & (HAMMER2_INODE_MODIFIED | 2731 HAMMER2_INODE_RESIZED | 2732 HAMMER2_INODE_DIRTYDATA)) == 0 && 2733 RB_EMPTY(&vp->v_rbdirty_tree) && 2734 !bio_track_active(&vp->v_track_write)) { 2735 vclrisdirty(vp); 2736 } else { 2737 hammer2_inode_delayed_sideq(ip); 2738 } 2739 lwkt_reltoken(&vp->v_token); 2740 vput(vp); 2741 vp = NULL; /* safety */ 2742 } 2743 atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2); 2744 hammer2_inode_unlock(ip); /* unlock+drop */ 2745 /* ip pointer invalid */ 2746 2747 /* 2748 * If the inode got dirtied after we dropped our locks, 2749 * it will have already been moved back to the SIDEQ. 2750 */ 2751 hammer2_spin_ex(&pmp->list_spin); 2752 } 2753 hammer2_spin_unex(&pmp->list_spin); 2754 hammer2_pfs_memory_wakeup(pmp, 0); 2755 2756 if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) { 2757 #ifdef HAMMER2_DEBUG_SYNC 2758 kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n"); 2759 /*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/ 2760 #endif 2761 dorestart = 1; 2762 goto restart; 2763 } 2764 #ifdef HAMMER2_DEBUG_SYNC 2765 kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n"); 2766 /*tsleep(&dorestart, 0, "h2STG2", hz*20);*/ 2767 #endif 2768 2769 /* 2770 * We have to flush the PFS root last, even if it does not appear to 2771 * be dirty, because all the inodes in the PFS are indexed under it. 2772 * The normal flushing of iroot above would only occur if directory 2773 * entries under the root were changed. 2774 * 2775 * Specifying VOLHDR will cause an additional flush of hmp->spmp 2776 * for the media making up the cluster.
2777 */ 2778 if ((ip = pmp->iroot) != NULL) { 2779 hammer2_inode_ref(ip); 2780 hammer2_mtx_ex(&ip->lock); 2781 hammer2_inode_chain_sync(ip); 2782 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP | 2783 HAMMER2_XOP_FSSYNC | 2784 HAMMER2_XOP_VOLHDR); 2785 hammer2_inode_unlock(ip); /* unlock+drop */ 2786 } 2787 #ifdef HAMMER2_DEBUG_SYNC 2788 kprintf("FILESYSTEM SYNC STAGE 2 DONE\n"); 2789 #endif 2790 2791 /* 2792 * device bioq sync 2793 */ 2794 hammer2_bioq_sync(pmp); 2795 2796 #if 0 2797 /* 2798 * Generally speaking we now want to flush the media topology from 2799 * the iroot through to the inodes. The flush stops at any inode 2800 * boundary, which allows the frontend to continue running concurrent 2801 * modifying operations on inodes (including kernel flushes of 2802 * buffers) without interfering with the main sync. 2803 * 2804 * Use the XOP interface to concurrently flush all nodes to 2805 * synchronize the PFSROOT subtopology to the media. A standard 2806 * end-of-scan ENOENT error indicates cluster sufficiency. 2807 * 2808 * Note that this flush will not be visible on crash recovery until 2809 * we flush the super-root topology in the next loop. 2810 * 2811 * XXX For now wait for all flushes to complete. 2812 */ 2813 if (mp && (ip = pmp->iroot) != NULL) { 2814 /* 2815 * If unmounting try to flush everything including any 2816 * sub-trees under inodes, just in case there is dangling 2817 * modified data, as a safety. Otherwise just flush up to 2818 * the inodes in this stage. 2819 */ 2820 kprintf("MP & IROOT\n"); 2821 #ifdef HAMMER2_DEBUG_SYNC 2822 kprintf("FILESYSTEM SYNC STAGE 3 IROOT BEGIN\n"); 2823 #endif 2824 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 2825 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2826 HAMMER2_XOP_VOLHDR | 2827 HAMMER2_XOP_FSSYNC | 2828 HAMMER2_XOP_INODE_STOP); 2829 } else { 2830 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2831 HAMMER2_XOP_INODE_STOP | 2832 HAMMER2_XOP_VOLHDR | 2833 HAMMER2_XOP_FSSYNC | 2834 HAMMER2_XOP_INODE_STOP); 2835 } 2836 hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc); 2837 error = hammer2_xop_collect(&xop->head, 2838 HAMMER2_XOP_COLLECT_WAITALL); 2839 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2840 #ifdef HAMMER2_DEBUG_SYNC 2841 kprintf("FILESYSTEM SYNC STAGE 3 IROOT END\n"); 2842 #endif 2843 if (error == HAMMER2_ERROR_ENOENT) 2844 error = 0; 2845 else 2846 error = hammer2_error_to_errno(error); 2847 } else { 2848 error = 0; 2849 } 2850 #endif 2851 error = 0; /* XXX */ 2852 hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH); 2853 2854 return (error); 2855 } 2856 2857 static 2858 int 2859 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp) 2860 { 2861 hammer2_inode_t *ip; 2862 2863 KKASSERT(MAXFIDSZ >= 16); 2864 ip = VTOI(vp); 2865 fhp->fid_len = offsetof(struct fid, fid_data[16]); 2866 fhp->fid_ext = 0; 2867 ((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum; 2868 ((hammer2_tid_t *)fhp->fid_data)[1] = 0; 2869 2870 return 0; 2871 } 2872 2873 static 2874 int 2875 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 2876 struct fid *fhp, struct vnode **vpp) 2877 { 2878 hammer2_pfs_t *pmp; 2879 hammer2_tid_t inum; 2880 int error; 2881 2882 pmp = MPTOPMP(mp); 2883 inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK; 2884 if (vpp) { 2885 if (inum == 1) 2886 error = hammer2_vfs_root(mp, vpp); 2887 else 2888 error = hammer2_vfs_vget(mp, NULL, inum, vpp); 2889 } else { 2890 error = 0; 2891 } 2892 if (error) 2893 kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error); 2894 return error; 
2895 } 2896 2897 static 2898 int 2899 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 2900 int *exflagsp, struct ucred **credanonp) 2901 { 2902 hammer2_pfs_t *pmp; 2903 struct netcred *np; 2904 int error; 2905 2906 pmp = MPTOPMP(mp); 2907 np = vfs_export_lookup(mp, &pmp->export, nam); 2908 if (np) { 2909 *exflagsp = np->netc_exflags; 2910 *credanonp = &np->netc_anon; 2911 error = 0; 2912 } else { 2913 error = EACCES; 2914 } 2915 return error; 2916 } 2917 2918 /* 2919 * This handles hysteresis on regular file flushes. Because the BIOs are 2920 * routed to a thread it is possible for an excessive number to build up 2921 * and cause long front-end stalls long before the runningbuffspace limit 2922 * is hit, so we implement hammer2_flush_pipe to control the 2923 * hysteresis. 2924 * 2925 * This is a particular problem when compression is used. 2926 */ 2927 void 2928 hammer2_lwinprog_ref(hammer2_pfs_t *pmp) 2929 { 2930 atomic_add_int(&pmp->count_lwinprog, 1); 2931 } 2932 2933 void 2934 hammer2_lwinprog_drop(hammer2_pfs_t *pmp) 2935 { 2936 int lwinprog; 2937 2938 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1); 2939 if ((lwinprog & HAMMER2_LWINPROG_WAITING) && 2940 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) { 2941 atomic_clear_int(&pmp->count_lwinprog, 2942 HAMMER2_LWINPROG_WAITING); 2943 wakeup(&pmp->count_lwinprog); 2944 } 2945 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) && 2946 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) { 2947 atomic_clear_int(&pmp->count_lwinprog, 2948 HAMMER2_LWINPROG_WAITING0); 2949 wakeup(&pmp->count_lwinprog); 2950 } 2951 } 2952 2953 void 2954 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe) 2955 { 2956 int lwinprog; 2957 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING : 2958 HAMMER2_LWINPROG_WAITING0; 2959 2960 for (;;) { 2961 lwinprog = pmp->count_lwinprog; 2962 cpu_ccfence(); 2963 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 2964 break; 2965 tsleep_interlock(&pmp->count_lwinprog, 0); 2966 atomic_set_int(&pmp->count_lwinprog, lwflag); 2967 lwinprog = pmp->count_lwinprog; 2968 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 2969 break; 2970 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz); 2971 } 2972 } 2973 2974 /* 2975 * It is possible for an excessive number of dirty chains or dirty inodes 2976 * to build up. When this occurs we start an asynchronous filesystem sync. 2977 * If the level continues to build up, we stall, waiting for it to drop, 2978 * with some hysteresis. 2979 * 2980 * This relies on the kernel calling hammer2_vfs_modifying() prior to 2981 * obtaining any vnode locks before making a modifying VOP call. 2982 */ 2983 static int 2984 hammer2_vfs_modifying(struct mount *mp) 2985 { 2986 if (mp->mnt_flag & MNT_RDONLY) 2987 return EROFS; 2988 hammer2_pfs_memory_wait(MPTOPMP(mp)); 2989 2990 return 0; 2991 } 2992 2993 /* 2994 * Initiate an asynchronous filesystem sync and, with hysteresis, 2995 * stall if the internal data structure count becomes too bloated. 
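 *
 * The syncer is poked once the dirty chain or dirty inode count passes
 * half its limit.  The caller only stalls when a limit is actually
 * reached, and hammer2_pfs_memory_wakeup() wakes it again once the
 * counts drop back below roughly 2/3 of the limits.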
2996 */ 2997 void 2998 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp) 2999 { 3000 uint32_t waiting; 3001 int pcatch; 3002 int error; 3003 3004 if (pmp == NULL || pmp->mp == NULL) 3005 return; 3006 3007 for (;;) { 3008 waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK; 3009 cpu_ccfence(); 3010 3011 /* 3012 * Start the syncer running at 1/2 the limit 3013 */ 3014 if (waiting > hammer2_limit_dirty_chains / 2 || 3015 pmp->sideq_count > hammer2_limit_dirty_inodes / 2) { 3016 trigger_syncer(pmp->mp); 3017 } 3018 3019 /* 3020 * Stall at the limit waiting for the counts to drop. 3021 * This code will typically be woken up once the count 3022 * drops below 3/4 the limit, or in one second. 3023 */ 3024 if (waiting < hammer2_limit_dirty_chains && 3025 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3026 break; 3027 } 3028 3029 pcatch = curthread->td_proc ? PCATCH : 0; 3030 3031 tsleep_interlock(&pmp->inmem_dirty_chains, pcatch); 3032 atomic_set_int(&pmp->inmem_dirty_chains, 3033 HAMMER2_DIRTYCHAIN_WAITING); 3034 if (waiting < hammer2_limit_dirty_chains && 3035 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3036 break; 3037 } 3038 trigger_syncer(pmp->mp); 3039 error = tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED | pcatch, 3040 "h2memw", hz); 3041 if (error == ERESTART) 3042 break; 3043 } 3044 } 3045 3046 /* 3047 * Wake up any stalled frontend ops waiting, with hysteresis, using 3048 * 2/3 of the limit. 3049 */ 3050 void 3051 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count) 3052 { 3053 uint32_t waiting; 3054 3055 if (pmp) { 3056 waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count); 3057 /* don't need --waiting to test flag */ 3058 3059 if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) && 3060 (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <= 3061 hammer2_limit_dirty_chains * 2 / 3 && 3062 pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) { 3063 atomic_clear_int(&pmp->inmem_dirty_chains, 3064 HAMMER2_DIRTYCHAIN_WAITING); 3065 wakeup(&pmp->inmem_dirty_chains); 3066 } 3067 } 3068 } 3069 3070 void 3071 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp) 3072 { 3073 if (pmp) { 3074 atomic_add_int(&pmp->inmem_dirty_chains, 1); 3075 } 3076 } 3077 3078 /* 3079 * Volume header data locks 3080 */ 3081 void 3082 hammer2_voldata_lock(hammer2_dev_t *hmp) 3083 { 3084 lockmgr(&hmp->vollk, LK_EXCLUSIVE); 3085 } 3086 3087 void 3088 hammer2_voldata_unlock(hammer2_dev_t *hmp) 3089 { 3090 lockmgr(&hmp->vollk, LK_RELEASE); 3091 } 3092 3093 void 3094 hammer2_voldata_modify(hammer2_dev_t *hmp) 3095 { 3096 if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) { 3097 atomic_add_long(&hammer2_count_modified_chains, 1); 3098 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED); 3099 hammer2_pfs_memory_inc(hmp->vchain.pmp); 3100 } 3101 } 3102 3103 /* 3104 * Returns 0 if the filesystem has tons of free space 3105 * Returns 1 if the filesystem has less than 10% remaining 3106 * Returns 2 if the filesystem has less than 2%/5% (user/root) remaining. 
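 *
 * The nominal free space and the reserve are recomputed from the
 * cluster at most once per tick and cached in pmp->free_nominal and
 * pmp->free_reserved.  Non-root users are checked against the full
 * reserve, root against half of it.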
3107 */ 3108 int 3109 hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred) 3110 { 3111 hammer2_pfs_t *pmp; 3112 hammer2_dev_t *hmp; 3113 hammer2_off_t free_reserved; 3114 hammer2_off_t free_nominal; 3115 int i; 3116 3117 pmp = ip->pmp; 3118 3119 if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) { 3120 free_reserved = HAMMER2_SEGSIZE; 3121 free_nominal = 0x7FFFFFFFFFFFFFFFLLU; 3122 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 3123 hmp = pmp->pfs_hmps[i]; 3124 if (hmp == NULL) 3125 continue; 3126 if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER && 3127 pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER) 3128 continue; 3129 3130 if (free_nominal > hmp->voldata.allocator_free) 3131 free_nominal = hmp->voldata.allocator_free; 3132 if (free_reserved < hmp->free_reserved) 3133 free_reserved = hmp->free_reserved; 3134 } 3135 3136 /* 3137 * SMP races ok 3138 */ 3139 pmp->free_reserved = free_reserved; 3140 pmp->free_nominal = free_nominal; 3141 pmp->free_ticks = ticks; 3142 } else { 3143 free_reserved = pmp->free_reserved; 3144 free_nominal = pmp->free_nominal; 3145 } 3146 if (cred && cred->cr_uid != 0) { 3147 if ((int64_t)(free_nominal - bytes) < 3148 (int64_t)free_reserved) { 3149 return 2; 3150 } 3151 } else { 3152 if ((int64_t)(free_nominal - bytes) < 3153 (int64_t)free_reserved / 2) { 3154 return 2; 3155 } 3156 } 3157 if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2) 3158 return 1; 3159 return 0; 3160 } 3161 3162 /* 3163 * Debugging 3164 */ 3165 void 3166 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int bi, int *countp, 3167 char pfx, u_int flags) 3168 { 3169 hammer2_chain_t *scan; 3170 hammer2_chain_t *parent; 3171 3172 --*countp; 3173 if (*countp == 0) { 3174 kprintf("%*.*s...\n", tab, tab, ""); 3175 return; 3176 } 3177 if (*countp < 0) 3178 return; 3179 kprintf("%*.*s%c-chain %p %s.%-3d %016jx %016jx/%-2d mir=%016jx\n", 3180 tab, tab, "", pfx, chain, 3181 hammer2_bref_type_str(chain->bref.type), bi, 3182 chain->bref.data_off, chain->bref.key, chain->bref.keybits, 3183 chain->bref.mirror_tid); 3184 3185 kprintf("%*.*s [%08x] (%s) refs=%d", 3186 tab, tab, "", 3187 chain->flags, 3188 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE && 3189 chain->data) ? (char *)chain->data->ipdata.filename : "?"), 3190 chain->refs); 3191 3192 parent = chain->parent; 3193 if (parent) 3194 kprintf("\n%*.*s p=%p [pflags %08x prefs %d]", 3195 tab, tab, "", 3196 parent, parent->flags, parent->refs); 3197 if (RB_EMPTY(&chain->core.rbtree)) { 3198 kprintf("\n"); 3199 } else { 3200 int bi = 0; 3201 kprintf(" {\n"); 3202 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) { 3203 if ((scan->flags & flags) || flags == (u_int)-1) { 3204 hammer2_dump_chain(scan, tab + 4, bi, countp, 3205 'a', flags); 3206 } 3207 bi++; 3208 } 3209 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data) 3210 kprintf("%*.*s}(%s)\n", tab, tab, "", 3211 chain->data->ipdata.filename); 3212 else 3213 kprintf("%*.*s}\n", tab, tab, ""); 3214 } 3215 } 3216
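
/*
 * Illustrative sketch (not compiled in): the dirty-chain accounting
 * pattern used elsewhere in this file.  hammer2_voldata_modify() above
 * shows the "dirty" side and the unmount helper shows the "clean" side;
 * the helpers below merely restate that pairing for an arbitrary chain.
 * The example_* names are placeholders, not part of the filesystem.
 */
#if 0
static void
example_mark_chain_modified(hammer2_chain_t *chain)
{
	/* account for a newly dirtied chain and apply memory pressure */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_add_long(&hammer2_count_modified_chains, 1);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_inc(chain->pmp);
	}
}

static void
example_clear_chain_modified(hammer2_chain_t *chain)
{
	/* undo the accounting and wake any stalled frontend ops */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(chain->pmp, -1);
	}
}
#endif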