/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
 *
 * This header file contains structures used internally by the HAMMER2
 * implementation.  See hammer2_disk.h for on-disk structures.
 *
 * There is an in-memory representation of all on-media data structures.
 * Almost everything is represented by a hammer2_chain structure in-memory.
 * Other higher-level structures typically map to chains.
 *
 * A great deal of data is accessed simply via its buffer cache buffer,
 * which is mapped for the duration of the chain's lock.  Hammer2 must
 * implement its own buffer cache layer on top of the system layer to
 * allow for different threads to lock different sub-block-sized buffers.
 *
 * When modifications are made to a chain a new filesystem block must be
 * allocated.  Multiple modifications do not typically allocate new blocks
 * until the current block has been flushed.  Flushes do not block the
 * front-end unless the front-end operation crosses the current inode being
 * flushed.
 *
 * The in-memory representation may remain cached (for example in order to
 * placemark clustering locks) even after the related data has been
 * detached.
 */

#ifndef _VFS_HAMMER2_HAMMER2_H_
#define _VFS_HAMMER2_HAMMER2_H_

#ifdef _KERNEL
#error "_KERNEL shouldn't be defined"
#endif
#ifdef _KERNEL_STRUCTURES
#error "_KERNEL_STRUCTURES shouldn't be defined"
#endif

#ifdef _KERNEL
#include <sys/param.h>
#endif
#include <sys/types.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#endif
//#include <sys/conf.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif
//#include <sys/diskslice.h>
#include <sys/tree.h>
//#include <sys/malloc.h>
#include <sys/mount.h>
/*
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/stat.h>
#include <sys/thread.h>
#include <sys/lockf.h>
#include <sys/buf.h>
*/
#include <sys/queue.h>
/*
#include <sys/limits.h>
#include <sys/dmsg.h>
#include <sys/mutex.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/objcache.h>
*/

#ifdef _KERNEL
#include <sys/signal2.h>
#include <sys/buf2.h>
#include <sys/mutex2.h>
#include <sys/spinlock2.h>
#endif

/*
#include "hammer2_xxhash.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_ioctl.h"
*/

#include <sys/time.h>
#include <sys/vfscache.h>
#include <sys/errno.h>

#include <machine/atomic.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <uuid.h>

#include <vfs/hammer2/hammer2_disk.h>
#include <vfs/hammer2/hammer2_ioctl.h>
#include <vfs/hammer2/hammer2_mount.h>
#include <vfs/hammer2/hammer2_xxhash.h>
#include <mkfs_hammer2.h>

#include "hammer2_compat.h"

struct hammer2_io;
struct hammer2_chain;
struct hammer2_inode;
struct hammer2_depend;
struct hammer2_dev;
struct hammer2_pfs;
union hammer2_xop;

/*
 * Mutex and lock shims.  Hammer2 requires support for asynchronous and
 * abortable locks, and both exclusive and shared spinlocks.  Normal
 * synchronous non-abortable locks can be substituted for spinlocks.
 */
/*
typedef mtx_t				hammer2_mtx_t;
typedef mtx_state_t			hammer2_mtx_state_t;

typedef struct spinlock			hammer2_spin_t;

#define hammer2_mtx_ex			mtx_lock_ex_quick
#define hammer2_mtx_ex_try		mtx_lock_ex_try
#define hammer2_mtx_sh			mtx_lock_sh_quick
#define hammer2_mtx_sh_again		mtx_lock_sh_again
#define hammer2_mtx_sh_try		mtx_lock_sh_try
#define hammer2_mtx_unlock		mtx_unlock
#define hammer2_mtx_upgrade_try		mtx_upgrade_try
#define hammer2_mtx_downgrade		mtx_downgrade
#define hammer2_mtx_owned		mtx_owned
#define hammer2_mtx_init		mtx_init
#define hammer2_mtx_temp_release	mtx_lock_temp_release
#define hammer2_mtx_temp_restore	mtx_lock_temp_restore
#define hammer2_mtx_refs		mtx_lockrefs

#define hammer2_spin_init		spin_init
#define hammer2_spin_sh			spin_lock_shared
#define hammer2_spin_ex			spin_lock
#define hammer2_spin_unsh		spin_unlock_shared
#define hammer2_spin_unex		spin_unlock
#define hammer2_spin_lock_update	spin_lock_update
#define hammer2_spin_unlock_update	spin_unlock_update
*/

TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
TAILQ_HEAD(hammer2_chain_list, hammer2_chain);

typedef struct hammer2_xop_list	hammer2_xop_list_t;

/*
 * Cap the dynamic calculation for the maximum number of dirty
 * chains and dirty inodes allowed.
 */
#define HAMMER2_LIMIT_DIRTY_CHAINS	(1024*1024)
#define HAMMER2_LIMIT_DIRTY_INODES	(65536)

/*
 * The chain structure tracks a portion of the media topology from the
 * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
 * data blocks, and freemap nodes and leaves.
 *
 * The chain structure utilizes a simple singly-homed topology and the
 * chain's in-memory topology will move around as the chains do, due mainly
 * to renames and indirect block creation.
 *
 * Block Table Updates
 *
 *	Block table updates for insertions and updates are delayed until the
 *	flush.  This allows us to avoid having to modify the parent chain
 *	all the way to the root.
 *
 *	Block table deletions are performed immediately (modifying the parent
 *	in the process) because the flush code uses the chain structure to
 *	track delayed updates and the chain will be (likely) gone or moved to
 *	another location in the topology after a deletion.
 *
 *	A prior iteration of the code tried to keep the relationship intact
 *	on deletes by doing a delete-duplicate operation on the chain, but
 *	it added way too much complexity to the codebase.
 *
 * Flush Synchronization
 *
 *	The flush code must flush modified chains bottom-up.  Because chain
 *	structures can shift around and are NOT topologically stable,
 *	modified chains are independently indexed for the flush.  As the flush
 *	runs it modifies (or further modifies) and updates the parents,
 *	propagating the flush all the way to the volume root.
 *
 *	Modifying front-end operations can occur during a flush but will block
 *	in two cases: (1) when the front-end tries to operate on the inode
 *	currently in the midst of being flushed and (2) if the front-end
 *	crosses an inode currently being flushed (such as during a rename).
 *	So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
 *	the flusher is currently working on "a/b/c", the rename will block
 *	temporarily in order to ensure that "x" exists in one place or the
 *	other.
 *
 *	Meta-data statistics are updated by the flusher.  The front-end will
 *	make estimates but meta-data must be fully synchronized only during a
 *	flush in order to ensure that it remains correct across a crash.
 *
 *	Multiple flush synchronizations can theoretically be in-flight at the
 *	same time but the implementation is not coded to handle the case and
 *	currently serializes them.
 *
 * Snapshots:
 *
 *	Snapshots currently require the subdirectory tree being snapshotted
 *	to be flushed.  The snapshot then creates a new super-root inode which
 *	copies the flushed blockdata of the directory or file that was
 *	snapshotted.
 *
 * Radix tree NOTES:
 *
 *	- Note that the radix tree runs in powers of 2 only so sub-trees
 *	  cannot straddle edges.
 */
RB_HEAD(hammer2_chain_tree, hammer2_chain);

struct hammer2_reptrack {
	hammer2_spin_t		spin;
	struct hammer2_reptrack	*next;
	struct hammer2_chain	*chain;
};

/*
 * Core topology for chain (embedded in chain).  Protected by a spinlock.
 */
struct hammer2_chain_core {
	hammer2_spin_t	spin;
	struct hammer2_reptrack *reptrack;
	struct hammer2_chain_tree rbtree;	/* sub-chains */
	int		live_zero;	/* blockref array opt */
	u_int		live_count;	/* live (not deleted) chains in tree */
	u_int		chain_count;	/* live + deleted chains under core */
	int		generation;	/* generation number (inserts only) */
};

typedef struct hammer2_chain_core hammer2_chain_core_t;

RB_HEAD(hammer2_io_tree, hammer2_io);

/*
 * DIO - Management structure wrapping system buffer cache.
 *
 * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate
 * fixed-sized filesystem buffers fronted by variable-sized hammer2_chain
 * structures.
 */
/* #define HAMMER2_IO_DEBUG */

#ifdef HAMMER2_IO_DEBUG
#define HAMMER2_IO_DEBUG_ARGS	, const char *file, int line
#define HAMMER2_IO_DEBUG_CALL	, file, line
#define HAMMER2_IO_DEBUG_COUNT	2048
#define HAMMER2_IO_DEBUG_MASK	(HAMMER2_IO_DEBUG_COUNT - 1)
#else
#define HAMMER2_IO_DEBUG_ARGS
#define HAMMER2_IO_DEBUG_CALL
#endif

struct hammer2_io {
	RB_ENTRY(hammer2_io) rbnode;	/* indexed by device offset */
	struct hammer2_dev *hmp;
	struct m_vnode	*devvp;
	struct m_buf	*bp;
	off_t		dbase;		/* offset of devvp within volumes */
	off_t		pbase;
	uint64_t	refs;
	int		psize;
	int		act;		/* activity */
	int		btype;		/* approximate BREF_TYPE_* */
	int		ticks;
	int		error;
#ifdef HAMMER2_IO_DEBUG
	int		debug_index;
#else
	int		unused01;
#endif
	uint64_t	dedup_valid;	/* valid for dedup operation */
	uint64_t	dedup_alloc;	/* allocated / de-dupable */
#ifdef HAMMER2_IO_DEBUG
	const char	*debug_file[HAMMER2_IO_DEBUG_COUNT];
	void		*debug_td[HAMMER2_IO_DEBUG_COUNT];
	int		debug_line[HAMMER2_IO_DEBUG_COUNT];
	uint64_t	debug_refs[HAMMER2_IO_DEBUG_COUNT];
#endif
};

typedef struct hammer2_io hammer2_io_t;

#define HAMMER2_DIO_INPROG	0x8000000000000000LLU	/* bio in progress */
#define HAMMER2_DIO_GOOD	0x4000000000000000LLU	/* dio->bp is stable */
#define HAMMER2_DIO_WAITING	0x2000000000000000LLU	/* wait on INPROG */
#define HAMMER2_DIO_DIRTY	0x1000000000000000LLU	/* flush last drop */
#define HAMMER2_DIO_FLUSH	0x0800000000000000LLU	/* immediate flush */

#define HAMMER2_DIO_MASK	0x00FFFFFFFFFFFFFFLLU

/*
 * Primary chain structure keeps track of the topology in-memory.
 */
struct hammer2_chain {
	hammer2_mtx_t	lock;
	hammer2_chain_core_t	core;
	RB_ENTRY(hammer2_chain) rbnode;		/* live chain(s) */
	hammer2_blockref_t	bref;
	struct hammer2_chain	*parent;
	struct hammer2_dev	*hmp;
	struct hammer2_pfs	*pmp;		/* A PFS or super-root (spmp) */

	struct lock	diolk;			/* xop focus interlock */
	hammer2_io_t	*dio;			/* physical data buffer */
	hammer2_media_data_t *data;		/* data pointer shortcut */
	u_int		bytes;			/* physical data size */
	u_int		flags;
	u_int		refs;
	u_int		lockcnt;
	int		error;			/* on-lock data error state */
	int		cache_index;		/* heur speeds up lookup */

	TAILQ_ENTRY(hammer2_chain) lru_node;	/* 0-refs LRU */
};

typedef struct hammer2_chain hammer2_chain_t;

int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);

/*
 * Passed to hammer2_chain_create(), causes methods to be inherited from
 * parent.
 */
#define HAMMER2_METH_DEFAULT		-1

/*
 * Special notes on flags:
 *
 * INITIAL	- This flag allows a chain to be created and for storage to
 *		  be allocated without having to immediately instantiate the
 *		  related buffer.  The data is assumed to be all-zeros.  It
 *		  is primarily used for indirect blocks.
 *
 * MODIFIED	- The chain's media data has been modified.  Prevents chain
 *		  free on lastdrop if still in the topology.
 *
 * UPDATE	- Chain might not be modified but parent blocktable needs
 *		  an update.  Prevents chain free on lastdrop if still in
 *		  the topology.
 *
 * BLKMAPPED	- Indicates that the chain is present in the parent blockmap.
 *
 * BLKMAPUPD	- Indicates that the chain is present but needs to be updated
 *		  in the parent blockmap.
 */
#define HAMMER2_CHAIN_MODIFIED		0x00000001	/* dirty chain data */
#define HAMMER2_CHAIN_ALLOCATED		0x00000002	/* kmalloc'd chain */
#define HAMMER2_CHAIN_DESTROY		0x00000004
#define HAMMER2_CHAIN_DEDUPABLE		0x00000008	/* registered w/dedup */
#define HAMMER2_CHAIN_DELETED		0x00000010	/* deleted chain */
#define HAMMER2_CHAIN_INITIAL		0x00000020	/* initial create */
#define HAMMER2_CHAIN_UPDATE		0x00000040	/* need parent update */
#define HAMMER2_CHAIN_NOTTESTED		0x00000080	/* crc not generated */
#define HAMMER2_CHAIN_TESTEDGOOD	0x00000100	/* crc tested good */
#define HAMMER2_CHAIN_ONFLUSH		0x00000200	/* on a flush list */
#define HAMMER2_CHAIN_UNUSED0400	0x00000400
#define HAMMER2_CHAIN_VOLUMESYNC	0x00000800	/* needs volume sync */
#define HAMMER2_CHAIN_UNUSED1000	0x00001000
#define HAMMER2_CHAIN_COUNTEDBREFS	0x00002000	/* block table stats */
#define HAMMER2_CHAIN_ONRBTREE		0x00004000	/* on parent RB tree */
#define HAMMER2_CHAIN_ONLRU		0x00008000	/* on LRU list */
#define HAMMER2_CHAIN_UNUSED10000	0x00010000
#define HAMMER2_CHAIN_RELEASE		0x00020000	/* don't keep around */
#define HAMMER2_CHAIN_BLKMAPPED		0x00040000	/* present in blkmap */
#define HAMMER2_CHAIN_BLKMAPUPD		0x00080000	/* +needs updating */
#define HAMMER2_CHAIN_IOINPROG		0x00100000	/* I/O interlock */
#define HAMMER2_CHAIN_IOSIGNAL		0x00200000	/* I/O interlock */
#define HAMMER2_CHAIN_PFSBOUNDARY	0x00400000	/* super->pfs inode */
#define HAMMER2_CHAIN_HINT_LEAF_COUNT	0x00800000	/* redo leaf count */
#define HAMMER2_CHAIN_LRUHINT		0x01000000	/* was reused */

#define HAMMER2_CHAIN_FLUSH_MASK	(HAMMER2_CHAIN_MODIFIED |	\
					 HAMMER2_CHAIN_UPDATE |		\
					 HAMMER2_CHAIN_ONFLUSH |	\
					 HAMMER2_CHAIN_DESTROY)

/*
 * Hammer2 error codes, used by chain->error and cluster->error.  The error
 * code is typically set on-lock unless no I/O was requested, and set on
 * I/O otherwise.  If set for a cluster it generally means that the cluster
 * code could not find a valid copy to present.
 *
 * All H2 error codes are flags and can be accumulated by ORing them
 * together.
 *
 * IO		- An I/O error occurred
 * CHECK	- I/O succeeded but did not match the check code
 * INCOMPLETE	- A cluster is not complete enough to use, or
 *		  a chain cannot be loaded because its parent has an error.
 *
 * NOTE: API allows callers to check zero/non-zero to determine if an error
 *	 condition exists.
 *
 * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
 *	 NULL on other errors.  Check chain->error, not chain->data.
 */
#define HAMMER2_ERROR_NONE		0	/* no error (must be 0) */
#define HAMMER2_ERROR_EIO		0x00000001	/* device I/O error */
#define HAMMER2_ERROR_CHECK		0x00000002	/* check code error */
#define HAMMER2_ERROR_INCOMPLETE	0x00000004	/* incomplete cluster */
#define HAMMER2_ERROR_DEPTH		0x00000008	/* tmp depth limit */
#define HAMMER2_ERROR_BADBREF		0x00000010	/* illegal bref */
#define HAMMER2_ERROR_ENOSPC		0x00000020	/* allocation failure */
#define HAMMER2_ERROR_ENOENT		0x00000040	/* entry not found */
#define HAMMER2_ERROR_ENOTEMPTY		0x00000080	/* dir not empty */
#define HAMMER2_ERROR_EAGAIN		0x00000100	/* retry */
#define HAMMER2_ERROR_ENOTDIR		0x00000200	/* not directory */
#define HAMMER2_ERROR_EISDIR		0x00000400	/* is directory */
#define HAMMER2_ERROR_EINPROGRESS	0x00000800	/* already running */
#define HAMMER2_ERROR_ABORTED		0x00001000	/* aborted operation */
#define HAMMER2_ERROR_EOF		0x00002000	/* end of scan */
#define HAMMER2_ERROR_EINVAL		0x00004000	/* catch-all */
#define HAMMER2_ERROR_EEXIST		0x00008000	/* entry exists */
#define HAMMER2_ERROR_EDEADLK		0x00010000
#define HAMMER2_ERROR_ESRCH		0x00020000
#define HAMMER2_ERROR_ETIMEDOUT		0x00040000

/*
 * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
 *
 * NOTES:
 *	NODATA	    - Asks that the chain->data not be resolved in order
 *		      to avoid I/O.
 *
 *	NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
 *		      the inode itself if the inode is in DIRECTDATA mode
 *		      (i.e. file is <= 512 bytes).  Used by the
 *		      synchronization code to prevent confusion.
 *
 *	SHARED	    - The input chain is expected to be locked shared,
 *		      and the output chain is locked shared.
 *
 *	MATCHIND    - Allows an indirect block / freemap node to be returned
 *		      when the passed key range matches the radix.  Remember
 *		      that key_end is inclusive (e.g. {0x000,0xFFF},
 *		      not {0x000,0x1000}).
 *
 *		      (Cannot be used for remote or cluster ops).
 *
 *	ALWAYS	    - Always resolve the data.  If ALWAYS and NODATA are both
 *		      missing, bulk file data is not resolved but inodes and
 *		      other meta-data will be.
 */
#define HAMMER2_LOOKUP_UNUSED0001	0x00000001
#define HAMMER2_LOOKUP_NODATA		0x00000002	/* data left NULL */
#define HAMMER2_LOOKUP_NODIRECT		0x00000004	/* no offset=0 DD */
#define HAMMER2_LOOKUP_SHARED		0x00000100
#define HAMMER2_LOOKUP_MATCHIND		0x00000200	/* return all chains */
#define HAMMER2_LOOKUP_UNUSED0400	0x00000400
#define HAMMER2_LOOKUP_ALWAYS		0x00000800	/* resolve data */
#define HAMMER2_LOOKUP_UNUSED1000	0x00001000
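
/*
 * Illustrative sketch (not part of the original header): the lookup flags
 * above are normally used with the hammer2_chain_lookup()/hammer2_chain_next()
 * iteration pattern declared later in this file, roughly:
 *
 *	parent = hammer2_chain_lookup_init(ip_chain, 0);
 *	chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *				     &error, HAMMER2_LOOKUP_SHARED);
 *	while (chain) {
 *		(process chain)
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end,
 *					   &error, HAMMER2_LOOKUP_SHARED);
 *	}
 *	hammer2_chain_lookup_done(parent);
 *
 * key_beg/key_end are inclusive, and any chain returned is locked and
 * referenced and must eventually be unlocked/dropped by the caller.
 */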

/*
 * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
 *
 * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
 *	 blocks in the INITIAL-create state.
 */
#define HAMMER2_MODIFY_OPTDATA		0x00000002	/* data can be NULL */

/*
 * Flags passed to hammer2_chain_lock()
 *
 * NOTE: NONBLOCK is only used for hammer2_chain_repparent() and getparent(),
 *	 other functions (e.g. hammer2_chain_lookup(), etc) can't handle its
 *	 operation.
 */
#define HAMMER2_RESOLVE_NEVER		1
#define HAMMER2_RESOLVE_MAYBE		2
#define HAMMER2_RESOLVE_ALWAYS		3
#define HAMMER2_RESOLVE_MASK		0x0F

#define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
#define HAMMER2_RESOLVE_LOCKAGAIN	0x20	/* another shared lock */
#define HAMMER2_RESOLVE_UNUSED40	0x40
#define HAMMER2_RESOLVE_NONBLOCK	0x80	/* non-blocking */

/*
 * Flags passed to hammer2_chain_delete()
 */
#define HAMMER2_DELETE_PERMANENT	0x0001

/*
 * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
 * or hammer2_chain_create().
 */
#define HAMMER2_INSERT_PFSROOT		0x0004
#define HAMMER2_INSERT_SAMEPARENT	0x0008

/*
 * hammer2_freemap_adjust()
 */
#define HAMMER2_FREEMAP_DORECOVER	1
#if 0
#define HAMMER2_FREEMAP_DOMAYFREE	2
#define HAMMER2_FREEMAP_DOREALFREE	3
#endif

/*
 * HAMMER2 cluster - A set of chains representing the same entity.
 *
 * hammer2_cluster typically represents a temporary set of representative
 * chains.  The one exception is that a hammer2_cluster is embedded in
 * hammer2_inode.  This embedded cluster is ONLY used to track the
 * representative chains and cannot be directly locked.
 *
 * A cluster is usually temporary (and thus per-thread) for locking purposes,
 * allowing us to embed the asynchronous storage required for cluster
 * operations in the cluster itself and adjust the state and status without
 * having to worry too much about SMP issues.
 *
 * The exception is the cluster embedded in the hammer2_inode structure.
 * This is used to cache the cluster state on an inode-by-inode basis.
 * Individual hammer2_chain structures not incorporated into clusters might
 * also stick around to cache miscellaneous elements.
 *
 * Because the cluster is a 'working copy' and is usually subject to cluster
 * quorum rules, it is quite possible for us to end up with an insufficient
 * number of live chains to execute an operation.  If an insufficient number
 * of chains remain in a working copy, the operation may have to be
 * downgraded, retried, stalled until the requisite number of chains are
 * available, or possibly even error out depending on the mount type.
 *
 * A cluster's focus is set when it is locked.  The focus can only be set
 * to a chain still part of the synchronized set.
 */
#define HAMMER2_XOPFIFO		16
#define HAMMER2_XOPFIFO_MASK	(HAMMER2_XOPFIFO - 1)
#define HAMMER2_XOPTHREADS_MIN	32
#define HAMMER2_XOPGROUPS_MIN	4

#define HAMMER2_MAXCLUSTER	8
#define HAMMER2_XOPMASK_CLUSTER	((uint64_t)((1LLU << HAMMER2_MAXCLUSTER) - 1))
#define HAMMER2_XOPMASK_VOP	((uint64_t)0x0000000080000000LLU)
#define HAMMER2_XOPMASK_FIFOW	((uint64_t)0x0000000040000000LLU)
#define HAMMER2_XOPMASK_WAIT	((uint64_t)0x0000000020000000LLU)
#define HAMMER2_XOPMASK_FEED	((uint64_t)0x0000000100000000LLU)

#define HAMMER2_XOPMASK_ALLDONE	(HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)

struct hammer2_cluster_item {
	hammer2_chain_t		*chain;
	int			error;
	uint32_t		flags;
};

typedef struct hammer2_cluster_item hammer2_cluster_item_t;

/*
 * INVALID	- Invalid for focus, i.e. not part of synchronized set.
 *		  Once set, this bit is sticky across operations.
 *
 * FEMOD	- Indicates that front-end modifying operations can
 *		  mess with this entry and the MODSYNC copy-back will
 *		  also affect it.
 */
#define HAMMER2_CITEM_INVALID	0x00000001
#define HAMMER2_CITEM_FEMOD	0x00000002
#define HAMMER2_CITEM_NULL	0x00000004

struct hammer2_cluster {
	int			refs;		/* track for deallocation */
	int			ddflag;
	struct hammer2_pfs	*pmp;
	uint32_t		flags;
	int			nchains;
	int			error;		/* error code valid on lock */
	int			focus_index;
	hammer2_chain_t		*focus;		/* current focus (or mod) */
	hammer2_cluster_item_t	array[HAMMER2_MAXCLUSTER];
};

typedef struct hammer2_cluster	hammer2_cluster_t;

/*
 * WRHARD	- Hard mounts can write fully synchronized
 * RDHARD	- Hard mounts can read fully synchronized
 * UNHARD	- Unsynchronized masters present
 * NOHARD	- No masters visible
 * WRSOFT	- Soft mounts can write to at least the SOFT_MASTER
 * RDSOFT	- Soft mounts can read from at least a SOFT_SLAVE
 * UNSOFT	- Unsynchronized slaves present
 * NOSOFT	- No slaves visible
 * RDSLAVE	- slaves are accessible (possibly unsynchronized or remote).
 * MSYNCED	- All masters are fully synchronized
 * SSYNCED	- All known local slaves are fully synchronized to masters
 *
 * All available masters are always incorporated.  All PFSs belonging to a
 * cluster (master, slave, copy, whatever) always try to synchronize the
 * total number of known masters in the PFSs root inode.
 *
 * A cluster might have access to many slaves, copies, or caches, but we
 * have a limited number of cluster slots.  Any such elements which are
 * directly mounted from block device(s) will always be incorporated.  Note
 * that SSYNCED only applies to such elements which are directly mounted,
 * not to any remote slaves, copies, or caches that could be available.  These
 * bits are used to monitor and drive our synchronization threads.
 *
 * When asking the question 'is any data accessible at all', then a simple
 * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
 * these bits are set the object can be read with certain caveats:
 * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
 * And RDSLAVE - not authoritative, has some data but it could be old or
 * incomplete.
 *
 * When both soft and hard mounts are available, data will be read and written
 * via the soft mount only.  But all might be in the cluster because
 * background synchronization threads still need to do their work.
 */
#define HAMMER2_CLUSTER_INODE	0x00000001	/* embedded in inode struct */
#define HAMMER2_CLUSTER_UNUSED2	0x00000002
#define HAMMER2_CLUSTER_LOCKED	0x00000004	/* cluster lks not recursive */
#define HAMMER2_CLUSTER_WRHARD	0x00000100	/* hard-mount can write */
#define HAMMER2_CLUSTER_RDHARD	0x00000200	/* hard-mount can read */
#define HAMMER2_CLUSTER_UNHARD	0x00000400	/* unsynchronized masters */
#define HAMMER2_CLUSTER_NOHARD	0x00000800	/* no masters visible */
#define HAMMER2_CLUSTER_WRSOFT	0x00001000	/* soft-mount can write */
#define HAMMER2_CLUSTER_RDSOFT	0x00002000	/* soft-mount can read */
#define HAMMER2_CLUSTER_UNSOFT	0x00004000	/* unsynchronized slaves */
#define HAMMER2_CLUSTER_NOSOFT	0x00008000	/* no slaves visible */
#define HAMMER2_CLUSTER_MSYNCED	0x00010000	/* all masters synchronized */
#define HAMMER2_CLUSTER_SSYNCED	0x00020000	/* known slaves synchronized */

#define HAMMER2_CLUSTER_ANYDATA	( HAMMER2_CLUSTER_RDHARD |	\
				  HAMMER2_CLUSTER_RDSOFT |	\
				  HAMMER2_CLUSTER_RDSLAVE)
#if 0
#define HAMMER2_CLUSTER_RDOK	( HAMMER2_CLUSTER_RDHARD |	\
				  HAMMER2_CLUSTER_RDSOFT)

#define HAMMER2_CLUSTER_WROK	( HAMMER2_CLUSTER_WRHARD |	\
				  HAMMER2_CLUSTER_WRSOFT)
#endif
#define HAMMER2_CLUSTER_ZFLAGS	( HAMMER2_CLUSTER_WRHARD |	\
				  HAMMER2_CLUSTER_RDHARD |	\
				  HAMMER2_CLUSTER_WRSOFT |	\
				  HAMMER2_CLUSTER_RDSOFT |	\
				  HAMMER2_CLUSTER_MSYNCED |	\
				  HAMMER2_CLUSTER_SSYNCED)

RB_HEAD(hammer2_inode_tree, hammer2_inode);	/* ip->rbnode */
TAILQ_HEAD(inoq_head, hammer2_inode);		/* ip->entry */
TAILQ_HEAD(depq_head, hammer2_depend);		/* depend->entry */
TAILQ_HEAD(recq_head, hammer2_inode);		/* ip->recq_entry */

struct hammer2_depend {
	TAILQ_ENTRY(hammer2_depend) entry;
	struct inoq_head	sideq;
	long			count;
	int			pass2;
	int			unused01;
};

typedef struct hammer2_depend hammer2_depend_t;

/*
 * A hammer2 inode.
 *
 * NOTE: The inode-embedded cluster is never used directly for I/O (since
 *	 it may be shared).  Instead it will be replicated-in and synchronized
 *	 back out if changed.
 */
struct hammer2_inode {
	RB_ENTRY(hammer2_inode) rbnode;		/* inumber lookup (HL) */
	TAILQ_ENTRY(hammer2_inode) entry;	/* SYNCQ/SIDEQ */
	TAILQ_ENTRY(hammer2_inode) recq_entry;	/* makefs */
	hammer2_depend_t	*depend;	/* non-NULL if SIDEQ */
	hammer2_depend_t	depend_static;	/* (in-place allocation) */
	hammer2_mtx_t		lock;		/* inode lock */
	hammer2_mtx_t		truncate_lock;	/* prevent truncates */
	struct hammer2_pfs	*pmp;		/* PFS mount */
	struct m_vnode		*vp;
	hammer2_spin_t		cluster_spin;	/* update cluster */
	hammer2_cluster_t	cluster;
	//struct lockf		advlock;
	u_int			flags;
	u_int			refs;		/* +vpref, +flushref */
	int			ihash;		/* xop worker distribution */
	uint8_t			comp_heuristic;
	hammer2_inode_meta_t	meta;		/* copy of meta-data */
	hammer2_off_t		osize;
};

typedef struct hammer2_inode hammer2_inode_t;

/*
 * MODIFIED	- Inode is in a modified state, ip->meta may have changes.
 * RESIZED	- Inode truncated (any) or inode extended beyond
 *		  EMBEDDED_BYTES.
 *
 * SYNCQ	- Inode is included in the current filesystem sync.  The
 *		  DELETING and CREATING flags will be acted upon.
 *
 * SIDEQ	- Inode has likely been disconnected from the vnode topology
 *		  and so is not visible to the vnode-based filesystem syncer
 *		  code, but is dirty and must be included in the next
 *		  filesystem sync.  These inodes are moved to the SYNCQ at
 *		  the time the sync occurs.
 *
 *		  Inodes are not placed on this queue simply because they have
 *		  become dirty, if a vnode is attached.
 *
 * DELETING	- Inode is flagged for deletion during the next filesystem
 *		  sync.  That is, the inode's chain is currently connected
 *		  and must be deleted during the current or next fs sync.
 *
 * CREATING	- Inode is flagged for creation during the next filesystem
 *		  sync.  That is, the inode's chain topology exists (so
 *		  kernel buffer flushes can occur), but is currently
 *		  disconnected and must be inserted during the current or
 *		  next fs sync.  If the DELETING flag is also set, the
 *		  topology can be thrown away instead.
 *
 * If an inode that is already part of the current filesystem sync is
 * modified by the frontend, including by buffer flushes, the inode lock
 * code detects the SYNCQ flag and moves the inode to the head of the
 * flush-in-progress, then blocks until the flush has gotten past it.
 */
#define HAMMER2_INODE_MODIFIED		0x0001
#define HAMMER2_INODE_UNUSED0002	0x0002
#define HAMMER2_INODE_UNUSED0004	0x0004
#define HAMMER2_INODE_ONRBTREE		0x0008
#define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_chain_sync */
#define HAMMER2_INODE_UNUSED0020	0x0020
#define HAMMER2_INODE_ISUNLINKED	0x0040
#define HAMMER2_INODE_UNUSED0080	0x0080
#define HAMMER2_INODE_SIDEQ		0x0100	/* on side processing queue */
#define HAMMER2_INODE_NOSIDEQ		0x0200	/* disable sideq operation */
#define HAMMER2_INODE_DIRTYDATA		0x0400	/* interlocks inode flush */
#define HAMMER2_INODE_SYNCQ		0x0800	/* sync interlock, sequenced */
#define HAMMER2_INODE_DELETING		0x1000	/* sync interlock, chain topo */
#define HAMMER2_INODE_CREATING		0x2000	/* sync interlock, chain topo */
#define HAMMER2_INODE_SYNCQ_WAKEUP	0x4000	/* sync interlock wakeup */
#define HAMMER2_INODE_SYNCQ_PASS2	0x8000	/* force retry delay */

#define HAMMER2_INODE_DIRTY		(HAMMER2_INODE_MODIFIED |	\
					 HAMMER2_INODE_DIRTYDATA |	\
					 HAMMER2_INODE_DELETING |	\
					 HAMMER2_INODE_CREATING)

int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
		hammer2_tid_t);

/*
 * Transaction management sub-structure under hammer2_pfs
 */
struct hammer2_trans {
	uint32_t	flags;
	uint32_t	sync_wait;
};

typedef struct hammer2_trans hammer2_trans_t;

#define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
#define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
#define HAMMER2_TRANS_SIDEQ		0x20000000	/* run sideq */
#define HAMMER2_TRANS_UNUSED10		0x10000000
#define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
#define HAMMER2_TRANS_RESCAN		0x04000000	/* rescan sideq */
#define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */

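/*
 * Illustrative sketch (not part of the original header): the low bits of
 * trans.flags covered by HAMMER2_TRANS_MASK carry the active transaction
 * count (as the "count mask" comment above suggests) while the high bits
 * carry the HAMMER2_TRANS_* state flags, so both can be read from the same
 * 32 bit word.  The helper name below is hypothetical.
 */
static __inline uint32_t
hammer2_example_trans_count(const hammer2_trans_t *trans)
{
	/* assumed interpretation: number of active transactions */
	return (trans->flags & HAMMER2_TRANS_MASK);
}
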
#define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-LBUFRADIX */
#define HAMMER2_FREEMAP_HEUR_TYPES	8
#define HAMMER2_FREEMAP_HEUR_SIZE	(HAMMER2_FREEMAP_HEUR_NRADIX * \
					 HAMMER2_FREEMAP_HEUR_TYPES)

#define HAMMER2_DEDUP_HEUR_SIZE		(65536 * 4)
#define HAMMER2_DEDUP_HEUR_MASK		(HAMMER2_DEDUP_HEUR_SIZE - 1)

#define HAMMER2_FLUSH_TOP		0x0001
#define HAMMER2_FLUSH_ALL		0x0002
#define HAMMER2_FLUSH_INODE_STOP	0x0004	/* stop at sub-inode */
#define HAMMER2_FLUSH_FSSYNC		0x0008	/* part of filesystem sync */

/*
 * Hammer2 support thread element.
 *
 * Potentially many support threads can hang off of hammer2, primarily
 * off the hammer2_pfs structure.  Typically:
 *
 *	td x Nodes		A synchronization thread for each node.
 *	td x Nodes x workers	Worker threads for frontend operations.
 *	td x 1			Bioq thread for logical buffer writes.
 *
 * In addition, the synchronization thread(s) associated with the
 * super-root PFS (spmp) for a node is responsible for automatic bulkfree
 * and dedup scans.
 */
struct hammer2_thread {
	struct hammer2_pfs *pmp;
	struct hammer2_dev *hmp;
	hammer2_xop_list_t xopq;
	thread_t	td;
	uint32_t	flags;
	int		clindex;	/* cluster element index */
	int		repidx;
	char		*scratch;	/* MAXPHYS */
};

typedef struct hammer2_thread hammer2_thread_t;

#define HAMMER2_THREAD_UNMOUNTING	0x0001	/* unmount request */
#define HAMMER2_THREAD_DEV		0x0002	/* related to dev, not pfs */
#define HAMMER2_THREAD_WAITING		0x0004	/* thread in idle tsleep */
#define HAMMER2_THREAD_REMASTER		0x0008	/* remaster request */
#define HAMMER2_THREAD_STOP		0x0010	/* exit request */
#define HAMMER2_THREAD_FREEZE		0x0020	/* force idle */
#define HAMMER2_THREAD_FROZEN		0x0040	/* thread is frozen */
#define HAMMER2_THREAD_XOPQ		0x0080	/* work pending */
#define HAMMER2_THREAD_STOPPED		0x0100	/* thread has stopped */
#define HAMMER2_THREAD_UNFREEZE		0x0200

#define HAMMER2_THREAD_WAKEUP_MASK	(HAMMER2_THREAD_UNMOUNTING |	\
					 HAMMER2_THREAD_REMASTER |	\
					 HAMMER2_THREAD_STOP |		\
					 HAMMER2_THREAD_FREEZE |	\
					 HAMMER2_THREAD_XOPQ)

/*
 * Support structure for dedup heuristic.
 */
struct hammer2_dedup {
	hammer2_off_t	data_off;
	uint64_t	data_crc;
	uint32_t	ticks;
	uint32_t	saved_error;
};

typedef struct hammer2_dedup hammer2_dedup_t;

/*
 * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
 *
 * This structure is used to distribute a VOP operation across multiple
 * nodes.  It provides a rendezvous for concurrent node execution and
 * can be detached from the frontend operation to allow the frontend to
 * return early.
 *
 * This structure also sequences operations on up to four inodes.
 */
typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, void *scratch,
				   int clindex);

struct hammer2_xop_desc {
	hammer2_xop_func_t	storage_func;	/* local storage function */
	hammer2_xop_func_t	dmsg_dispatch;	/* dmsg dispatch function */
	hammer2_xop_func_t	dmsg_process;	/* dmsg processing function */
	const char		*id;
};

typedef struct hammer2_xop_desc hammer2_xop_desc_t;

struct hammer2_xop_fifo {
	TAILQ_ENTRY(hammer2_xop_head) entry;
	hammer2_chain_t		**array;
	int			*errors;
	int			ri;
	int			wi;
	int			flags;
	hammer2_thread_t	*thr;
};

typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;

#define HAMMER2_XOP_FIFO_RUN	0x0001
#define HAMMER2_XOP_FIFO_STALL	0x0002

struct hammer2_xop_head {
	hammer2_xop_desc_t	*desc;
	hammer2_tid_t		mtid;
	struct hammer2_inode	*ip1;
	struct hammer2_inode	*ip2;
	struct hammer2_inode	*ip3;
	struct hammer2_inode	*ip4;
	uint64_t		run_mask;
	uint64_t		chk_mask;
	int			fifo_size;
	int			flags;
	int			state;
	int			error;
	hammer2_key_t		collect_key;
	char			*name1;
	size_t			name1_len;
	char			*name2;
	size_t			name2_len;
	hammer2_xop_fifo_t	collect[HAMMER2_MAXCLUSTER];
	hammer2_cluster_t	cluster;	/* help collections */
	hammer2_io_t		*focus_dio;
};

typedef struct hammer2_xop_head hammer2_xop_head_t;

#define fifo_mask(xop_head)	((xop_head)->fifo_size - 1)

struct hammer2_xop_ipcluster {
	hammer2_xop_head_t	head;
};

struct hammer2_xop_strategy {
	hammer2_xop_head_t	head;
	hammer2_key_t		lbase;
	int			finished;
	hammer2_mtx_t		lock;
	struct bio		*bio;
};

struct hammer2_xop_readdir {
	hammer2_xop_head_t	head;
	hammer2_key_t		lkey;
};

struct hammer2_xop_nresolve {
	hammer2_xop_head_t	head;
	hammer2_key_t		lhc;	/* if name is NULL, use lhc */
};

struct hammer2_xop_unlink {
	hammer2_xop_head_t	head;
	int			isdir;
	int			dopermanent;
};

#define H2DOPERM_PERMANENT	0x01
#define H2DOPERM_FORCE		0x02
#define H2DOPERM_IGNINO		0x04

struct hammer2_xop_nrename {
	hammer2_xop_head_t	head;
	hammer2_tid_t		lhc;
	int			ip_key;
};

struct hammer2_xop_scanlhc {
	hammer2_xop_head_t	head;
	hammer2_key_t		lhc;
};

struct hammer2_xop_scanall {
	hammer2_xop_head_t	head;
	hammer2_key_t		key_beg;	/* inclusive */
	hammer2_key_t		key_end;	/* inclusive */
	int			resolve_flags;
	int			lookup_flags;
};

struct hammer2_xop_lookup {
	hammer2_xop_head_t	head;
	hammer2_key_t		lhc;
};

struct hammer2_xop_mkdirent {
	hammer2_xop_head_t	head;
	hammer2_dirent_head_t	dirent;
	hammer2_key_t		lhc;
};

struct hammer2_xop_create {
	hammer2_xop_head_t	head;
	hammer2_inode_meta_t	meta;		/* initial metadata */
	hammer2_key_t		lhc;
	int			flags;
};

struct hammer2_xop_destroy {
	hammer2_xop_head_t	head;
};

struct hammer2_xop_fsync {
	hammer2_xop_head_t	head;
	hammer2_inode_meta_t	meta;
	hammer2_off_t		osize;
	u_int			ipflags;
	int			clear_directdata;
};

struct hammer2_xop_unlinkall {
	hammer2_xop_head_t	head;
	hammer2_key_t		key_beg;
	hammer2_key_t		key_end;
};

struct hammer2_xop_connect {
	hammer2_xop_head_t	head;
	hammer2_key_t		lhc;
};

struct hammer2_xop_flush {
	hammer2_xop_head_t	head;
};

typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t;
typedef struct hammer2_xop_create hammer2_xop_create_t;
typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
typedef struct hammer2_xop_connect hammer2_xop_connect_t;
typedef struct hammer2_xop_flush hammer2_xop_flush_t;

union hammer2_xop {
	hammer2_xop_head_t	head;
	hammer2_xop_ipcluster_t	xop_ipcluster;
	hammer2_xop_readdir_t	xop_readdir;
	hammer2_xop_nresolve_t	xop_nresolve;
	hammer2_xop_unlink_t	xop_unlink;
	hammer2_xop_nrename_t	xop_nrename;
	hammer2_xop_strategy_t	xop_strategy;
	hammer2_xop_mkdirent_t	xop_mkdirent;
	hammer2_xop_create_t	xop_create;
	hammer2_xop_destroy_t	xop_destroy;
	hammer2_xop_fsync_t	xop_fsync;
	hammer2_xop_unlinkall_t	xop_unlinkall;
	hammer2_xop_scanlhc_t	xop_scanlhc;
	hammer2_xop_scanall_t	xop_scanall;
	hammer2_xop_lookup_t	xop_lookup;
	hammer2_xop_flush_t	xop_flush;
	hammer2_xop_connect_t	xop_connect;
};

typedef union hammer2_xop hammer2_xop_t;
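
/*
 * Illustrative sketch (not part of the original header): each per-node
 * collect[] FIFO is a power-of-two ring whose ri/wi indices increase
 * monotonically and are masked with fifo_mask() when indexing array[].
 * The helper name below is hypothetical.
 */
static __inline hammer2_chain_t *
hammer2_example_fifo_peek(hammer2_xop_head_t *xop, int clindex)
{
	hammer2_xop_fifo_t *fifo = &xop->collect[clindex];

	if (fifo->ri == fifo->wi)	/* ring is empty when indices match */
		return (NULL);
	return (fifo->array[fifo->ri & fifo_mask(xop)]);
}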

/*
 * hammer2_xop_group - Manage XOP support threads.
 */
struct hammer2_xop_group {
	hammer2_thread_t	thrs[HAMMER2_MAXCLUSTER];
};

typedef struct hammer2_xop_group hammer2_xop_group_t;

/*
 * flags to hammer2_xop_collect()
 */
#define HAMMER2_XOP_COLLECT_NOWAIT	0x00000001
#define HAMMER2_XOP_COLLECT_WAITALL	0x00000002

/*
 * flags to hammer2_xop_alloc()
 *
 * MODIFYING	- This is a modifying transaction, allocate a mtid.
 */
#define HAMMER2_XOP_MODIFYING		0x00000001
#define HAMMER2_XOP_STRATEGY		0x00000002
#define HAMMER2_XOP_INODE_STOP		0x00000004
#define HAMMER2_XOP_VOLHDR		0x00000008
#define HAMMER2_XOP_FSSYNC		0x00000010

/*
 * Device vnode management structure
 */
struct hammer2_devvp {
	TAILQ_ENTRY(hammer2_devvp) entry;
	struct m_vnode	*devvp;		/* device vnode */
	char		*path;		/* device vnode path */
	int		open;		/* 1 if devvp open */
};

typedef struct hammer2_devvp hammer2_devvp_t;

TAILQ_HEAD(hammer2_devvp_list, hammer2_devvp);

typedef struct hammer2_devvp_list hammer2_devvp_list_t;

/*
 * Volume management structure
 */
struct hammer2_vfsvolume {
	hammer2_devvp_t	*dev;		/* device vnode management */
	int		id;		/* volume id */
	hammer2_off_t	offset;		/* offset within volumes */
	hammer2_off_t	size;		/* volume size */
};

typedef struct hammer2_vfsvolume hammer2_vfsvolume_t;

/*
 * Global (per partition) management structure, represents a hard block
 * device.  Typically referenced by hammer2_chain structures when applicable.
 * Typically not used for network-managed elements.
 *
 * Note that a single hammer2_dev can be indirectly tied to multiple system
 * mount points.  There is no direct relationship.  System mounts are
 * per-cluster-id, not per-block-device, and a single hard mount might contain
 * many PFSs and those PFSs might combine together in various ways to form
 * the set of available clusters.
 */
struct hammer2_dev {
	struct m_vnode	*devvp;		/* device vnode for root volume */
	int		ronly;		/* read-only mount */
	int		mount_count;	/* number of actively mounted PFSs */
	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */

	struct malloc_type *mchain_obj;
	struct malloc_type *mio_obj;
	struct malloc_type *mmsg;
	//kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
	hammer2_spin_t	io_spin;	/* iotree, iolruq access */
	struct hammer2_io_tree iotree;
	int		iofree_count;
	int		freemap_relaxed;
	hammer2_chain_t vchain;		/* anchor chain (topology) */
	hammer2_chain_t fchain;		/* anchor chain (freemap) */
	hammer2_spin_t	list_spin;
	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
	struct lock	vollk;		/* lockmgr lock */
	struct lock	bulklk;		/* bulkfree operation lock */
	struct lock	bflock;		/* bulk-free manual function lock */
	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
	hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
	int		volhdrno;	/* last volhdrno written */
	uint32_t	hflags;		/* HMNT2 flags applicable to device */
	hammer2_off_t	free_reserved;	/* nominal free reserved */
	hammer2_off_t	total_size;	/* total size of volumes */
	int		nvolumes;	/* total number of volumes */
	hammer2_thread_t bfthr;		/* bulk-free thread */
	char		devrepname[64];	/* for kprintf */
	hammer2_volume_data_t voldata;
	hammer2_volume_data_t volsync;	/* synchronized voldata */

	hammer2_devvp_list_t devvpl;	/* list of device vnodes including *devvp */
	hammer2_vfsvolume_t volumes[HAMMER2_MAX_VOLUMES]; /* list of volumes */
};

typedef struct hammer2_dev hammer2_dev_t;

/*
 * Per-cluster management structure.  This structure will be tied to a
 * system mount point if the system is mounting the PFS, but is also used
 * to manage clusters encountered during the super-root scan or received
 * via LNK_SPANs that might not be mounted.
 *
 * This structure is also used to represent the super-root that hangs off
 * of a hard mount point.  The super-root is not really a cluster element.
 * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
 * this than to special case super-root manipulation in the hammer2_chain*
 * code as being only hammer2_dev-related.
 *
 * pfs_mode and pfs_nmasters are rollup fields which critically describe
 * how elements of the cluster act on the cluster.  pfs_mode is only
 * applicable when a PFS is mounted by the system.  pfs_nmasters is our best
 * guess as to how many masters have been configured for a cluster and is
 * always applicable.  pfs_types[] is an array with 1:1 correspondence to
 * the iroot cluster and describes the PFS types of the nodes making up
 * the cluster.
 *
 * WARNING! Portions of this structure have deferred initialization.  In
 *	    particular, if not mounted there will be no wthread.
 *	    Unmounted network PFSs will also be missing iroot and numerous
 *	    other fields will not be initialized prior to mount.
 *
 *	    Synchronization threads are chain-specific and only applicable
 *	    to local hard PFS entries.  A hammer2_pfs structure may contain
 *	    more than one when multiple hard PFSs are present on the local
 *	    machine which require synchronization monitoring.  Most PFSs
 *	    (such as snapshots) are 1xMASTER PFSs which do not need a
 *	    synchronization thread.
 *
 * WARNING! The chains making up pfs->iroot's cluster are accounted for in
 *	    hammer2_dev->mount_count when the pfs is associated with a mount
 *	    point.
 */
struct hammer2_pfs {
	struct mount		*mp;
	TAILQ_ENTRY(hammer2_pfs) mntentry;	/* hammer2_pfslist */
	uuid_t			pfs_clid;
	hammer2_dev_t		*spmp_hmp;	/* only if super-root pmp */
	hammer2_dev_t		*force_local;	/* only if 'local' mount */
	hammer2_inode_t		*iroot;		/* PFS root inode */
	uint8_t			pfs_types[HAMMER2_MAXCLUSTER];
	char			*pfs_names[HAMMER2_MAXCLUSTER];
	hammer2_dev_t		*pfs_hmps[HAMMER2_MAXCLUSTER];
	hammer2_blockset_t	pfs_iroot_blocksets[HAMMER2_MAXCLUSTER];
	hammer2_trans_t		trans;
	struct lock		lock;		/* PFS lock for certain ops */
	//struct netexport	export;		/* nfs export */
	int			unused00;
	int			ronly;		/* read-only mount */
	int			hflags;		/* pfs-specific mount flags */
	struct malloc_type	*minode_obj;
	hammer2_spin_t		inum_spin;	/* inumber lookup */
	struct hammer2_inode_tree inum_tree;	/* (not applicable to spmp) */
	long			inum_count;	/* #of inodes in inum_tree */
	hammer2_spin_t		lru_spin;
	struct hammer2_chain_list lru_list;	/* basis for LRU tests */
	int			lru_count;	/* #of chains on LRU */
	int			flags;
	hammer2_tid_t		modify_tid;	/* modify transaction id */
	hammer2_tid_t		inode_tid;	/* inode allocator */
	uint8_t			pfs_nmasters;	/* total masters */
	uint8_t			pfs_mode;	/* operating mode PFSMODE */
	uint8_t			unused01;
	uint8_t			unused02;
	int			free_ticks;	/* free_* calculations */
	long			inmem_inodes;
	hammer2_off_t		free_reserved;
	hammer2_off_t		free_nominal;
	uint32_t		inmem_dirty_chains;
	int			count_lwinprog;	/* logical write in prog */
	hammer2_spin_t		list_spin;
	struct inoq_head	syncq;		/* SYNCQ flagged inodes */
	struct depq_head	depq;		/* SIDEQ flagged inodes */
	long			sideq_count;	/* total inodes on depq */
	hammer2_thread_t	sync_thrs[HAMMER2_MAXCLUSTER];
	uint32_t		cluster_flags;	/* cached cluster flags */
	int			has_xop_threads;
	hammer2_spin_t		xop_spin;	/* xop sequencer */
	hammer2_xop_group_t	*xop_groups;
	struct recq_head	recq;		/* makefs */
};

typedef struct hammer2_pfs hammer2_pfs_t;

TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);

/*
 * pmp->flags
 */
#define HAMMER2_PMPF_SPMP	0x00000001
#define HAMMER2_PMPF_EMERG	0x00000002	/* Emergency delete mode */

/*
 * NOTE: The LRU list contains at least all the chains with refs == 0
 *	 that can be recycled, and may contain additional chains which
 *	 cannot.
 */
#define HAMMER2_LRU_LIMIT		4096

#define HAMMER2_DIRTYCHAIN_WAITING	0x80000000
#define HAMMER2_DIRTYCHAIN_MASK		0x7FFFFFFF

#define HAMMER2_LWINPROG_WAITING	0x80000000
#define HAMMER2_LWINPROG_WAITING0	0x40000000
#define HAMMER2_LWINPROG_MASK		0x3FFFFFFF

/*
 * hammer2_cluster_check
 */
#define HAMMER2_CHECK_NULL	0x00000001

/*
 * Misc
 */
//#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#define VTOI(vp)	((hammer2_inode_t *)(vp)->v_data)
//#endif

//#if defined(_KERNEL)

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_HAMMER2);
#endif

static __inline
hammer2_pfs_t *
MPTOPMP(struct mount *mp)
{
	return ((hammer2_pfs_t *)mp->mnt_data);
}

#define HAMMER2_DEDUP_FRAG		(HAMMER2_PBUFSIZE / 64)
#define HAMMER2_DEDUP_FRAGRADIX		(HAMMER2_PBUFRADIX - 6)

static __inline
uint64_t
hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes)
{
	int bbeg;
	int bits;
	uint64_t mask;

	bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >>
	       HAMMER2_DEDUP_FRAGRADIX;
	bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >>
		HAMMER2_DEDUP_FRAGRADIX);
	if (bbeg + bits == 64)
		mask = (uint64_t)-1;
	else
		mask = ((uint64_t)1 << (bbeg + bits)) - 1;

	mask &= ~(((uint64_t)1 << bbeg) - 1);

	return mask;
}
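
/*
 * Worked example (not part of the original header, assuming the standard
 * 64KB physical buffer, i.e. HAMMER2_DEDUP_FRAG == 1KB): a 4KB block at
 * dio->pbase + 0x4000 covers 1KB fragments 16-19 of the buffer, so
 * hammer2_dedup_mask() above returns 0x00000000000F0000, a mask which can
 * then be tested against dio->dedup_valid / dio->dedup_alloc.
 */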

static __inline
int
hammer2_error_to_errno(int error)
{
	if (error) {
		if (error & HAMMER2_ERROR_EIO)
			error = EIO;
		else if (error & HAMMER2_ERROR_CHECK)
			error = EDOM;
		else if (error & HAMMER2_ERROR_ABORTED)
			error = EINTR;
		else if (error & HAMMER2_ERROR_BADBREF)
			error = EIO;
		else if (error & HAMMER2_ERROR_ENOSPC)
			error = ENOSPC;
		else if (error & HAMMER2_ERROR_ENOENT)
			error = ENOENT;
		else if (error & HAMMER2_ERROR_ENOTEMPTY)
			error = ENOTEMPTY;
		else if (error & HAMMER2_ERROR_EAGAIN)
			error = EAGAIN;
		else if (error & HAMMER2_ERROR_ENOTDIR)
			error = ENOTDIR;
		else if (error & HAMMER2_ERROR_EISDIR)
			error = EISDIR;
		else if (error & HAMMER2_ERROR_EINPROGRESS)
			error = EINPROGRESS;
		else if (error & HAMMER2_ERROR_EEXIST)
			error = EEXIST;
		else if (error & HAMMER2_ERROR_EINVAL)
			error = EINVAL;
		else if (error & HAMMER2_ERROR_EDEADLK)
			error = EDEADLK;
		else if (error & HAMMER2_ERROR_ESRCH)
			error = ESRCH;
		else if (error & HAMMER2_ERROR_ETIMEDOUT)
			error = ETIMEDOUT;
		else
			error = EDOM;
	}
	return error;
}

static __inline
int
hammer2_errno_to_error(int error)
{
	switch(error) {
	case 0:
		return 0;
	case EIO:
		return HAMMER2_ERROR_EIO;
	case EDOM:
		return HAMMER2_ERROR_CHECK;
	case EINTR:
		return HAMMER2_ERROR_ABORTED;
	//case EIO:
	//	return HAMMER2_ERROR_BADBREF;
	case ENOSPC:
		return HAMMER2_ERROR_ENOSPC;
	case ENOENT:
		return HAMMER2_ERROR_ENOENT;
	case ENOTEMPTY:
		return HAMMER2_ERROR_ENOTEMPTY;
	case EAGAIN:
		return HAMMER2_ERROR_EAGAIN;
	case ENOTDIR:
		return HAMMER2_ERROR_ENOTDIR;
	case EISDIR:
		return HAMMER2_ERROR_EISDIR;
	case EINPROGRESS:
		return HAMMER2_ERROR_EINPROGRESS;
	case EEXIST:
		return HAMMER2_ERROR_EEXIST;
	case EINVAL:
		return HAMMER2_ERROR_EINVAL;
	case EDEADLK:
		return HAMMER2_ERROR_EDEADLK;
	case ESRCH:
		return HAMMER2_ERROR_ESRCH;
	case ETIMEDOUT:
		return HAMMER2_ERROR_ETIMEDOUT;
	default:
		return HAMMER2_ERROR_EINVAL;
	}
}
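
/*
 * Illustrative sketch (not part of the original header): HAMMER2 error
 * flags accumulate by ORing and are collapsed back to a single errno by
 * hammer2_error_to_errno() above, with EIO taking priority over CHECK.
 * The helper name below is hypothetical.
 */
static __inline int
hammer2_example_combined_error(void)
{
	int error = HAMMER2_ERROR_NONE;

	error |= HAMMER2_ERROR_CHECK;	/* check code mismatch on one chain */
	error |= HAMMER2_ERROR_EIO;	/* device error on another */

	return (hammer2_error_to_errno(error));	/* resolves to EIO */
}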

extern struct vop_ops hammer2_vnode_vops;
extern struct vop_ops hammer2_spec_vops;
extern struct vop_ops hammer2_fifo_vops;
extern struct hammer2_pfslist hammer2_pfslist;
extern struct lock hammer2_mntlk;

extern int hammer2_aux_flags;
extern int hammer2_debug;
extern int hammer2_xop_nthreads;
extern int hammer2_xop_sgroups;
extern int hammer2_xop_xgroups;
extern int hammer2_xop_xbase;
extern int hammer2_xop_mod;
extern long hammer2_debug_inode;
extern int hammer2_cluster_meta_read;
extern int hammer2_cluster_data_read;
extern int hammer2_cluster_write;
extern int hammer2_dedup_enable;
extern int hammer2_always_compress;
extern int hammer2_flush_pipe;
extern int hammer2_dio_count;
extern int hammer2_dio_limit;
extern int hammer2_bulkfree_tps;
extern int hammer2_spread_workers;
extern int hammer2_limit_saved_depth;
extern long hammer2_chain_allocs;
extern long hammer2_limit_saved_chains;
extern long hammer2_limit_dirty_chains;
extern long hammer2_limit_dirty_inodes;
extern long hammer2_count_modified_chains;
extern long hammer2_iod_file_read;
extern long hammer2_iod_meta_read;
extern long hammer2_iod_indr_read;
extern long hammer2_iod_fmap_read;
extern long hammer2_iod_volu_read;
extern long hammer2_iod_file_write;
extern long hammer2_iod_file_wembed;
extern long hammer2_iod_file_wzero;
extern long hammer2_iod_file_wdedup;
extern long hammer2_iod_meta_write;
extern long hammer2_iod_indr_write;
extern long hammer2_iod_fmap_write;
extern long hammer2_iod_volu_write;

extern long hammer2_process_icrc32;
extern long hammer2_process_xxhash64;

extern struct objcache *cache_buffer_read;
extern struct objcache *cache_buffer_write;
extern struct objcache *cache_xops;

/*
 * hammer2_subr.c
 */
#define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
#define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))

int hammer2_signal_check(time_t *timep);
const char *hammer2_error_str(int error);
const char *hammer2_bref_type_str(int btype);

int hammer2_get_dtype(uint8_t type);
int hammer2_get_vtype(uint8_t type);
uint8_t hammer2_get_obj_type(enum vtype vtype);
void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
uint64_t hammer2_timespec_to_time(const struct timespec *ts);
void hammer2_time_to_timeval(uint64_t xtime, struct timeval *tv);
uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);

hammer2_key_t hammer2_dirhash(const char *aname, size_t len);
int hammer2_getradix(size_t bytes);

int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
			hammer2_key_t *lbasep, hammer2_key_t *leofp);
int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
void hammer2_update_time(uint64_t *timep, bool is_mtime);
void hammer2_adjreadcounter(int btype, size_t bytes);
void hammer2_adjwritecounter(int btype, size_t bytes);

/*
 * hammer2_inode.c
 */
struct m_vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
			hammer2_tid_t inum);
hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
			hammer2_xop_head_t *xop, hammer2_tid_t inum, int idx);
void hammer2_inode_ref(hammer2_inode_t *ip);
void hammer2_inode_drop(hammer2_inode_t *ip);
void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster);
void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			int idx);
hammer2_key_t hammer2_inode_data_count(const hammer2_inode_t *ip);
hammer2_key_t hammer2_inode_inode_count(const hammer2_inode_t *ip);
void hammer2_inode_modify(hammer2_inode_t *ip);
void hammer2_inode_delayed_sideq(hammer2_inode_t *ip);
void hammer2_inode_lock(hammer2_inode_t *ip, int how);
void hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
			hammer2_inode_t *ip3, hammer2_inode_t *ip4);
void hammer2_inode_unlock(hammer2_inode_t *ip);
void hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
			int clindex, hammer2_chain_t **parentp, int how);
hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
			hammer2_mtx_state_t ostate);
int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);

hammer2_inode_t *hammer2_inode_create_normal(hammer2_inode_t *pip,
			struct vattr *vap, struct ucred *cred,
			hammer2_key_t inum, int *errorp);
hammer2_inode_t *hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			const char *name, size_t name_len,
			int *errorp);
int hammer2_inode_chain_ins(hammer2_inode_t *ip);
int hammer2_inode_chain_des(hammer2_inode_t *ip);
int hammer2_inode_chain_sync(hammer2_inode_t *ip);
int hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags);
int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vpp);
void hammer2_inode_vprecycle(struct m_vnode *vp);
int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
			size_t name_len, hammer2_key_t inum, uint8_t type);

hammer2_key_t hammer2_pfs_inode_count(hammer2_pfs_t *pmp);
int vflush(struct mount *mp, int rootrefs, int flags);

/*
 * hammer2_chain.c
 */
hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
			hammer2_pfs_t *pmp,
			hammer2_blockref_t *bref);
void hammer2_chain_init(hammer2_chain_t *chain);
void hammer2_chain_ref(hammer2_chain_t *chain);
void hammer2_chain_ref_hold(hammer2_chain_t *chain);
void hammer2_chain_drop(hammer2_chain_t *chain);
void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
void hammer2_chain_unhold(hammer2_chain_t *chain);
void hammer2_chain_rehold(hammer2_chain_t *chain);
int hammer2_chain_lock(hammer2_chain_t *chain, int how);
//void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
void hammer2_chain_load_data(hammer2_chain_t *chain);

int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
			int clindex, int flags,
			hammer2_chain_t **parentp,
			hammer2_chain_t **chainp);
int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
			hammer2_off_t dedup_off, int flags);

/*
 * hammer2_chain.c
 */
hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
				hammer2_pfs_t *pmp,
				hammer2_blockref_t *bref);
void hammer2_chain_init(hammer2_chain_t *chain);
void hammer2_chain_ref(hammer2_chain_t *chain);
void hammer2_chain_ref_hold(hammer2_chain_t *chain);
void hammer2_chain_drop(hammer2_chain_t *chain);
void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
void hammer2_chain_unhold(hammer2_chain_t *chain);
void hammer2_chain_rehold(hammer2_chain_t *chain);
int hammer2_chain_lock(hammer2_chain_t *chain, int how);
//void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
void hammer2_chain_load_data(hammer2_chain_t *chain);

int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
			int clindex, int flags,
			hammer2_chain_t **parentp,
			hammer2_chain_t **chainp);
int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
			hammer2_off_t dedup_off, int flags);
int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_resize(hammer2_chain_t *chain,
			hammer2_tid_t mtid, hammer2_off_t dedup_off,
			int nradix, int flags);
void hammer2_chain_unlock(hammer2_chain_t *chain);
//void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
			hammer2_blockref_t *bref, int how);
hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
void hammer2_chain_lookup_done(hammer2_chain_t *parent);
hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int flags);
hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int flags);
hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *errorp, int flags);
hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
			hammer2_chain_t *chain,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *errorp, int flags);
int hammer2_chain_scan(hammer2_chain_t *parent,
			hammer2_chain_t **chainp,
			hammer2_blockref_t *bref,
			int *firstp, int flags);

int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
			int methods, hammer2_key_t key, int keybits,
			int type, size_t bytes, hammer2_tid_t mtid,
			hammer2_off_t dedup_off, int flags);
void hammer2_chain_rename(hammer2_chain_t **parentp,
			hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
			hammer2_chain_t *chain);
void hammer2_chain_setflush(hammer2_chain_t *chain);
void hammer2_chain_countbrefs(hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count);
hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
void hammer2_chain_bulkdrop(hammer2_chain_t *copy);

void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
			size_t name_len);

void hammer2_base_delete(hammer2_chain_t *parent,
			hammer2_blockref_t *base, int count,
			hammer2_chain_t *chain,
			hammer2_blockref_t *obref);
void hammer2_base_insert(hammer2_chain_t *parent,
			hammer2_blockref_t *base, int count,
			hammer2_chain_t *chain,
			hammer2_blockref_t *elm);
void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int bi, int *countp,
			char pfx, u_int flags);
void hammer2_dump_chains(hammer2_dev_t *hmp, char vpfx, char fpfx);
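
/*
 * Typical key-range iteration with the chain API above (a hedged sketch;
 * locking flags and error handling are simplified).  hammer2_chain_next()
 * disposes of the previous chain and returns NULL at the end of the range.
 *
 *	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     key_beg, key_end, &error, 0);
 *	while (chain) {
 *		...inspect chain->bref / chain->data...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end, &error, 0);
 *	}
 *	if (parent) {
 *		hammer2_chain_unlock(parent);
 *		hammer2_chain_drop(parent);
 *	}
 */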

/*
 * hammer2_flush.c
 */
void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
int hammer2_flush(hammer2_chain_t *chain, int istop);
void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
void hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags);
void hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags);
hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
void hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags);
hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);

/*
 * hammer2_ioctl.c
 */
int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
			int fflag, struct ucred *cred);
int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_emerg_mode(hammer2_inode_t *ip, u_int mode);
int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);
int hammer2_ioctl_growfs(hammer2_inode_t *ip, void *data, struct ucred *cred);

/*
 * hammer2_io.c
 */
void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
void hammer2_io_bkvasync(hammer2_io_t *dio);
void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
			hammer2_off_t data_off, u_int bytes);
void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
			u_int bytes);
int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop);
int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop);
int _hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void hammer2_io_setdirty(hammer2_io_t *dio);

hammer2_io_t *_hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
			int lsize, int op HAMMER2_IO_DEBUG_ARGS);
hammer2_io_t *_hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase,
			int lsize HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_putblk(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
int _hammer2_io_bwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bawrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bdwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_brelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bqrelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_ref(hammer2_io_t *dio HAMMER2_IO_DEBUG_ARGS);
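
/*
 * Typical buffered access through the hammer2_io layer (a hedged sketch;
 * error handling is simplified).  Callers normally go through the
 * hammer2_io_*() macros defined below rather than the underscore-prefixed
 * functions, so that __FILE__/__LINE__ can be appended when
 * HAMMER2_IO_DEBUG is enabled.
 *
 *	hammer2_io_t *dio;
 *	char *data;
 *	int error;
 *
 *	error = hammer2_io_bread(hmp, btype, lbase, lsize, &dio);
 *	if (error == 0) {
 *		data = hammer2_io_data(dio, lbase);
 *		...read or modify the buffer, calling hammer2_io_setdirty(dio)
 *		   if modified...
 *		hammer2_io_bwrite(&dio);	(or hammer2_io_putblk(&dio))
 *	}
 */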

#ifndef HAMMER2_IO_DEBUG

#define hammer2_io_getblk(hmp, btype, lbase, lsize, op) \
	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op))
#define hammer2_io_getquick(hmp, lbase, lsize) \
	_hammer2_io_getquick((hmp), (lbase), (lsize))
#define hammer2_io_putblk(diop) \
	_hammer2_io_putblk(diop)
#define hammer2_io_bwrite(diop) \
	_hammer2_io_bwrite((diop))
#define hammer2_io_bawrite(diop) \
	_hammer2_io_bawrite((diop))
#define hammer2_io_bdwrite(diop) \
	_hammer2_io_bdwrite((diop))
#define hammer2_io_brelse(diop) \
	_hammer2_io_brelse((diop))
#define hammer2_io_bqrelse(diop) \
	_hammer2_io_bqrelse((diop))
#define hammer2_io_ref(dio) \
	_hammer2_io_ref((dio))

#define hammer2_io_bread(hmp, btype, lbase, lsize, diop) \
	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop))

#else

#define hammer2_io_getblk(hmp, btype, lbase, lsize, op) \
	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op), \
	__FILE__, __LINE__)

#define hammer2_io_getquick(hmp, lbase, lsize) \
	_hammer2_io_getquick((hmp), (lbase), (lsize), __FILE__, __LINE__)

#define hammer2_io_putblk(diop) \
	_hammer2_io_putblk(diop, __FILE__, __LINE__)

#define hammer2_io_bwrite(diop) \
	_hammer2_io_bwrite((diop), __FILE__, __LINE__)
#define hammer2_io_bawrite(diop) \
	_hammer2_io_bawrite((diop), __FILE__, __LINE__)
#define hammer2_io_bdwrite(diop) \
	_hammer2_io_bdwrite((diop), __FILE__, __LINE__)
#define hammer2_io_brelse(diop) \
	_hammer2_io_brelse((diop), __FILE__, __LINE__)
#define hammer2_io_bqrelse(diop) \
	_hammer2_io_bqrelse((diop), __FILE__, __LINE__)
#define hammer2_io_ref(dio) \
	_hammer2_io_ref((dio), __FILE__, __LINE__)

#define hammer2_io_bread(hmp, btype, lbase, lsize, diop) \
	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop), \
	__FILE__, __LINE__)

#endif

/*
 * hammer2_admin.c
 */
void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_signal2(hammer2_thread_t *thr,
			uint32_t pflags, uint32_t nflags);
void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
void hammer2_thr_create(hammer2_thread_t *thr,
			hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
			const char *id, int clindex, int repidx,
			void (*func)(void *arg));
void hammer2_thr_delete(hammer2_thread_t *thr);
void hammer2_thr_remaster(hammer2_thread_t *thr);
void hammer2_thr_freeze_async(hammer2_thread_t *thr);
void hammer2_thr_freeze(hammer2_thread_t *thr);
void hammer2_thr_unfreeze(hammer2_thread_t *thr);
int hammer2_thr_break(hammer2_thread_t *thr);
void hammer2_primary_xops_thread(void *arg);

/*
 * hammer2_thread.c (XOP API)
 */
void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
void hammer2_xop_setname(hammer2_xop_head_t *xop,
			const char *name, size_t name_len);
void hammer2_xop_setname2(hammer2_xop_head_t *xop,
			const char *name, size_t name_len);
size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
void hammer2_xop_setip4(hammer2_xop_head_t *xop, hammer2_inode_t *ip4);
void hammer2_xop_reinit(hammer2_xop_head_t *xop);
void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc);
void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
			int notidx);
int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
void hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask);
int hammer2_xop_active(hammer2_xop_head_t *xop);
int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
			int clindex, int error);
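
/*
 * Frontend XOP usage pattern (a hedged sketch modeled on how the VNOPS
 * frontends drive this API; the hammer2_xop_nresolve_t type and the
 * XOPMASK value shown are examples, not requirements of this header):
 *
 *	hammer2_xop_nresolve_t *xop;
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	...consume the collected cluster / hammer2_xop_gdata(&xop->head)...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */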

/*
 * hammer2_synchro.c
 */
void hammer2_primary_sync_thread(void *arg);

/*
 * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
 * may be integrated into other source files.
 */
void hammer2_xop_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_delete(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_mkdirent(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_create_det(hammer2_xop_t *xop,
			void *scratch, int clindex);
void hammer2_xop_inode_create_ins(hammer2_xop_t *xop,
			void *scratch, int clindex);
void hammer2_xop_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_xop_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_xop_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_strategy_write(hammer2_xop_t *xop, void *scratch, int clindex);
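
/*
 * Backend shape (a hedged sketch): each backend above is invoked once per
 * cluster node with its clindex, resolves chains against that node, and
 * hands results (or the terminating error) back to the frontend collector
 * via hammer2_xop_feed():
 *
 *	chain = ...local lookup for this clindex...;
 *	error = ...;
 *	hammer2_xop_feed(&xop->head, chain, clindex, error);
 *	if (chain) {
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */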

void hammer2_dmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
			int clindex);

void hammer2_rmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
			int clindex);

extern hammer2_xop_desc_t hammer2_ipcluster_desc;
extern hammer2_xop_desc_t hammer2_readdir_desc;
extern hammer2_xop_desc_t hammer2_nresolve_desc;
extern hammer2_xop_desc_t hammer2_unlink_desc;
extern hammer2_xop_desc_t hammer2_nrename_desc;
extern hammer2_xop_desc_t hammer2_scanlhc_desc;
extern hammer2_xop_desc_t hammer2_scanall_desc;
extern hammer2_xop_desc_t hammer2_lookup_desc;
extern hammer2_xop_desc_t hammer2_delete_desc;
extern hammer2_xop_desc_t hammer2_inode_mkdirent_desc;
extern hammer2_xop_desc_t hammer2_inode_create_desc;
extern hammer2_xop_desc_t hammer2_inode_create_det_desc;
extern hammer2_xop_desc_t hammer2_inode_create_ins_desc;
extern hammer2_xop_desc_t hammer2_inode_destroy_desc;
extern hammer2_xop_desc_t hammer2_inode_chain_sync_desc;
extern hammer2_xop_desc_t hammer2_inode_unlinkall_desc;
extern hammer2_xop_desc_t hammer2_inode_connect_desc;
extern hammer2_xop_desc_t hammer2_inode_flush_desc;
extern hammer2_xop_desc_t hammer2_strategy_read_desc;
extern hammer2_xop_desc_t hammer2_strategy_write_desc;

/*
 * hammer2_msgops.c
 */
/*
int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
*/

/*
 * hammer2_vfsops.c
 */
int hammer2_vfs_sync(struct mount *mp, int waitflags);
int hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor);
int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);

hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
			const hammer2_inode_data_t *ripdata,
			hammer2_dev_t *force_local);
void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
int hammer2_vfs_vget(struct mount *mp, struct m_vnode *dvp,
			ino_t ino, struct m_vnode **vpp);
int hammer2_vfs_root(struct mount *mp, struct m_vnode **vpp);

void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);

void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count);

void hammer2_voldata_lock(hammer2_dev_t *hmp);
void hammer2_voldata_unlock(hammer2_dev_t *hmp);
void hammer2_voldata_modify(hammer2_dev_t *hmp);

int hammer2_vfs_init(void);
int hammer2_vfs_uninit(void);

int hammer2_vfs_mount(struct m_vnode *makefs_devvp, struct mount *mp,
			const char *label, const struct hammer2_mount_info *mi);
int hammer2_vfs_unmount(struct mount *mp, int mntflags);

/*
 * hammer2_freemap.c
 */
int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
void hammer2_freemap_adjust(hammer2_dev_t *hmp,
			hammer2_blockref_t *bref, int how);

/*
 * hammer2_cluster.c
 */
uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
void hammer2_cluster_ref(hammer2_cluster_t *cluster);
void hammer2_cluster_drop(hammer2_cluster_t *cluster);
void hammer2_cluster_unhold(hammer2_cluster_t *cluster);
void hammer2_cluster_rehold(hammer2_cluster_t *cluster);
void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
			int flags);
void hammer2_cluster_unlock(hammer2_cluster_t *cluster);

void hammer2_bulkfree_init(hammer2_dev_t *hmp);
void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
			struct hammer2_ioc_bulkfree *bfi);
void hammer2_dummy_xop_from_chain(hammer2_xop_head_t *xop,
			hammer2_chain_t *chain);

/*
 * hammer2_iocom.c
 */
/*
void hammer2_iocom_init(hammer2_dev_t *hmp);
void hammer2_iocom_uninit(hammer2_dev_t *hmp);
void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
*/
void hammer2_volconf_update(hammer2_dev_t *hmp, int index);

/*
 * hammer2_strategy.c
 */
int hammer2_vop_strategy(struct vop_strategy_args *ap);
int hammer2_vop_bmap(struct vop_bmap_args *ap);
void hammer2_bioq_sync(hammer2_pfs_t *pmp);
void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
			const char *data);
void hammer2_dedup_clear(hammer2_dev_t *hmp);

/*
 * hammer2_ondisk.c
 */
int hammer2_open_devvp(const hammer2_devvp_list_t *devvpl, int ronly);
int hammer2_close_devvp(const hammer2_devvp_list_t *devvpl, int ronly);
int hammer2_init_devvp(struct m_vnode *devvp, hammer2_devvp_list_t *devvpl);
void hammer2_cleanup_devvp(hammer2_devvp_list_t *devvpl);
int hammer2_init_vfsvolumes(struct mount *mp,
			const hammer2_devvp_list_t *devvpl,
			hammer2_vfsvolume_t *volumes,
			hammer2_volume_data_t *rootvoldata,
			int *rootvolzone,
			struct m_vnode **rootvoldevvp);
hammer2_vfsvolume_t *hammer2_get_volume_from_hmp(hammer2_dev_t *hmp,
			hammer2_off_t offset);

/*
 * hammer2_vnops.c
 */
int hammer2_reclaim(struct m_vnode *vp);
int hammer2_readdir(struct m_vnode *vp, void *buf, size_t size, off_t *offsetp,
			int *ndirentp, int *eofflagp);
int hammer2_readlink(struct m_vnode *vp, void *buf, size_t size);
int hammer2_read(struct m_vnode *vp, void *buf, size_t size, off_t offset);
int hammer2_write(struct m_vnode *vp, void *buf, size_t size, off_t offset);
int hammer2_nresolve(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen);
int hammer2_nmkdir(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
			mode_t mode);
int hammer2_nlink(struct m_vnode *dvp, struct m_vnode *vp, char *name, int nlen);
int hammer2_ncreate(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
			mode_t mode);
int hammer2_nmknod(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
			int type, mode_t mode);
int hammer2_nsymlink(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
			char *target, mode_t mode);

/*
 * hammer2_buf.c
 */
struct m_buf *getblkx(struct m_vnode *vp, off_t loffset, int size, int blkflags,
			int slptimeo);
int breadx(struct m_vnode *vp, off_t loffset, int size, struct m_buf **bpp);
int bread_kvabio(struct m_vnode *vp, off_t loffset, int size, struct m_buf **bpp);
void bqrelse(struct m_buf *bp);
int bawrite(struct m_buf *bp);
int uiomove(caddr_t cp, size_t n, struct uio *uio);
int uiomovebp(struct m_buf *bp, caddr_t cp, size_t n, struct uio *uio);

/*
 * More complex inlines
 */

#define hammer2_xop_gdata(xop)	_hammer2_xop_gdata((xop), __FILE__, __LINE__)

static __inline
const hammer2_media_data_t *
_hammer2_xop_gdata(hammer2_xop_head_t *xop, const char *file, int line)
{
	hammer2_chain_t *focus;
	const void *data;

	focus = xop->cluster.focus;
	if (focus->dio) {
		lockmgr(&focus->diolk, LK_SHARED);
		if ((xop->focus_dio = focus->dio) != NULL) {
			_hammer2_io_ref(xop->focus_dio HAMMER2_IO_DEBUG_CALL);
			hammer2_io_bkvasync(xop->focus_dio);
		}
		data = focus->data;
		lockmgr(&focus->diolk, LK_RELEASE);
	} else {
		data = focus->data;
	}

	return data;
}

#define hammer2_xop_pdata(xop)	_hammer2_xop_pdata((xop), __FILE__, __LINE__)

static __inline
void
_hammer2_xop_pdata(hammer2_xop_head_t *xop, const char *file, int line)
{
	if (xop->focus_dio)
		_hammer2_io_putblk(&xop->focus_dio HAMMER2_IO_DEBUG_CALL);
}
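
/*
 * Note: hammer2_xop_gdata() pins the focus chain's dio (when one is
 * present) so the returned media data remains valid; each successful
 * hammer2_xop_gdata() should therefore be paired with a matching
 * hammer2_xop_pdata() once the caller is done with the data, e.g.
 * (sketch):
 *
 *	ripdata = &hammer2_xop_gdata(xop)->ipdata;
 *	...copy what is needed out of ripdata...
 *	hammer2_xop_pdata(xop);
 */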

static __inline
void
hammer2_knote(struct m_vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

static __inline
void
hammer2_iocom_init(hammer2_dev_t *hmp)
{
}

static __inline
void
hammer2_iocom_uninit(hammer2_dev_t *hmp)
{
}

//#endif /* !_KERNEL */
#endif /* !_VFS_HAMMER2_HAMMER2_H_ */