1 /* 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org> 5 * Copyright (c) 2011-2022 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Matthew Dillon <dillon@dragonflybsd.org> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 /* 38 * This subsystem implements most of the core support functions for 39 * the hammer2_chain structure. 
 *
 * Chains are the in-memory version on media objects (volume header, inodes,
 * indirect blocks, data blocks, etc).  Chains represent a portion of the
 * HAMMER2 topology.
 *
 * Chains are no-longer delete-duplicated.  Instead, the original in-memory
 * chain will be moved along with its block reference (e.g. for things like
 * renames, hardlink operations, modifications, etc), and will be indexed
 * on a secondary list for flush handling instead of propagating a flag
 * upward to the root.
 *
 * Concurrent front-end operations can still run against backend flushes
 * as long as they do not cross the current flush boundary.  An operation
 * running above the current flush (in areas not yet flushed) can become
 * part of the current flush while an operation running below the current
 * flush can become part of the next flush.
 */
/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/buf.h>

#include <crypto/sha2/sha2.h>
*/

#include "hammer2.h"

static hammer2_chain_t *hammer2_chain_create_indirect(
		hammer2_chain_t *parent,
		hammer2_key_t key, int keybits,
		hammer2_tid_t mtid, int for_type, int *errorp);
static int hammer2_chain_delete_obref(hammer2_chain_t *parent,
		hammer2_chain_t *chain,
		hammer2_tid_t mtid, int flags,
		hammer2_blockref_t *obref);
static hammer2_chain_t *hammer2_combined_find(
		hammer2_chain_t *parent,
		hammer2_blockref_t *base, int count,
		hammer2_key_t *key_nextp,
		hammer2_key_t key_beg, hammer2_key_t key_end,
		hammer2_blockref_t **brefp);
static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain,
		int depth);

/*
 * There are many degenerate situations where an extreme rate of console
 * output can occur from warnings and errors.  Make sure this output does
 * not impede operations.
90 */ 91 /* 92 static struct krate krate_h2chk = { .freq = 5 }; 93 static struct krate krate_h2me = { .freq = 1 }; 94 static struct krate krate_h2em = { .freq = 1 }; 95 */ 96 97 /* 98 * Basic RBTree for chains (core.rbtree). 99 */ 100 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp); 101 102 int 103 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2) 104 { 105 hammer2_key_t c1_beg; 106 hammer2_key_t c1_end; 107 hammer2_key_t c2_beg; 108 hammer2_key_t c2_end; 109 110 /* 111 * Compare chains. Overlaps are not supposed to happen and catch 112 * any software issues early we count overlaps as a match. 113 */ 114 c1_beg = chain1->bref.key; 115 c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1; 116 c2_beg = chain2->bref.key; 117 c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1; 118 119 if (c1_end < c2_beg) /* fully to the left */ 120 return(-1); 121 if (c1_beg > c2_end) /* fully to the right */ 122 return(1); 123 return(0); /* overlap (must not cross edge boundary) */ 124 } 125 126 /* 127 * Assert that a chain has no media data associated with it. 128 */ 129 static __inline void 130 hammer2_chain_assert_no_data(hammer2_chain_t *chain) 131 { 132 KKASSERT(chain->dio == NULL); 133 if (chain->bref.type != HAMMER2_BREF_TYPE_VOLUME && 134 chain->bref.type != HAMMER2_BREF_TYPE_FREEMAP && 135 chain->data) { 136 panic("hammer2_chain_assert_no_data: chain %p still has data", 137 chain); 138 } 139 } 140 141 /* 142 * Make a chain visible to the flusher. The flusher operates using a top-down 143 * recursion based on the ONFLUSH flag. It locates MODIFIED and UPDATE chains, 144 * flushes them, and updates blocks back to the volume root. 145 * 146 * This routine sets the ONFLUSH flag upward from the triggering chain until 147 * it hits an inode root or the volume root. Inode chains serve as inflection 148 * points, requiring the flusher to bridge across trees. 
 * Inodes include regular inodes, PFS roots (pmp->iroot), and the media
 * super root (spmp->iroot).
 */
void
hammer2_chain_setflush(hammer2_chain_t *chain)
{
	hammer2_chain_t *parent;

	if ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
		hammer2_spin_sh(&chain->core.spin);
		while ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			/* Inodes are inflection points; stop the walk here */
			if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
				break;
			if ((parent = chain->parent) == NULL)
				break;
			/*
			 * Hand-over-hand shared spinlocks while walking
			 * upward: take the parent's lock before releasing
			 * the child's so chain->parent stays stable.
			 */
			hammer2_spin_sh(&parent->core.spin);
			hammer2_spin_unsh(&chain->core.spin);
			chain = parent;
		}
		hammer2_spin_unsh(&chain->core.spin);
	}
}

/*
 * Allocate a new disconnected chain element representing the specified
 * bref.  chain->refs is set to 1 and the passed bref is copied to
 * chain->bref.  chain->bytes is derived from the bref.
 *
 * chain->pmp inherits pmp unless the chain is an inode (other than the
 * super-root inode).
 *
 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
 */
hammer2_chain_t *
hammer2_chain_alloc(hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
		    hammer2_blockref_t *bref)
{
	hammer2_chain_t *chain;
	u_int bytes;

	/*
	 * Special case - radix of 0 indicates a chain that does not
	 * need a data reference (context is completely embedded in the
	 * bref).
 */
	if ((int)(bref->data_off & HAMMER2_OFF_MASK_RADIX))
		bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
	else
		bytes = 0;

	switch(bref->type) {
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_VOLUME:
		chain = kmalloc_obj(sizeof(*chain), hmp->mchain,
				    M_WAITOK | M_ZERO);
		atomic_add_long(&hammer2_chain_allocs, 1);
		break;
	case HAMMER2_BREF_TYPE_EMPTY:
	default:
		panic("hammer2_chain_alloc: unrecognized blockref type: %d",
		      bref->type);
		break;
	}

	/*
	 * Initialize the new chain structure.  pmp must be set to NULL for
	 * chains belonging to the super-root topology of a device mount.
	 */
	if (pmp == hmp->spmp)
		chain->pmp = NULL;
	else
		chain->pmp = pmp;

	chain->hmp = hmp;
	chain->bref = *bref;
	chain->bytes = bytes;
	chain->refs = 1;		/* returned referenced to caller */
	chain->flags = HAMMER2_CHAIN_ALLOCATED;

	/*
	 * Set the PFS boundary flag if this chain represents a PFS root.
	 */
	if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_PFSBOUNDARY);
	hammer2_chain_init(chain);

	return (chain);
}

/*
 * A common function to initialize chains including fchain and vchain.
 */
void
hammer2_chain_init(hammer2_chain_t *chain)
{
	RB_INIT(&chain->core.rbtree);	/* live chains */
	hammer2_mtx_init(&chain->lock, "h2chain");
	hammer2_spin_init(&chain->core.spin, "h2chain");
	lockinit(&chain->diolk, "chdio", 0, 0);
}

/*
 * Add a reference to a chain element, preventing its destruction.
259 * Undone via hammer2_chain_drop() 260 * 261 * (can be called with spinlock held) 262 */ 263 void 264 hammer2_chain_ref(hammer2_chain_t *chain) 265 { 266 if (atomic_fetchadd_int(&chain->refs, 1) == 0) { 267 /* NOP */ 268 } 269 } 270 271 /* 272 * Ref a locked chain and force the data to be held across an unlock. 273 * Chain must be currently locked. The user of the chain who desires 274 * to release the hold must call hammer2_chain_lock_unhold() to relock 275 * and unhold the chain, then unlock normally, or may simply call 276 * hammer2_chain_drop_unhold() (which is safer against deadlocks). 277 */ 278 void 279 hammer2_chain_ref_hold(hammer2_chain_t *chain) 280 { 281 atomic_add_int(&chain->lockcnt, 1); 282 hammer2_chain_ref(chain); 283 } 284 285 /* 286 * Insert the chain in the core rbtree. 287 * 288 * Normal insertions are placed in the live rbtree. Insertion of a deleted 289 * chain is a special case used by the flush code that is placed on the 290 * unstaged deleted list to avoid confusing the live view. 
291 */ 292 #define HAMMER2_CHAIN_INSERT_SPIN 0x0001 293 #define HAMMER2_CHAIN_INSERT_LIVE 0x0002 294 #define HAMMER2_CHAIN_INSERT_RACE 0x0004 295 296 static 297 int 298 hammer2_chain_insert(hammer2_chain_t *parent, hammer2_chain_t *chain, 299 int flags, int generation) 300 { 301 hammer2_chain_t *xchain __debugvar; 302 int error = 0; 303 304 if (flags & HAMMER2_CHAIN_INSERT_SPIN) 305 hammer2_spin_ex(&parent->core.spin); 306 307 /* 308 * Interlocked by spinlock, check for race 309 */ 310 if ((flags & HAMMER2_CHAIN_INSERT_RACE) && 311 parent->core.generation != generation) { 312 error = HAMMER2_ERROR_EAGAIN; 313 goto failed; 314 } 315 316 /* 317 * Insert chain 318 */ 319 xchain = RB_INSERT(hammer2_chain_tree, &parent->core.rbtree, chain); 320 KASSERT(xchain == NULL, 321 ("hammer2_chain_insert: collision %p %p (key=%016jx)", 322 chain, xchain, chain->bref.key)); 323 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE); 324 chain->parent = parent; 325 ++parent->core.chain_count; 326 ++parent->core.generation; /* XXX incs for _get() too, XXX */ 327 328 /* 329 * We have to keep track of the effective live-view blockref count 330 * so the create code knows when to push an indirect block. 331 */ 332 if (flags & HAMMER2_CHAIN_INSERT_LIVE) 333 atomic_add_int(&parent->core.live_count, 1); 334 failed: 335 if (flags & HAMMER2_CHAIN_INSERT_SPIN) 336 hammer2_spin_unex(&parent->core.spin); 337 return error; 338 } 339 340 /* 341 * Drop the caller's reference to the chain. When the ref count drops to 342 * zero this function will try to disassociate the chain from its parent and 343 * deallocate it, then recursely drop the parent using the implied ref 344 * from the chain's chain->parent. 345 * 346 * Nobody should own chain's mutex on the 1->0 transition, unless this drop 347 * races an acquisition by another cpu. Therefore we can loop if we are 348 * unable to acquire the mutex, and refs is unlikely to be 1 unless we again 349 * race against another drop. 
350 */ 351 void 352 hammer2_chain_drop(hammer2_chain_t *chain) 353 { 354 u_int refs; 355 356 KKASSERT(chain->refs > 0); 357 358 while (chain) { 359 refs = chain->refs; 360 cpu_ccfence(); 361 KKASSERT(refs > 0); 362 363 if (refs == 1) { 364 if (hammer2_mtx_ex_try(&chain->lock) == 0) 365 chain = hammer2_chain_lastdrop(chain, 0); 366 /* retry the same chain, or chain from lastdrop */ 367 } else { 368 if (atomic_cmpset_int(&chain->refs, refs, refs - 1)) 369 break; 370 /* retry the same chain */ 371 } 372 cpu_pause(); 373 } 374 } 375 376 /* 377 * Unhold a held and probably not-locked chain, ensure that the data is 378 * dropped on the 1->0 transition of lockcnt by obtaining an exclusive 379 * lock and then simply unlocking the chain. 380 */ 381 void 382 hammer2_chain_unhold(hammer2_chain_t *chain) 383 { 384 u_int lockcnt; 385 int iter = 0; 386 387 for (;;) { 388 lockcnt = chain->lockcnt; 389 cpu_ccfence(); 390 if (lockcnt > 1) { 391 if (atomic_cmpset_int(&chain->lockcnt, 392 lockcnt, lockcnt - 1)) { 393 break; 394 } 395 } else if (hammer2_mtx_ex_try(&chain->lock) == 0) { 396 hammer2_chain_unlock(chain); 397 break; 398 } else { 399 /* 400 * This situation can easily occur on SMP due to 401 * the gap inbetween the 1->0 transition and the 402 * final unlock. We cannot safely block on the 403 * mutex because lockcnt might go above 1. 404 * 405 * XXX Sleep for one tick if it takes too long. 
406 */ 407 if (++iter > 1000) { 408 if (iter > 1000 + hz) { 409 kprintf("hammer2: h2race1 %p\n", chain); 410 iter = 1000; 411 } 412 tsleep(&iter, 0, "h2race1", 1); 413 } 414 cpu_pause(); 415 } 416 } 417 } 418 419 void 420 hammer2_chain_drop_unhold(hammer2_chain_t *chain) 421 { 422 hammer2_chain_unhold(chain); 423 hammer2_chain_drop(chain); 424 } 425 426 void 427 hammer2_chain_rehold(hammer2_chain_t *chain) 428 { 429 hammer2_chain_lock(chain, HAMMER2_RESOLVE_SHARED); 430 atomic_add_int(&chain->lockcnt, 1); 431 hammer2_chain_unlock(chain); 432 } 433 434 /* 435 * Handles the (potential) last drop of chain->refs from 1->0. Called with 436 * the mutex exclusively locked, refs == 1, and lockcnt 0. SMP races are 437 * possible against refs and lockcnt. We must dispose of the mutex on chain. 438 * 439 * This function returns an unlocked chain for recursive drop or NULL. It 440 * can return the same chain if it determines it has raced another ref. 441 * 442 * -- 443 * 444 * When two chains need to be recursively dropped we use the chain we 445 * would otherwise free to placehold the additional chain. It's a bit 446 * convoluted but we can't just recurse without potentially blowing out 447 * the kernel stack. 448 * 449 * The chain cannot be freed if it has any children. 450 * The chain cannot be freed if flagged MODIFIED unless we can dispose of it. 451 * The chain cannot be freed if flagged UPDATE unless we can dispose of it. 452 * Any dedup registration can remain intact. 453 * 454 * The core spinlock is allowed to nest child-to-parent (not parent-to-child). 455 */ 456 static 457 hammer2_chain_t * 458 hammer2_chain_lastdrop(hammer2_chain_t *chain, int depth) 459 { 460 hammer2_dev_t *hmp; 461 hammer2_chain_t *parent; 462 hammer2_chain_t *rdrop; 463 464 /* 465 * We need chain's spinlock to interlock the sub-tree test. 466 * We already have chain's mutex, protecting chain->parent. 467 * 468 * Remember that chain->refs can be in flux. 
469 */ 470 hammer2_spin_ex(&chain->core.spin); 471 472 if (chain->parent != NULL) { 473 /* 474 * If the chain has a parent the UPDATE bit prevents scrapping 475 * as the chain is needed to properly flush the parent. Try 476 * to complete the 1->0 transition and return NULL. Retry 477 * (return chain) if we are unable to complete the 1->0 478 * transition, else return NULL (nothing more to do). 479 * 480 * If the chain has a parent the MODIFIED bit prevents 481 * scrapping. 482 */ 483 if (chain->flags & (HAMMER2_CHAIN_UPDATE | 484 HAMMER2_CHAIN_MODIFIED)) { 485 if (atomic_cmpset_int(&chain->refs, 1, 0)) { 486 hammer2_spin_unex(&chain->core.spin); 487 hammer2_chain_assert_no_data(chain); 488 hammer2_mtx_unlock(&chain->lock); 489 chain = NULL; 490 } else { 491 hammer2_spin_unex(&chain->core.spin); 492 hammer2_mtx_unlock(&chain->lock); 493 } 494 return (chain); 495 } 496 /* spinlock still held */ 497 } else if (chain->bref.type == HAMMER2_BREF_TYPE_VOLUME || 498 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP) { 499 /* 500 * Retain the static vchain and fchain. Clear bits that 501 * are not relevant. Do not clear the MODIFIED bit, 502 * and certainly do not put it on the delayed-flush queue. 503 */ 504 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 505 } else { 506 /* 507 * The chain has no parent and can be flagged for destruction. 508 * Since it has no parent, UPDATE can also be cleared. 509 */ 510 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY); 511 if (chain->flags & HAMMER2_CHAIN_UPDATE) 512 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 513 514 /* 515 * If the chain has children we must propagate the DESTROY 516 * flag downward and rip the disconnected topology apart. 517 * This is accomplished by calling hammer2_flush() on the 518 * chain. 519 * 520 * Any dedup is already handled by the underlying DIO, so 521 * we do not have to specifically flush it here. 
522 */ 523 if (chain->core.chain_count) { 524 hammer2_spin_unex(&chain->core.spin); 525 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 526 HAMMER2_FLUSH_ALL); 527 hammer2_mtx_unlock(&chain->lock); 528 529 return(chain); /* retry drop */ 530 } 531 532 /* 533 * Otherwise we can scrap the MODIFIED bit if it is set, 534 * and continue along the freeing path. 535 * 536 * Be sure to clean-out any dedup bits. Without a parent 537 * this chain will no longer be visible to the flush code. 538 * Easy check data_off to avoid the volume root. 539 */ 540 if (chain->flags & HAMMER2_CHAIN_MODIFIED) { 541 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED); 542 atomic_add_long(&hammer2_count_modified_chains, -1); 543 if (chain->pmp) 544 hammer2_pfs_memory_wakeup(chain->pmp, -1); 545 } 546 /* spinlock still held */ 547 } 548 549 /* spinlock still held */ 550 551 /* 552 * If any children exist we must leave the chain intact with refs == 0. 553 * They exist because chains are retained below us which have refs or 554 * may require flushing. 555 * 556 * Retry (return chain) if we fail to transition the refs to 0, else 557 * return NULL indication nothing more to do. 558 */ 559 if (chain->core.chain_count) { 560 if (atomic_cmpset_int(&chain->refs, 1, 0)) { 561 hammer2_spin_unex(&chain->core.spin); 562 hammer2_chain_assert_no_data(chain); 563 hammer2_mtx_unlock(&chain->lock); 564 chain = NULL; 565 } else { 566 hammer2_spin_unex(&chain->core.spin); 567 hammer2_mtx_unlock(&chain->lock); 568 } 569 return (chain); 570 } 571 /* spinlock still held */ 572 /* no chains left under us */ 573 574 /* 575 * chain->core has no children left so no accessors can get to our 576 * chain from there. Now we have to lock the parent core to interlock 577 * remaining possible accessors that might bump chain's refs before 578 * we can safely drop chain's refs with intent to free the chain. 579 */ 580 hmp = chain->hmp; 581 rdrop = NULL; 582 583 parent = chain->parent; 584 585 /* 586 * WARNING! 
chain's spin lock is still held here, and other spinlocks 587 * will be acquired and released in the code below. We 588 * cannot be making fancy procedure calls! 589 */ 590 591 /* 592 * Spinlock the parent and try to drop the last ref on chain. 593 * On success determine if we should dispose of the chain 594 * (remove the chain from its parent, etc). 595 * 596 * (normal core locks are top-down recursive but we define 597 * core spinlocks as bottom-up recursive, so this is safe). 598 */ 599 if (parent) { 600 hammer2_spin_ex(&parent->core.spin); 601 if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) { 602 /* 603 * 1->0 transition failed, retry. 604 */ 605 hammer2_spin_unex(&parent->core.spin); 606 hammer2_spin_unex(&chain->core.spin); 607 hammer2_mtx_unlock(&chain->lock); 608 609 return(chain); 610 } 611 612 /* 613 * 1->0 transition successful, parent spin held to prevent 614 * new lookups, chain spinlock held to protect parent field. 615 * Remove chain from the parent. 616 * 617 * If the chain is being removed from the parent's rbtree but 618 * is not blkmapped, we have to adjust live_count downward. If 619 * it is blkmapped then the blockref is retained in the parent 620 * as is its associated live_count. This case can occur when 621 * a chain added to the topology is unable to flush and is 622 * then later deleted. 623 */ 624 if (chain->flags & HAMMER2_CHAIN_ONRBTREE) { 625 if ((parent->flags & HAMMER2_CHAIN_COUNTEDBREFS) && 626 (chain->flags & HAMMER2_CHAIN_BLKMAPPED) == 0) { 627 atomic_add_int(&parent->core.live_count, -1); 628 } 629 RB_REMOVE(hammer2_chain_tree, 630 &parent->core.rbtree, chain); 631 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE); 632 --parent->core.chain_count; 633 chain->parent = NULL; 634 } 635 636 /* 637 * If our chain was the last chain in the parent's core the 638 * core is now empty and its parent might have to be 639 * re-dropped if it has 0 refs. 
640 */ 641 if (parent->core.chain_count == 0) { 642 rdrop = parent; 643 atomic_add_int(&rdrop->refs, 1); 644 /* 645 if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0) 646 rdrop = NULL; 647 */ 648 } 649 hammer2_spin_unex(&parent->core.spin); 650 parent = NULL; /* safety */ 651 /* FALL THROUGH */ 652 } else { 653 /* 654 * No-parent case. 655 */ 656 if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) { 657 /* 658 * 1->0 transition failed, retry. 659 */ 660 hammer2_spin_unex(&parent->core.spin); 661 hammer2_spin_unex(&chain->core.spin); 662 hammer2_mtx_unlock(&chain->lock); 663 664 return(chain); 665 } 666 } 667 668 /* 669 * Successful 1->0 transition, no parent, no children... no way for 670 * anyone to ref this chain any more. We can clean-up and free it. 671 * 672 * We still have the core spinlock, and core's chain_count is 0. 673 * Any parent spinlock is gone. 674 */ 675 hammer2_spin_unex(&chain->core.spin); 676 hammer2_chain_assert_no_data(chain); 677 hammer2_mtx_unlock(&chain->lock); 678 KKASSERT(RB_EMPTY(&chain->core.rbtree) && 679 chain->core.chain_count == 0); 680 681 /* 682 * All locks are gone, no pointers remain to the chain, finish 683 * freeing it. 684 */ 685 KKASSERT((chain->flags & (HAMMER2_CHAIN_UPDATE | 686 HAMMER2_CHAIN_MODIFIED)) == 0); 687 688 /* 689 * Once chain resources are gone we can use the now dead chain 690 * structure to placehold what might otherwise require a recursive 691 * drop, because we have potentially two things to drop and can only 692 * return one directly. 693 */ 694 if (chain->flags & HAMMER2_CHAIN_ALLOCATED) { 695 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ALLOCATED); 696 chain->hmp = NULL; 697 kfree_obj(chain, hmp->mchain); 698 atomic_add_long(&hammer2_chain_allocs, -1); 699 } 700 701 /* 702 * Possible chaining loop when parent re-drop needed. 703 */ 704 return(rdrop); 705 } 706 707 /* 708 * On last lock release. 
709 */ 710 static hammer2_io_t * 711 hammer2_chain_drop_data(hammer2_chain_t *chain) 712 { 713 hammer2_io_t *dio; 714 715 if ((dio = chain->dio) != NULL) { 716 chain->dio = NULL; 717 chain->data = NULL; 718 } else { 719 switch(chain->bref.type) { 720 case HAMMER2_BREF_TYPE_VOLUME: 721 case HAMMER2_BREF_TYPE_FREEMAP: 722 break; 723 default: 724 if (chain->data != NULL) { 725 hammer2_spin_unex(&chain->core.spin); 726 panic("chain data not null: " 727 "chain %p bref %016jx.%02x " 728 "refs %d parent %p dio %p data %p", 729 chain, chain->bref.data_off, 730 chain->bref.type, chain->refs, 731 chain->parent, 732 chain->dio, chain->data); 733 } 734 KKASSERT(chain->data == NULL); 735 break; 736 } 737 } 738 return dio; 739 } 740 741 /* 742 * Lock a referenced chain element, acquiring its data with I/O if necessary, 743 * and specify how you would like the data to be resolved. 744 * 745 * If an I/O or other fatal error occurs, chain->error will be set to non-zero. 746 * 747 * The lock is allowed to recurse, multiple locking ops will aggregate 748 * the requested resolve types. Once data is assigned it will not be 749 * removed until the last unlock. 750 * 751 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element. 752 * (typically used to avoid device/logical buffer 753 * aliasing for data) 754 * 755 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in 756 * the INITIAL-create state (indirect blocks only). 757 * 758 * Do not resolve data elements for DATA chains. 759 * (typically used to avoid device/logical buffer 760 * aliasing for data) 761 * 762 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element. 763 * 764 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise 765 * it will be locked exclusive. 766 * 767 * HAMMER2_RESOLVE_NONBLOCK- (flag) The chain is locked non-blocking. If 768 * the lock fails, EAGAIN is returned. 769 * 770 * NOTE: Embedded elements (volume header, inodes) are always resolved 771 * regardless. 
 *
 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
 *	 element will instantiate and zero its buffer, and flush it on
 *	 release.
 *
 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
 *	 so as not to instantiate a device buffer, which could alias against
 *	 a logical file buffer.  However, if ALWAYS is specified the
 *	 device buffer will be instantiated anyway.
 *
 * NOTE: The return value is always 0 unless NONBLOCK is specified, in which
 *	 case it can be either 0 or EAGAIN.
 *
 * WARNING! This function blocks on I/O if data needs to be fetched.  This
 *	    blocking can run concurrent with other compatible lock holders
 *	    who do not need data returning.  The lock is not upgraded to
 *	    exclusive during a data fetch, a separate bit is used to
 *	    interlock I/O.  However, an exclusive lock holder can still count
 *	    on being interlocked against an I/O fetch managed by a shared
 *	    lock holder.
 */
int
hammer2_chain_lock(hammer2_chain_t *chain, int how)
{
	KKASSERT(chain->refs > 0);

	if (how & HAMMER2_RESOLVE_NONBLOCK) {
		/*
		 * We still have to bump lockcnt before acquiring the lock,
		 * even for non-blocking operation, because the unlock code
		 * live-loops on lockcnt == 1 when dropping the last lock.
		 *
		 * If the non-blocking operation fails we have to use an
		 * unhold sequence to undo the mess.
		 *
		 * NOTE: LOCKAGAIN must always succeed without blocking,
		 *	 even if NONBLOCK is specified.
		 */
		atomic_add_int(&chain->lockcnt, 1);
		if (how & HAMMER2_RESOLVE_SHARED) {
			if (how & HAMMER2_RESOLVE_LOCKAGAIN) {
				hammer2_mtx_sh_again(&chain->lock);
			} else {
				if (hammer2_mtx_sh_try(&chain->lock) != 0) {
					hammer2_chain_unhold(chain);
					return EAGAIN;
				}
			}
		} else {
			if (hammer2_mtx_ex_try(&chain->lock) != 0) {
				hammer2_chain_unhold(chain);
				return EAGAIN;
			}
		}
	} else {
		/*
		 * Get the appropriate lock.  If LOCKAGAIN is flagged with
		 * SHARED the caller expects a shared lock to already be
		 * present and we are giving it another ref.  This case must
		 * importantly not block if there is a pending exclusive lock
		 * request.
		 */
		atomic_add_int(&chain->lockcnt, 1);
		if (how & HAMMER2_RESOLVE_SHARED) {
			if (how & HAMMER2_RESOLVE_LOCKAGAIN) {
				hammer2_mtx_sh_again(&chain->lock);
			} else {
				hammer2_mtx_sh(&chain->lock);
			}
		} else {
			hammer2_mtx_ex(&chain->lock);
		}
	}

	/*
	 * If we already have a valid data pointer make sure the data is
	 * synchronized to the current cpu, and then no further action is
	 * necessary.
	 */
	if (chain->data) {
		if (chain->dio)
			hammer2_io_bkvasync(chain->dio);
		return 0;
	}

	/*
	 * Do we have to resolve the data?  This is generally only
	 * applicable to HAMMER2_BREF_TYPE_DATA which is special-cased.
	 * Other BREF types expects the data to be there.
	 */
	switch(how & HAMMER2_RESOLVE_MASK) {
	case HAMMER2_RESOLVE_NEVER:
		return 0;
	case HAMMER2_RESOLVE_MAYBE:
		/* skip I/O for freshly-created (INITIAL) and DATA chains */
		if (chain->flags & HAMMER2_CHAIN_INITIAL)
			return 0;
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
			return 0;
#if 0
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
			return 0;
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
			return 0;
#endif
		/* fall through */
	case HAMMER2_RESOLVE_ALWAYS:
	default:
		break;
	}

	/*
	 * Caller requires data
	 */
	hammer2_chain_load_data(chain);

	return 0;
}

#if 0
/*
 * Lock the chain, retain the hold, and drop the data persistence count.
 * The data should remain valid because we never transitioned lockcnt
 * through 0.
 */
void
hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how)
{
	hammer2_chain_lock(chain, how);
	atomic_add_int(&chain->lockcnt, -1);
}

/*
 * Downgrade an exclusive chain lock to a shared chain lock.
 *
 * NOTE: There is no upgrade equivalent due to the ease of
 *	 deadlocks in that direction.
 */
void
hammer2_chain_lock_downgrade(hammer2_chain_t *chain)
{
	hammer2_mtx_downgrade(&chain->lock);
}
#endif

/*
 * Issue I/O and install chain->data.  Caller must hold a chain lock, lock
 * may be of any type.
 *
 * Once chain->data is set it cannot be disposed of until all locks are
 * released.
 *
 * Make sure the data is synchronized to the current cpu.
 */
void
hammer2_chain_load_data(hammer2_chain_t *chain)
{
	hammer2_blockref_t *bref;
	hammer2_dev_t *hmp;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	/*
	 * Degenerate case, data already present, or chain has no media
	 * reference to load.
	 */
	KKASSERT(chain->lock.mtx_lock & MTX_MASK);
	if (chain->data) {
		if (chain->dio)
			hammer2_io_bkvasync(chain->dio);
		return;
	}
	/* radix-only data_off means the context is embedded in the bref */
	if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0)
		return;

	hmp = chain->hmp;
	KKASSERT(hmp != NULL);

	/*
	 * Gain the IOINPROG bit, interlocked block.  Only one thread may
	 * perform the load; others set IOSIGNAL and sleep until woken.
	 */
	for (;;) {
		u_int oflags;
		u_int nflags;

		oflags = chain->flags;
		cpu_ccfence();
		if (oflags & HAMMER2_CHAIN_IOINPROG) {
			/* another thread is loading; wait for its wakeup */
			nflags = oflags | HAMMER2_CHAIN_IOSIGNAL;
			tsleep_interlock(&chain->flags, 0);
			if (atomic_cmpset_int(&chain->flags, oflags, nflags)) {
				tsleep(&chain->flags, PINTERLOCKED,
				       "h2iocw", 0);
			}
			/* retry */
		} else {
			nflags = oflags | HAMMER2_CHAIN_IOINPROG;
			if (atomic_cmpset_int(&chain->flags, oflags, nflags)) {
				break;
			}
			/* retry */
		}
	}

	/*
	 * We own CHAIN_IOINPROG
	 *
	 * Degenerate case if we raced another load.
	 */
	if (chain->data) {
		if (chain->dio)
			hammer2_io_bkvasync(chain->dio);
		goto done;
	}

	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 */
	bref = &chain->bref;

	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request.
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		/* new element: zero-fill buffer, no read required */
		error = hammer2_io_new(hmp, bref->type,
				       bref->data_off, chain->bytes,
				       &chain->dio);
	} else {
		error = hammer2_io_bread(hmp, bref->type,
					 bref->data_off, chain->bytes,
					 &chain->dio);
		hammer2_adjreadcounter(chain->bref.type, chain->bytes);
	}
	if (error) {
		chain->error = HAMMER2_ERROR_EIO;
		kprintf("hammer2_chain_load_data: I/O error %016jx: %d\n",
			(intmax_t)bref->data_off, error);
		hammer2_io_bqrelse(&chain->dio);
		goto done;
	}
	chain->error = 0;

	/*
	 * This isn't perfect and can be ignored on OSs which do not have
	 * an indication as to whether a buffer is coming from cache or
	 * if I/O was actually issued for the read.  TESTEDGOOD will work
	 * pretty well without the B_IOISSUED logic because chains are
	 * cached, but in that situation (without B_IOISSUED) it will not
	 * detect whether a re-read via I/O is corrupted verses the original
	 * read.
	 *
	 * We can't re-run the CRC on every fresh lock.  That would be
	 * insanely expensive.
	 *
	 * If the underlying kernel buffer covers the entire chain we can
	 * use the B_IOISSUED indication to determine if we have to re-run
	 * the CRC on chain data for chains that managed to stay cached
	 * across the kernel disposal of the original buffer.
	 */
	if ((dio = chain->dio) != NULL && dio->bp) {
		//struct m_buf *bp = dio->bp;

		if (dio->psize == chain->bytes //&&
		    /*(bp->b_flags & B_IOISSUED)*/) {
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_TESTEDGOOD);
			//bp->b_flags &= ~B_IOISSUED;
		}
	}

	/*
	 * NOTE: A locked chain's data cannot be modified without first
	 *	 calling hammer2_chain_modify().
	 */

	/*
	 * NOTE: hammer2_io_data() call issues bkvasync()
	 */
	bdata = hammer2_io_data(chain->dio, chain->bref.data_off);

	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		/*
		 * Clear INITIAL.  In this case we used io_new() and the
		 * buffer has been zero'd and marked dirty.
		 *
		 * CHAIN_MODIFIED has not been set yet, and we leave it
		 * that way for now.  Set a temporary CHAIN_NOTTESTED flag
		 * to prevent hammer2_chain_testcheck() from trying to match
		 * a check code that has not yet been generated.  This bit
		 * should NOT end up on the actual media.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_NOTTESTED);
	} else if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * check data not currently synchronized due to
		 * modification.  XXX assumes data stays in the buffer
		 * cache, which might not be true (need biodep on flush
		 * to calculate crc?  or simple crc?).
		 */
	} else if ((chain->flags & HAMMER2_CHAIN_TESTEDGOOD) == 0) {
		/* first access since (re)read: verify the check code once */
		if (hammer2_chain_testcheck(chain, bdata) == 0) {
			chain->error = HAMMER2_ERROR_CHECK;
		} else {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_TESTEDGOOD);
		}
	}

	/*
	 * Setup the data pointer by pointing it into the buffer.
	 * WARNING! Other threads can start using the data the instant we
	 *	    set chain->data non-NULL.
1088 */ 1089 switch (bref->type) { 1090 case HAMMER2_BREF_TYPE_VOLUME: 1091 case HAMMER2_BREF_TYPE_FREEMAP: 1092 panic("hammer2_chain_load_data: unresolved volume header"); 1093 break; 1094 case HAMMER2_BREF_TYPE_DIRENT: 1095 KKASSERT(chain->bytes != 0); 1096 /* fall through */ 1097 case HAMMER2_BREF_TYPE_INODE: 1098 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 1099 case HAMMER2_BREF_TYPE_INDIRECT: 1100 case HAMMER2_BREF_TYPE_DATA: 1101 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 1102 default: 1103 /* 1104 * Point data at the device buffer and leave dio intact. 1105 */ 1106 chain->data = (void *)bdata; 1107 break; 1108 } 1109 1110 /* 1111 * Release HAMMER2_CHAIN_IOINPROG and signal waiters if requested. 1112 */ 1113 done: 1114 for (;;) { 1115 u_int oflags; 1116 u_int nflags; 1117 1118 oflags = chain->flags; 1119 nflags = oflags & ~(HAMMER2_CHAIN_IOINPROG | 1120 HAMMER2_CHAIN_IOSIGNAL); 1121 KKASSERT(oflags & HAMMER2_CHAIN_IOINPROG); 1122 if (atomic_cmpset_int(&chain->flags, oflags, nflags)) { 1123 if (oflags & HAMMER2_CHAIN_IOSIGNAL) 1124 wakeup(&chain->flags); 1125 break; 1126 } 1127 } 1128 } 1129 1130 /* 1131 * Unlock and deref a chain element. 1132 * 1133 * Remember that the presence of children under chain prevent the chain's 1134 * destruction but do not add additional references, so the dio will still 1135 * be dropped. 1136 */ 1137 void 1138 hammer2_chain_unlock(hammer2_chain_t *chain) 1139 { 1140 hammer2_io_t *dio; 1141 u_int lockcnt; 1142 int iter = 0; 1143 1144 /* 1145 * If multiple locks are present (or being attempted) on this 1146 * particular chain we can just unlock, drop refs, and return. 1147 * 1148 * Otherwise fall-through on the 1->0 transition. 
1149 */ 1150 for (;;) { 1151 lockcnt = chain->lockcnt; 1152 KKASSERT(lockcnt > 0); 1153 cpu_ccfence(); 1154 if (lockcnt > 1) { 1155 if (atomic_cmpset_int(&chain->lockcnt, 1156 lockcnt, lockcnt - 1)) { 1157 hammer2_mtx_unlock(&chain->lock); 1158 return; 1159 } 1160 } else if (hammer2_mtx_upgrade_try(&chain->lock) == 0) { 1161 /* while holding the mutex exclusively */ 1162 if (atomic_cmpset_int(&chain->lockcnt, 1, 0)) 1163 break; 1164 } else { 1165 /* 1166 * This situation can easily occur on SMP due to 1167 * the gap inbetween the 1->0 transition and the 1168 * final unlock. We cannot safely block on the 1169 * mutex because lockcnt might go above 1. 1170 * 1171 * XXX Sleep for one tick if it takes too long. 1172 */ 1173 if (++iter > 1000) { 1174 if (iter > 1000 + hz) { 1175 kprintf("hammer2: h2race2 %p\n", chain); 1176 iter = 1000; 1177 } 1178 tsleep(&iter, 0, "h2race2", 1); 1179 } 1180 cpu_pause(); 1181 } 1182 /* retry */ 1183 } 1184 1185 /* 1186 * Last unlock / mutex upgraded to exclusive. Drop the data 1187 * reference. 1188 */ 1189 dio = hammer2_chain_drop_data(chain); 1190 if (dio) 1191 hammer2_io_bqrelse(&dio); 1192 hammer2_mtx_unlock(&chain->lock); 1193 } 1194 1195 #if 0 1196 /* 1197 * Unlock and hold chain data intact 1198 */ 1199 void 1200 hammer2_chain_unlock_hold(hammer2_chain_t *chain) 1201 { 1202 atomic_add_int(&chain->lockcnt, 1); 1203 hammer2_chain_unlock(chain); 1204 } 1205 #endif 1206 1207 /* 1208 * Helper to obtain the blockref[] array base and count for a chain. 1209 * 1210 * XXX Not widely used yet, various use cases need to be validated and 1211 * converted to use this function. 
1212 */ 1213 static 1214 hammer2_blockref_t * 1215 hammer2_chain_base_and_count(hammer2_chain_t *parent, int *countp) 1216 { 1217 hammer2_blockref_t *base; 1218 int count; 1219 1220 if (parent->flags & HAMMER2_CHAIN_INITIAL) { 1221 base = NULL; 1222 1223 switch(parent->bref.type) { 1224 case HAMMER2_BREF_TYPE_INODE: 1225 count = HAMMER2_SET_COUNT; 1226 break; 1227 case HAMMER2_BREF_TYPE_INDIRECT: 1228 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 1229 count = parent->bytes / sizeof(hammer2_blockref_t); 1230 break; 1231 case HAMMER2_BREF_TYPE_VOLUME: 1232 count = HAMMER2_SET_COUNT; 1233 break; 1234 case HAMMER2_BREF_TYPE_FREEMAP: 1235 count = HAMMER2_SET_COUNT; 1236 break; 1237 default: 1238 panic("hammer2_chain_base_and_count: " 1239 "unrecognized blockref type: %d", 1240 parent->bref.type); 1241 count = 0; 1242 break; 1243 } 1244 } else { 1245 switch(parent->bref.type) { 1246 case HAMMER2_BREF_TYPE_INODE: 1247 base = &parent->data->ipdata.u.blockset.blockref[0]; 1248 count = HAMMER2_SET_COUNT; 1249 break; 1250 case HAMMER2_BREF_TYPE_INDIRECT: 1251 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 1252 base = &parent->data->npdata[0]; 1253 count = parent->bytes / sizeof(hammer2_blockref_t); 1254 break; 1255 case HAMMER2_BREF_TYPE_VOLUME: 1256 base = &parent->data->voldata. 1257 sroot_blockset.blockref[0]; 1258 count = HAMMER2_SET_COUNT; 1259 break; 1260 case HAMMER2_BREF_TYPE_FREEMAP: 1261 base = &parent->data->blkset.blockref[0]; 1262 count = HAMMER2_SET_COUNT; 1263 break; 1264 default: 1265 panic("hammer2_chain_base_and_count: " 1266 "unrecognized blockref type: %d", 1267 parent->bref.type); 1268 base = NULL; 1269 count = 0; 1270 break; 1271 } 1272 } 1273 *countp = count; 1274 1275 return base; 1276 } 1277 1278 /* 1279 * This counts the number of live blockrefs in a block array and 1280 * also calculates the point at which all remaining blockrefs are empty. 1281 * This routine can only be called on a live chain. 
 *
 * Caller holds the chain locked, but possibly with a shared lock.  We
 * must use an exclusive spinlock to prevent corruption.
 *
 * NOTE: Flag is not set until after the count is complete, allowing
 *	 callers to test the flag without holding the spinlock.
 *
 * NOTE: If base is NULL the related chain is still in the INITIAL
 *	 state and there are no blockrefs to count.
 *
 * NOTE: live_count may already have some counts accumulated due to
 *	 creation and deletion and could even be initially negative.
 */
void
hammer2_chain_countbrefs(hammer2_chain_t *chain,
			 hammer2_blockref_t *base, int count)
{
	hammer2_spin_ex(&chain->core.spin);
	if ((chain->flags & HAMMER2_CHAIN_COUNTEDBREFS) == 0) {
		if (base) {
			/*
			 * Scan down from the top of the array to find the
			 * highest non-empty blockref.  Everything at and
			 * above live_zero is known to be empty.
			 */
			while (--count >= 0) {
				if (base[count].type != HAMMER2_BREF_TYPE_EMPTY)
					break;
			}
			chain->core.live_zero = count + 1;
			/*
			 * Accumulate the number of live (non-empty)
			 * blockrefs below live_zero into live_count.
			 */
			while (count >= 0) {
				if (base[count].type != HAMMER2_BREF_TYPE_EMPTY)
					atomic_add_int(&chain->core.live_count,
						       1);
				--count;
			}
		} else {
			chain->core.live_zero = 0;
		}
		/* else do not modify live_count */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_COUNTEDBREFS);
	}
	hammer2_spin_unex(&chain->core.spin);
}

/*
 * Resize the chain's physical storage allocation in-place.  This function does
 * not usually adjust the data pointer and must be followed by (typically) a
 * hammer2_chain_modify() call to copy any old data over and adjust the
 * data pointer.
 *
 * Chains can be resized smaller without reallocating the storage.  Resizing
 * larger will reallocate the storage.  Excess or prior storage is reclaimed
 * asynchronously at a later time.
 *
 * An nradix value of 0 is special-cased to mean that the storage should
 * be disassociated, that is the chain is being resized to 0 bytes (not 1
 * byte).
 *
 * Must be passed an exclusively locked parent and chain.
 *
 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
 * to avoid instantiating a device buffer that conflicts with the vnode data
 * buffer.  However, because H2 can compress or encrypt data, the chain may
 * have a dio assigned to it in those situations, and they do not conflict.
 *
 * XXX return error if cannot resize.
 */
int
hammer2_chain_resize(hammer2_chain_t *chain,
		     hammer2_tid_t mtid, hammer2_off_t dedup_off,
		     int nradix, int flags)
{
	hammer2_dev_t *hmp;
	size_t obytes;		/* current allocation size */
	size_t nbytes;		/* requested allocation size */
	int error;

	hmp = chain->hmp;

	/*
	 * Only data and indirect blocks can be resized for now.
	 * (The volume root, inodes, and freemap elements use a fixed size).
	 */
	KKASSERT(chain != &hmp->vchain);
	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
		 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
		 chain->bref.type == HAMMER2_BREF_TYPE_DIRENT);

	/*
	 * Nothing to do if the element is already the proper size.
	 * nradix == 0 means "disassociate storage" (0 bytes), not 1 byte.
	 */
	obytes = chain->bytes;
	nbytes = (nradix) ? (1U << nradix) : 0;
	if (obytes == nbytes)
		return (chain->error);

	/*
	 * Make sure the old data is instantiated so we can copy it.  If this
	 * is a data block, the device data may be superfluous since the data
	 * might be in a logical block, but compressed or encrypted data is
	 * another matter.
	 *
	 * NOTE: The modify will set BLKMAPUPD for us if BLKMAPPED is set.
	 */
	error = hammer2_chain_modify(chain, mtid, dedup_off, 0);
	if (error)
		return error;

	/*
	 * Reallocate the block, even if making it smaller (because different
	 * block sizes may be in different regions).
	 *
	 * NOTE: Operation does not copy the data and may only be used
	 *	 to resize data blocks in-place, or directory entry blocks
	 *	 which are about to be modified in some manner.
	 */
	error = hammer2_freemap_alloc(chain, nbytes);
	if (error)
		return error;

	chain->bytes = nbytes;

	/*
	 * We don't want the followup chain_modify() to try to copy data
	 * from the old (wrong-sized) buffer.  It won't know how much to
	 * copy.  This case should only occur during writes when the
	 * originator already has the data to write in-hand.
	 */
	if (chain->dio) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
			 chain->bref.type == HAMMER2_BREF_TYPE_DIRENT);
		hammer2_io_brelse(&chain->dio);
		chain->data = NULL;
	}
	return (chain->error);
}

/*
 * Set the chain modified so its data can be changed by the caller, or
 * install deduplicated data.  The caller must call this routine for each
 * set of modifications it makes, even if the chain is already flagged
 * MODIFIED.
 *
 * Sets bref.modify_tid to mtid only if mtid != 0.  Note that bref.modify_tid
 * is a CLC (cluster level change) field and is not updated by parent
 * propagation during a flush.
 *
 * Returns an appropriate HAMMER2_ERROR_* code, which will generally reflect
 * chain->error except for HAMMER2_ERROR_ENOSPC.  If the allocation fails
 * due to no space available, HAMMER2_ERROR_ENOSPC is returned and the chain
 * remains unmodified with its old data ref intact and chain->error
 * unchanged.
 *
 * Dedup Handling
 *
 * If the DEDUPABLE flag is set in the chain the storage must be reallocated
 * even if the chain is still flagged MODIFIED.  In this case the chain's
 * DEDUPABLE flag will be cleared once the new storage has been assigned.
 *
 * If the caller passes a non-zero dedup_off we will use it to assign the
 * new storage.  The MODIFIED flag will be *CLEARED* in this case, and
 * DEDUPABLE will be set (NOTE: the UPDATE flag is always set).  The caller
 * must not modify the data content upon return.
 */
int
hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
		     hammer2_off_t dedup_off, int flags)
{
	hammer2_dev_t *hmp;
	hammer2_io_t *dio;
	int error;
	int wasinitial;		/* INITIAL flag was set on entry */
	int setmodified;	/* we transitioned MODIFIED 0->1 (for undo) */
	int setupdate;		/* we transitioned UPDATE 0->1 (for undo) */
	int newmod;		/* a new storage allocation is required */
	char *bdata;

	hmp = chain->hmp;
	KKASSERT(chain->lock.mtx_lock & MTX_EXCLUSIVE);

	/*
	 * Data is not optional for freemap chains (we must always be sure
	 * to copy the data on COW storage allocations).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
			 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
	}

	/*
	 * Data must be resolved if already assigned, unless explicitly
	 * flagged otherwise.  If we cannot safely load the data the
	 * modification fails and we return early.
	 */
	if (chain->data == NULL && chain->bytes != 0 &&
	    (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
	    (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
		hammer2_chain_load_data(chain);
		if (chain->error)
			return (chain->error);
	}
	error = 0;

	/*
	 * Set MODIFIED to indicate that the chain has been modified.  A new
	 * allocation is required when modifying a chain.
	 *
	 * Set UPDATE to ensure that the blockref is updated in the parent.
	 *
	 * If MODIFIED is already set determine if we can reuse the assigned
	 * data block or if we need a new data block.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		/*
		 * Must set modified bit.
		 */
		atomic_add_long(&hammer2_count_modified_chains, 1);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_inc(chain->pmp);	/* can be NULL */
		setmodified = 1;

		/*
		 * We may be able to avoid a copy-on-write if the chain's
		 * check mode is set to NONE and the chain's current
		 * modify_tid is beyond the last explicit snapshot tid.
		 *
		 * This implements HAMMER2's overwrite-in-place feature.
		 *
		 * NOTE! This data-block cannot be used as a de-duplication
		 *	 source when the check mode is set to NONE.
		 */
		if ((chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
		     chain->bref.type == HAMMER2_BREF_TYPE_DIRENT) &&
		    (chain->flags & HAMMER2_CHAIN_INITIAL) == 0 &&
		    (chain->flags & HAMMER2_CHAIN_DEDUPABLE) == 0 &&
		    HAMMER2_DEC_CHECK(chain->bref.methods) ==
		    HAMMER2_CHECK_NONE &&
		    chain->pmp &&
		    chain->bref.modify_tid >
		     chain->pmp->iroot->meta.pfs_lsnap_tid) {
			/*
			 * Sector overwrite allowed.
			 */
			newmod = 0;
		} else if ((hmp->hflags & HMNT2_EMERG) &&
			   chain->pmp &&
			   chain->bref.modify_tid >
			    chain->pmp->iroot->meta.pfs_lsnap_tid) {
			/*
			 * If in emergency delete mode then do a modify-in-
			 * place on any chain type belonging to the PFS as
			 * long as it doesn't mess up a snapshot.  We might
			 * be forced to do this anyway a little further down
			 * in the code if the allocation fails.
			 *
			 * Also note that in emergency mode, these modify-in-
			 * place operations are NOT SAFE.  A storage failure,
			 * power failure, or panic can corrupt the filesystem.
			 */
			newmod = 0;
		} else {
			/*
			 * Sector overwrite not allowed, must copy-on-write.
			 */
			newmod = 1;
		}
	} else if (chain->flags & HAMMER2_CHAIN_DEDUPABLE) {
		/*
		 * If the modified chain was registered for dedup we need
		 * a new allocation.  This only happens for delayed-flush
		 * chains (i.e. which run through the front-end buffer
		 * cache).
		 */
		newmod = 1;
		setmodified = 0;
	} else {
		/*
		 * Already flagged modified, no new allocation is needed.
		 */
		newmod = 0;
		setmodified = 0;
	}

	/*
	 * Flag parent update required.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		setupdate = 1;
	} else {
		setupdate = 0;
	}

	/*
	 * The XOP code returns held but unlocked focus chains.  This
	 * prevents the chain from being destroyed but does not prevent
	 * it from being modified.  diolk is used to interlock modifications
	 * against XOP frontend accesses to the focus.
	 *
	 * This allows us to theoretically avoid deadlocking the frontend
	 * if one of the backends lock up by not formally locking the
	 * focused chain in the frontend.  In addition, the synchronization
	 * code relies on this mechanism to avoid deadlocking concurrent
	 * synchronization threads.
	 */
	lockmgr(&chain->diolk, LK_EXCLUSIVE);

	/*
	 * The modification or re-modification requires an allocation and
	 * possible COW.  If an error occurs, the previous content and data
	 * reference is retained and the modification fails.
	 *
	 * If dedup_off is non-zero, the caller is requesting a deduplication
	 * rather than a modification.  The MODIFIED bit is not set and the
	 * data offset is set to the deduplication offset.  The data cannot
	 * be modified.
	 *
	 * NOTE: The dedup offset is allowed to be in a partially free state
	 *	 and we must be sure to reset it to a fully allocated state
	 *	 to force two bulkfree passes to free it again.
	 *
	 * NOTE: Only applicable when chain->bytes != 0.
	 *
	 * XXX can a chain already be marked MODIFIED without a data
	 * assignment?  If not, assert here instead of testing the case.
	 */
	if (chain != &hmp->vchain && chain != &hmp->fchain &&
	    chain->bytes) {
		if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
		    newmod
		) {
			/*
			 * NOTE: We do not have to remove the dedup
			 *	 registration because the area is still
			 *	 allocated and the underlying DIO will
			 *	 still be flushed.
			 */
			if (dedup_off) {
				chain->bref.data_off = dedup_off;
				if ((int)(dedup_off & HAMMER2_OFF_MASK_RADIX))
					chain->bytes = 1 <<
					    (int)(dedup_off &
					    HAMMER2_OFF_MASK_RADIX);
				else
					chain->bytes = 0;
				chain->error = 0;
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_MODIFIED);
				atomic_add_long(&hammer2_count_modified_chains,
						-1);
				if (chain->pmp) {
					hammer2_pfs_memory_wakeup(
						chain->pmp, -1);
				}
				hammer2_freemap_adjust(hmp, &chain->bref,
						HAMMER2_FREEMAP_DORECOVER);
				atomic_set_int(&chain->flags,
						HAMMER2_CHAIN_DEDUPABLE);
			} else {
				error = hammer2_freemap_alloc(chain,
							      chain->bytes);
				atomic_clear_int(&chain->flags,
						HAMMER2_CHAIN_DEDUPABLE);

				/*
				 * If we are unable to allocate a new block
				 * but we are in emergency mode, issue a
				 * warning to the console and reuse the same
				 * block.
				 *
				 * We behave as if the allocation were
				 * successful.
				 *
				 * THIS IS IMPORTANT: These modifications
				 * are virtually guaranteed to corrupt any
				 * snapshots related to this filesystem.
				 */
				if (error && (hmp->hflags & HMNT2_EMERG)) {
					error = 0;
					chain->bref.flags |=
						HAMMER2_BREF_FLAG_EMERG_MIP;

					krateprintf(&krate_h2em,
					    "hammer2: Emergency Mode WARNING: "
					    "Operation will likely corrupt "
					    "related snapshot: "
					    "%016jx.%02x key=%016jx\n",
					    chain->bref.data_off,
					    chain->bref.type,
					    chain->bref.key);
				} else if (error == 0) {
					chain->bref.flags &=
						~HAMMER2_BREF_FLAG_EMERG_MIP;
				}
			}
		}
	}

	/*
	 * Stop here if error.  We have to undo any flag bits we might
	 * have set above.
	 */
	if (error) {
		if (setmodified) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			atomic_add_long(&hammer2_count_modified_chains, -1);
			if (chain->pmp)
				hammer2_pfs_memory_wakeup(chain->pmp, -1);
		}
		if (setupdate) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
		lockmgr(&chain->diolk, LK_RELEASE);

		return error;
	}

	/*
	 * Update mirror_tid and modify_tid.  modify_tid is only updated
	 * if not passed as zero (during flushes, parent propagation passes
	 * the value 0).
	 *
	 * NOTE: chain->pmp could be the device spmp.
	 */
	chain->bref.mirror_tid = hmp->voldata.mirror_tid + 1;
	if (mtid)
		chain->bref.modify_tid = mtid;

	/*
	 * Set BLKMAPUPD to tell the flush code that an existing blockmap entry
	 * requires updating as well as to tell the delete code that the
	 * chain's blockref might not exactly match (in terms of physical size
	 * or block offset) the one in the parent's blocktable.  The base key
	 * of course will still match.
	 */
	if (chain->flags & HAMMER2_CHAIN_BLKMAPPED)
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_BLKMAPUPD);

	/*
	 * Short-cut data block handling when the caller does not need an
	 * actual data reference to (aka OPTDATA), as long as the chain does
	 * not already have a data pointer to the data and no de-duplication
	 * occurred.
	 *
	 * This generally means that the modifications are being done via the
	 * logical buffer cache.
	 *
	 * NOTE: If deduplication occurred we have to run through the data
	 *	 stuff to clear INITIAL, and the caller will likely want to
	 *	 assign the check code anyway.  Leaving INITIAL set on a
	 *	 dedup can be deadly (it can cause the block to be zero'd!).
	 *
	 * This code also handles bytes == 0 (most dirents).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_DATA &&
	    (flags & HAMMER2_MODIFY_OPTDATA) &&
	    chain->data == NULL) {
		if (dedup_off == 0) {
			KKASSERT(chain->dio == NULL);
			goto skip2;
		}
	}

	/*
	 * Clearing the INITIAL flag (for indirect blocks) indicates that
	 * we've processed the uninitialized storage allocation.
	 *
	 * If this flag is already clear we are likely in a copy-on-write
	 * situation but we have to be sure NOT to bzero the storage if
	 * no data is present.
	 *
	 * Clearing of NOTTESTED is allowed if the MODIFIED bit is set,
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		wasinitial = 1;
	} else {
		wasinitial = 0;
	}

	/*
	 * Instantiate data buffer and possibly execute COW operation
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * The data is embedded, no copy-on-write operation is
		 * needed.
		 */
		KKASSERT(chain->dio == NULL);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
		/*
		 * The data might be fully embedded.
		 */
		if (chain->bytes == 0) {
			KKASSERT(chain->dio == NULL);
			break;
		}
		/* fall through */
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Perform the copy-on-write operation
		 *
		 * zero-fill or copy-on-write depending on whether
		 * chain->data exists or not and set the dirty state for
		 * the new buffer.  hammer2_io_new() will handle the
		 * zero-fill.
		 *
		 * If a dedup_off was supplied this is an existing block
		 * and no COW, copy, or further modification is required.
		 */
		KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);

		if (wasinitial && dedup_off == 0) {
			error = hammer2_io_new(hmp, chain->bref.type,
					       chain->bref.data_off,
					       chain->bytes, &dio);
		} else {
			error = hammer2_io_bread(hmp, chain->bref.type,
						 chain->bref.data_off,
						 chain->bytes, &dio);
		}
		hammer2_adjreadcounter(chain->bref.type, chain->bytes);

		/*
		 * If an I/O error occurs make sure callers cannot
		 * accidentally modify the old buffer's contents and corrupt
		 * the filesystem.
		 *
		 * NOTE: hammer2_io_data() call issues bkvasync()
		 */
		if (error) {
			kprintf("hammer2_chain_modify: hmp=%p I/O error\n",
				hmp);
			chain->error = HAMMER2_ERROR_EIO;
			hammer2_io_brelse(&dio);
			hammer2_io_brelse(&chain->dio);
			chain->data = NULL;
			break;
		}
		chain->error = 0;
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		if (chain->data) {
			/*
			 * COW (unless a dedup).
			 */
			KKASSERT(chain->dio != NULL);
			if (chain->data != (void *)bdata && dedup_off == 0) {
				bcopy(chain->data, bdata, chain->bytes);
			}
		} else if (wasinitial == 0 && dedup_off == 0) {
			/*
			 * We have a problem.  We were asked to COW but
			 * we don't have any data to COW with!
			 */
			panic("hammer2_chain_modify: having a COW %p\n",
			      chain);
		}

		/*
		 * Retire the old buffer, replace with the new.  Dirty or
		 * redirty the new buffer.
		 *
		 * WARNING! The system buffer cache may have already flushed
		 *	    the buffer, so we must be sure to [re]dirty it
		 *	    for further modification.
		 *
		 *	    If dedup_off was supplied, the caller is not
		 *	    expected to make any further modification to the
		 *	    buffer.
		 *
		 * WARNING! hammer2_get_gdata() assumes dio never transitions
		 *	    through NULL in order to optimize away unnecessary
		 *	    diolk operations.
		 */
		{
			hammer2_io_t *tio;

			if ((tio = chain->dio) != NULL)
				hammer2_io_bqrelse(&tio);
			chain->data = (void *)bdata;
			chain->dio = dio;
			if (dedup_off == 0)
				hammer2_io_setdirty(dio);
		}
		break;
	default:
		panic("hammer2_chain_modify: illegal non-embedded type %d",
		      chain->bref.type);
		break;

	}
skip2:
	/*
	 * setflush on parent indicating that the parent must recurse down
	 * to us.  Do not call on chain itself which might already have it
	 * set.
	 */
	if (chain->parent)
		hammer2_chain_setflush(chain->parent);
	lockmgr(&chain->diolk, LK_RELEASE);

	return (chain->error);
}

/*
 * Modify the chain associated with an inode.
1892 */ 1893 int 1894 hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain, 1895 hammer2_tid_t mtid, int flags) 1896 { 1897 int error; 1898 1899 hammer2_inode_modify(ip); 1900 error = hammer2_chain_modify(chain, mtid, 0, flags); 1901 1902 return error; 1903 } 1904 1905 /* 1906 * This function returns the chain at the nearest key within the specified 1907 * range. The returned chain will be referenced but not locked. 1908 * 1909 * This function will recurse through chain->rbtree as necessary and will 1910 * return a *key_nextp suitable for iteration. *key_nextp is only set if 1911 * the iteration value is less than the current value of *key_nextp. 1912 * 1913 * The caller should use (*key_nextp) to calculate the actual range of 1914 * the returned element, which will be (key_beg to *key_nextp - 1), because 1915 * there might be another element which is superior to the returned element 1916 * and overlaps it. 1917 * 1918 * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL 1919 * chains continue to be returned. On EOF (*key_nextp) may overflow since 1920 * it will wind up being (key_end + 1). 1921 * 1922 * WARNING! Must be called with child's spinlock held. Spinlock remains 1923 * held through the operation. 
 */
struct hammer2_chain_find_info {
	hammer2_chain_t	*best;		/* best candidate found so far */
	hammer2_key_t	key_beg;	/* inclusive search range start */
	hammer2_key_t	key_end;	/* inclusive search range end */
	hammer2_key_t	key_next;	/* iteration point, 0 = unset */
};

static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);

static
hammer2_chain_t *
hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
		   hammer2_key_t key_beg, hammer2_key_t key_end)
{
	struct hammer2_chain_find_info info;

	info.best = NULL;
	info.key_beg = key_beg;
	info.key_end = key_end;
	info.key_next = *key_nextp;

	/*
	 * Scan the in-memory rbtree.  find_cmp prunes subtrees that do not
	 * overlap the key range, find_callback tracks the best candidate
	 * and truncates key_next.
	 */
	RB_SCAN(hammer2_chain_tree, &parent->core.rbtree,
		hammer2_chain_find_cmp, hammer2_chain_find_callback,
		&info);
	*key_nextp = info.key_next;
#if 0
	kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
		parent, key_beg, key_end, *key_nextp);
#endif

	return (info.best);
}

/*
 * RB_SCAN comparator: -1 if the child's key range lies entirely below the
 * search range, +1 if entirely above, 0 if it overlaps (visit it).
 */
static
int
hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
{
	struct hammer2_chain_find_info *info = data;
	hammer2_key_t child_beg;
	hammer2_key_t child_end;

	child_beg = child->bref.key;
	child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;

	if (child_end < info->key_beg)
		return(-1);
	if (child_beg > info->key_end)
		return(1);
	return(0);
}

/*
 * RB_SCAN callback: select the overlapping child with the lowest base key
 * as (best) and truncate key_next so the caller can compute the returned
 * element's effective range.
 */
static
int
hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
{
	struct hammer2_chain_find_info *info = data;
	hammer2_chain_t *best;
	hammer2_key_t child_end;

	if ((best = info->best) == NULL) {
		/*
		 * No previous best.  Assign best
		 */
		info->best = child;
	} else if (best->bref.key <= info->key_beg &&
		   child->bref.key <= info->key_beg) {
		/*
		 * Illegal overlap.
		 */
		KKASSERT(0);
		/*info->best = child;*/
	} else if (child->bref.key < best->bref.key) {
		/*
		 * Child has a nearer key and best is not flush with key_beg.
		 * Set best to child.  Truncate key_next to the old best key.
		 */
		info->best = child;
		if (info->key_next > best->bref.key || info->key_next == 0)
			info->key_next = best->bref.key;
	} else if (child->bref.key == best->bref.key) {
		/*
		 * If our current best is flush with the child then this
		 * is an illegal overlap.
		 *
		 * key_next will automatically be limited to the smaller of
		 * the two end-points.
		 */
		KKASSERT(0);
		info->best = child;
	} else {
		/*
		 * Keep the current best but truncate key_next to the child's
		 * base.
		 *
		 * key_next will also automatically be limited to the smaller
		 * of the two end-points (probably not necessary for this case
		 * but we do it anyway).
		 */
		if (info->key_next > child->bref.key || info->key_next == 0)
			info->key_next = child->bref.key;
	}

	/*
	 * Always truncate key_next based on child's end-of-range.
	 * (child_end == 0 indicates wrap-around at the top of the key space)
	 */
	child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
	if (child_end && (info->key_next > child_end || info->key_next == 0))
		info->key_next = child_end;

	return(0);
}

/*
 * Retrieve the specified chain from a media blockref, creating the
 * in-memory chain structure which reflects it.  The returned chain is
 * held and locked according to (how) (HAMMER2_RESOLVE_*).  The caller must
 * handle crc-checks and so forth, and should check chain->error before
 * assuming that the data is good.
 *
 * To handle insertion races pass the INSERT_RACE flag along with the
 * generation number of the core.  NULL will be returned if the generation
 * number changes before we have a chance to insert the chain.
Insert
 * races can occur because the parent might be held shared.
 *
 * Caller must hold the parent locked shared or exclusive since we may
 * need the parent's bref array to find our block.
 *
 * WARNING! chain->pmp is always set to NULL for any chain representing
 *	    part of the super-root topology.
 */
hammer2_chain_t *
hammer2_chain_get(hammer2_chain_t *parent, int generation,
		  hammer2_blockref_t *bref, int how)
{
	hammer2_dev_t *hmp = parent->hmp;
	hammer2_chain_t *chain;
	int error;

	/*
	 * Allocate a chain structure representing the existing media
	 * entry.  Resulting chain has one ref and is not locked.
	 *
	 * PFS roots deliberately get a NULL pmp here (reassigned later);
	 * everything else inherits the parent's pmp.
	 */
	if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
		chain = hammer2_chain_alloc(hmp, NULL, bref);
	else
		chain = hammer2_chain_alloc(hmp, parent->pmp, bref);
	/* ref'd chain returned */

	/*
	 * Flag that the chain is in the parent's blockmap so delete/flush
	 * knows what to do with it.
	 */
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_BLKMAPPED);

	/*
	 * chain must be locked to avoid unexpected ripouts
	 */
	hammer2_chain_lock(chain, how);

	/*
	 * Link the chain into its parent.  A spinlock is required to safely
	 * access the RBTREE, and it is possible to collide with another
	 * hammer2_chain_get() operation because the caller might only hold
	 * a shared lock on the parent.  INSERT_RACE makes the insert fail
	 * if parent->core.generation no longer matches (generation).
	 *
	 * NOTE: Get races can occur quite often when we distribute
	 *	 asynchronous read-aheads across multiple threads.
	 */
	KKASSERT(parent->refs > 0);
	error = hammer2_chain_insert(parent, chain,
				     HAMMER2_CHAIN_INSERT_SPIN |
				     HAMMER2_CHAIN_INSERT_RACE,
				     generation);
	if (error) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
		/*kprintf("chain %p get race\n", chain);*/
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
	}

	/*
	 * Return our new chain referenced and locked (per (how)), or NULL
	 * if an insertion race occurred.  (On success the lock acquired
	 * above is retained; it is only released on the error path.)
	 */
	return (chain);
}

/*
 * Lookup initialization/completion API.
 *
 * lookup_init: ref and lock (parent) for use as the starting point of a
 * lookup iteration, honoring HAMMER2_LOOKUP_SHARED.  Returns (parent)
 * for convenience.
 */
hammer2_chain_t *
hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
{
	hammer2_chain_ref(parent);
	if (flags & HAMMER2_LOOKUP_SHARED) {
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
					   HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	}
	return (parent);
}

/*
 * Undo a lookup_init (or release the floated (*parentp) left over from a
 * lookup iteration).  NULL is accepted as a no-op.
 */
void
hammer2_chain_lookup_done(hammer2_chain_t *parent)
{
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
}

/*
 * Take the locked chain and return a locked parent.  The chain remains
 * locked on return, but may have to be temporarily unlocked to acquire
 * the parent.  Because of this, (chain) must be stable and cannot be
 * deleted while it was temporarily unlocked (typically means that (chain)
 * is an inode).
 *
 * Pass HAMMER2_RESOLVE_* flags in flags.
 *
 * This will work even if the chain is errored, and the caller can check
 * parent->error on return if desired since the parent will be locked.
 *
 * This function handles the lock order reversal.
 */
hammer2_chain_t *
hammer2_chain_getparent(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *parent;

	/*
	 * Be careful of order, chain must be unlocked before parent
	 * is locked below to avoid a deadlock.  Try it trivially first:
	 * a NONBLOCK lock attempt on the parent cannot deadlock and
	 * avoids releasing the chain lock at all.
	 */
	parent = chain->parent;
	if (parent == NULL)
		panic("hammer2_chain_getparent: no parent");
	hammer2_chain_ref(parent);
	if (hammer2_chain_lock(parent, flags|HAMMER2_RESOLVE_NONBLOCK) == 0)
		return parent;

	/*
	 * Lock-order reversal: drop the chain lock, lock parent then chain,
	 * then verify the linkage did not change while the chain was
	 * unlocked.  (chain) must be stable — see the function header.
	 */
	for (;;) {
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, flags);
		hammer2_chain_lock(chain, flags);

		/*
		 * Parent relinking races are quite common.  We have to get
		 * it right or we will blow up the block table.
		 */
		if (chain->parent == parent)
			break;
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		cpu_ccfence();		/* force re-read of chain->parent */
		parent = chain->parent;
		if (parent == NULL)
			panic("hammer2_chain_getparent: no parent");
		hammer2_chain_ref(parent);
	}
	return parent;
}

/*
 * Take the locked chain and return a locked parent.  The chain is unlocked
 * and dropped.  *chainp is set to the returned parent as a convenience.
 * Pass HAMMER2_RESOLVE_* flags in flags.
 *
 * This will work even if the chain is errored, and the caller can check
 * parent->error on return if desired since the parent will be locked.
 *
 * The chain does NOT need to be stable.  We use a tracking structure
 * to track the expected parent if the chain is deleted out from under us.
 *
 * This function handles the lock order reversal.
 */
hammer2_chain_t *
hammer2_chain_repparent(hammer2_chain_t **chainp, int flags)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	struct hammer2_reptrack reptrack;
	struct hammer2_reptrack **repp;

	/*
	 * Be careful of order, chain must be unlocked before parent
	 * is locked below to avoid a deadlock.  Try it trivially first
	 * with a NONBLOCK attempt on the parent.
	 */
	chain = *chainp;
	parent = chain->parent;
	if (parent == NULL) {
		/*
		 * NOTE(review): no matching hammer2_spin_ex() on
		 * chain->core.spin is visible in this function before this
		 * release — presumably defensive/vestigial; confirm against
		 * callers before relying on it.
		 */
		hammer2_spin_unex(&chain->core.spin);
		panic("hammer2_chain_repparent: no parent");
	}
	hammer2_chain_ref(parent);
	if (hammer2_chain_lock(parent, flags|HAMMER2_RESOLVE_NONBLOCK) == 0) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		*chainp = parent;

		return parent;
	}

	/*
	 * Ok, now it gets a bit nasty.  There are multiple situations where
	 * the parent might be in the middle of a deletion, or where the child
	 * (chain) might be deleted the instant we let go of its lock.
	 * We can potentially end up in a no-win situation!
	 *
	 * In particular, the indirect_maintenance() case can cause these
	 * situations.
	 *
	 * To deal with this we install a reptrack structure in the parent
	 * This reptrack structure 'owns' the parent ref and will automatically
	 * migrate to the parent's parent if the parent is deleted permanently.
	 */
	hammer2_spin_init(&reptrack.spin, "h2reptrk");
	reptrack.chain = parent;
	hammer2_chain_ref(parent);		/* for the reptrack */

	hammer2_spin_ex(&parent->core.spin);
	reptrack.next = parent->core.reptrack;
	parent->core.reptrack = &reptrack;
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
	chain = NULL;	/* gone */

	/*
	 * At the top of this loop, chain is gone and parent is refd both
	 * by us explicitly AND via our reptrack.  We are attempting to
	 * lock parent.  If the reptrack was migrated by a concurrent
	 * permanent deletion, chase the new target.
	 */
	for (;;) {
		hammer2_chain_lock(parent, flags);

		if (reptrack.chain == parent)
			break;
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);

		kprintf("hammer2: debug REPTRACK %p->%p\n",
			parent, reptrack.chain);
		hammer2_spin_ex(&reptrack.spin);
		parent = reptrack.chain;
		hammer2_chain_ref(parent);
		hammer2_spin_unex(&reptrack.spin);
	}

	/*
	 * Once parent is locked and matches our reptrack, our reptrack
	 * will be stable and we have our parent.  We can unlink our
	 * reptrack.
	 *
	 * WARNING!  Remember that the chain lock might be shared.  Chains
	 *	     locked shared have stable parent linkages.
	 */
	hammer2_spin_ex(&parent->core.spin);
	repp = &parent->core.reptrack;
	while (*repp != &reptrack)
		repp = &(*repp)->next;
	*repp = reptrack.next;
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_drop(parent);	/* reptrack ref */
	*chainp = parent;		/* return parent lock+ref */

	return parent;
}

/*
 * Dispose of any linked reptrack structures in (chain) by shifting them to
 * (parent).  Both (chain) and (parent) must be exclusively locked.
 *
 * This is interlocked against any children of (chain) on the other side.
 * No children now remain as-of when this is called so we can test
 * core.reptrack without holding the spin-lock.
 *
 * Used whenever the caller intends to permanently delete chains related
 * to topological recursions (BREF_TYPE_INDIRECT, BREF_TYPE_FREEMAP_NODE),
 * where the chains underneath the node being deleted are given a new parent
 * above the node being deleted.
2313 */ 2314 static 2315 void 2316 hammer2_chain_repchange(hammer2_chain_t *parent, hammer2_chain_t *chain) 2317 { 2318 struct hammer2_reptrack *reptrack; 2319 2320 KKASSERT(chain->core.live_count == 0 && RB_EMPTY(&chain->core.rbtree)); 2321 while (chain->core.reptrack) { 2322 hammer2_spin_ex(&parent->core.spin); 2323 hammer2_spin_ex(&chain->core.spin); 2324 reptrack = chain->core.reptrack; 2325 if (reptrack == NULL) { 2326 hammer2_spin_unex(&chain->core.spin); 2327 hammer2_spin_unex(&parent->core.spin); 2328 break; 2329 } 2330 hammer2_spin_ex(&reptrack->spin); 2331 chain->core.reptrack = reptrack->next; 2332 reptrack->chain = parent; 2333 reptrack->next = parent->core.reptrack; 2334 parent->core.reptrack = reptrack; 2335 hammer2_chain_ref(parent); /* reptrack */ 2336 2337 hammer2_spin_unex(&chain->core.spin); 2338 hammer2_spin_unex(&parent->core.spin); 2339 kprintf("hammer2: debug repchange %p %p->%p\n", 2340 reptrack, chain, parent); 2341 hammer2_chain_drop(chain); /* reptrack */ 2342 } 2343 } 2344 2345 /* 2346 * Locate the first chain whos key range overlaps (key_beg, key_end) inclusive. 2347 * (*parentp) typically points to an inode but can also point to a related 2348 * indirect block and this function will recurse upwards and find the inode 2349 * or the nearest undeleted indirect block covering the key range. 2350 * 2351 * This function unconditionally sets *errorp, replacing any previous value. 2352 * 2353 * (*parentp) must be exclusive or shared locked (depending on flags) and 2354 * referenced and can be an inode or an existing indirect block within the 2355 * inode. 2356 * 2357 * If (*parent) is errored out, this function will not attempt to recurse 2358 * the radix tree and will return NULL along with an appropriate *errorp. 2359 * If NULL is returned and *errorp is 0, the requested lookup could not be 2360 * located. 
 *
 * On return (*parentp) will be modified to point at the deepest parent chain
 * element encountered during the search, as a helper for an insertion or
 * deletion.
 *
 * The new (*parentp) will be locked shared or exclusive (depending on flags),
 * and referenced, and the old will be unlocked and dereferenced (no change
 * if they are both the same).  This is particularly important if the caller
 * wishes to insert a new chain, (*parentp) will be set properly even if NULL
 * is returned, as long as no error occurred.
 *
 * The matching chain will be returned locked according to flags.
 *
 * --
 *
 * NULL is returned if no match was found, but (*parentp) will still
 * potentially be adjusted.
 *
 * On return (*key_nextp) will point to an iterative value for key_beg.
 * (If NULL is returned (*key_nextp) is set to (key_end + 1)).
 *
 * This function will also recurse up the chain if the key is not within the
 * current parent's range.  (*parentp) can never be set to NULL.  An iteration
 * can simply allow (*parentp) to float inside the loop.
 *
 * NOTE!  chain->data is not always resolved.  By default it will not be
 *	  resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
 *	  HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
 *	  BREF_TYPE_DATA as the device buffer can alias the logical file
 *	  buffer).
 */
hammer2_chain_t *
hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
		     hammer2_key_t key_beg, hammer2_key_t key_end,
		     int *errorp, int flags)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_blockref_t bsave;
	hammer2_key_t scan_beg;
	hammer2_key_t scan_end;
	int count = 0;
	int how_always = HAMMER2_RESOLVE_ALWAYS;
	int how_maybe = HAMMER2_RESOLVE_MAYBE;
	int how;
	int generation;
	int maxloops = 300000;	/* runaway-loop guard */

	/*
	 * Translate HAMMER2_LOOKUP_* flags into the three lock/resolve
	 * modes used below: (how) for leaf chains, (how_maybe) for
	 * topology (indirect/freemap) chains, (how_always) for embedded
	 * data shortcuts.
	 */
	if (flags & HAMMER2_LOOKUP_ALWAYS) {
		how_maybe = how_always;
		how = HAMMER2_RESOLVE_ALWAYS;
	} else if (flags & HAMMER2_LOOKUP_NODATA) {
		how = HAMMER2_RESOLVE_NEVER;
	} else {
		how = HAMMER2_RESOLVE_MAYBE;
	}
	if (flags & HAMMER2_LOOKUP_SHARED) {
		how_maybe |= HAMMER2_RESOLVE_SHARED;
		how_always |= HAMMER2_RESOLVE_SHARED;
		how |= HAMMER2_RESOLVE_SHARED;
	}

	/*
	 * Recurse (*parentp) upward if necessary until the parent completely
	 * encloses the key range or we hit the inode.
	 *
	 * Handle races against the flusher deleting indirect nodes on its
	 * way back up by continuing to recurse upward past the deletion.
	 */
	parent = *parentp;
	*errorp = 0;

	while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	       parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		scan_beg = parent->bref.key;
		scan_end = scan_beg +
			   ((hammer2_key_t)1 << parent->bref.keybits) - 1;
		if ((parent->flags & HAMMER2_CHAIN_DELETED) == 0) {
			if (key_beg >= scan_beg && key_end <= scan_end)
				break;
		}
		parent = hammer2_chain_repparent(parentp, how_maybe);
	}
again:
	if (--maxloops == 0)
		panic("hammer2_chain_lookup: maxloops");

	/*
	 * MATCHIND case that does not require parent->data (do prior to
	 * parent->error check).
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		if (flags & HAMMER2_LOOKUP_MATCHIND) {
			scan_beg = parent->bref.key;
			scan_end = scan_beg +
			       ((hammer2_key_t)1 << parent->bref.keybits) - 1;
			if (key_beg == scan_beg && key_end == scan_end) {
				chain = parent;
				hammer2_chain_ref(chain);
				hammer2_chain_lock(chain, how_maybe);
				*key_nextp = scan_end + 1;
				goto done;
			}
		}
		break;
	default:
		break;
	}

	/*
	 * No lookup is possible if the parent is errored.  We delayed
	 * this check as long as we could to ensure that the parent backup,
	 * embedded data, and MATCHIND code could still execute.
	 *
	 * NOTE(review): *key_nextp is not updated on this path; callers
	 * should not rely on it when *errorp is non-zero.
	 */
	if (parent->error) {
		*errorp = parent->error;
		return NULL;
	}

	/*
	 * Locate the blockref array.  Currently we do a fully associative
	 * search through the array.
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Special shortcut for embedded data returns the inode
		 * itself.  Callers must detect this condition and access
		 * the embedded data (the strategy code does this for us).
		 *
		 * This is only applicable to regular files and softlinks.
		 *
		 * We need a second lock on parent.  Since we already have
		 * a lock we must pass LOCKAGAIN to prevent unexpected
		 * blocking (we don't want to block on a second shared
		 * ref if an exclusive lock is pending)
		 */
		if (parent->data->ipdata.meta.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			if (flags & HAMMER2_LOOKUP_NODIRECT) {
				chain = NULL;
				*key_nextp = key_end + 1;
				goto done;
			}
			hammer2_chain_ref(parent);
			hammer2_chain_lock(parent, how_always |
						   HAMMER2_RESOLVE_LOCKAGAIN);
			*key_nextp = key_end + 1;
			return (parent);
		}
		base = &parent->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.
		 *
		 * Debugging: Enter permanent wait state instead of
		 * panicking on unexpectedly NULL data for the moment.
		 */
		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
			base = NULL;
		} else {
			if (parent->data == NULL) {
				kprintf("hammer2: unexpected NULL data "
					"on %p\n", parent);
				while (1)
					tsleep(parent, 0, "xxx", 0);
			}
			base = &parent->data->npdata[0];
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &parent->data->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &parent->data->blkset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_lookup: unrecognized "
		      "blockref(B) type: %d",
		      parent->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
		break;
	}

	/*
	 * Merged scan to find next candidate.
	 *
	 * hammer2_base_*() functions require the parent->core.live_* fields
	 * to be synchronized.
	 *
	 * We need to hold the spinlock to access the block array and RB tree
	 * and to interlock chain creation.
	 */
	if ((parent->flags & HAMMER2_CHAIN_COUNTEDBREFS) == 0)
		hammer2_chain_countbrefs(parent, base, count);

	/*
	 * Combined search of the media blockref array and the in-memory
	 * rbtree.  (generation) snapshots the parent so a racing insert
	 * can be detected by hammer2_chain_get() below.
	 */
	hammer2_spin_ex(&parent->core.spin);
	chain = hammer2_combined_find(parent, base, count,
				      key_nextp,
				      key_beg, key_end,
				      &bref);
	generation = parent->core.generation;

	/*
	 * Exhausted parent chain, iterate.
	 */
	if (bref == NULL) {
		KKASSERT(chain == NULL);
		hammer2_spin_unex(&parent->core.spin);
		if (key_beg == key_end)	/* short cut single-key case */
			return (NULL);

		/*
		 * Stop if we reached the end of the iteration.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
		    parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
			return (NULL);
		}

		/*
		 * Calculate next key, stop if we reached the end of the
		 * iteration (key wrapped to 0), otherwise go up one level
		 * and loop.
		 */
		key_beg = parent->bref.key +
			  ((hammer2_key_t)1 << parent->bref.keybits);
		if (key_beg == 0 || key_beg > key_end)
			return (NULL);
		parent = hammer2_chain_repparent(parentp, how_maybe);
		goto again;
	}

	/*
	 * Selected from blockref or in-memory chain.  Snapshot the bref
	 * (bsave) before releasing the spinlock so it can be re-validated
	 * after the chain is locked.
	 */
	bsave = *bref;
	if (chain == NULL) {
		hammer2_spin_unex(&parent->core.spin);
		if (bsave.type == HAMMER2_BREF_TYPE_INDIRECT ||
		    bsave.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
			chain = hammer2_chain_get(parent, generation,
						  &bsave, how_maybe);
		} else {
			chain = hammer2_chain_get(parent, generation,
						  &bsave, how);
		}
		if (chain == NULL)
			goto again;	/* lost insertion race, retry */
	} else {
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&parent->core.spin);

		/*
		 * chain is referenced but not locked.  We must lock the
		 * chain to obtain definitive state.
		 */
		if (bsave.type == HAMMER2_BREF_TYPE_INDIRECT ||
		    bsave.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
			hammer2_chain_lock(chain, how_maybe);
		} else {
			hammer2_chain_lock(chain, how);
		}
		KKASSERT(chain->parent == parent);
	}
	/*
	 * Re-validate against the saved bref; the chain may have moved or
	 * been modified between the spin-locked find and the chain lock.
	 */
	if (bcmp(&bsave, &chain->bref, sizeof(bsave)) ||
	    chain->parent != parent) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;	/* SAFETY */
		goto again;
	}

	/*
	 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
	 *
	 * NOTE: Chain's key range is not relevant as there might be
	 *	 one-offs within the range that are not deleted.
	 *
	 * NOTE: Lookups can race delete-duplicate because
	 *	 delete-duplicate does not lock the parent's core
	 *	 (they just use the spinlock on the core).
	 */
	if (chain->flags & HAMMER2_CHAIN_DELETED) {
		kprintf("skip deleted chain %016jx.%02x key=%016jx\n",
			chain->bref.data_off, chain->bref.type,
			chain->bref.key);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;	/* SAFETY */
		key_beg = *key_nextp;
		if (key_beg == 0 || key_beg > key_end)
			return(NULL);
		goto again;
	}

	/*
	 * If the chain element is an indirect block it becomes the new
	 * parent and we loop on it.  We must maintain our top-down locks
	 * to prevent the flusher from interfering (i.e. doing a
	 * delete-duplicate and leaving us recursing down a deleted chain).
	 *
	 * The parent always has to be locked with at least RESOLVE_MAYBE
	 * so we can access its data.  It might need a fixup if the caller
	 * passed incompatible flags.  Be careful not to cause a deadlock
	 * as a data-load requires an exclusive lock.
	 *
	 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
	 * range is within the requested key range we return the indirect
	 * block and do NOT loop.  This is usually only used to acquire
	 * freemap nodes.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		*parentp = parent = chain;
		chain = NULL;	/* SAFETY */
		goto again;
	}
done:
	/*
	 * All done, return the locked chain.
	 *
	 * If the caller does not want a locked chain, replace the lock with
	 * a ref.  Perhaps this can eventually be optimized to not obtain the
	 * lock in the first place for situations where the data does not
	 * need to be resolved.
	 *
	 * NOTE! A chain->error must be tested by the caller upon return.
	 *	 *errorp is only set based on issues which occur while
	 *	 trying to reach the chain.
	 */
	return (chain);
}

/*
 * After having issued a lookup we can iterate all matching keys.
 *
 * If chain is non-NULL we continue the iteration from just after its index.
 *
 * If chain is NULL we assume the parent was exhausted and continue the
 * iteration at the next parent.
 *
 * If a fatal error occurs (typically an I/O error), a dummy chain is
 * returned with chain->error and error-identifying information set.  This
 * chain will assert if you try to do anything fancy with it.
 *
 * XXX Depending on where the error occurs we should allow continued iteration.
 *
 * parent must be locked on entry and remains locked throughout.  chain's
 * lock status must match flags.  Chain is always at least referenced.
 *
 * WARNING!  The MATCHIND flag does not apply to this function.
 */
hammer2_chain_t *
hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
		   hammer2_key_t *key_nextp,
		   hammer2_key_t key_beg, hammer2_key_t key_end,
		   int *errorp, int flags)
{
	hammer2_chain_t *parent;
	int how_maybe;

	/*
	 * Calculate locking flags for upward recursion.
	 */
	how_maybe = HAMMER2_RESOLVE_MAYBE;
	if (flags & HAMMER2_LOOKUP_SHARED)
		how_maybe |= HAMMER2_RESOLVE_SHARED;

	parent = *parentp;
	*errorp = 0;

	/*
	 * Calculate the next index and recalculate the parent if necessary.
	 */
	if (chain) {
		/*
		 * Resume just past the previous result.  The addition can
		 * wrap to 0 at the top of the key space, which terminates
		 * the iteration below.
		 */
		key_beg = chain->bref.key +
			  ((hammer2_key_t)1 << chain->bref.keybits);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);

		/*
		 * chain invalid past this point, but we can still do a
		 * pointer comparison w/parent.
		 *
		 * Any scan where the lookup returned degenerate data embedded
		 * in the inode has an invalid index and must terminate.
		 */
		if (chain == parent)
			return(NULL);
		if (key_beg == 0 || key_beg > key_end)
			return(NULL);
		chain = NULL;
	} else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
		   parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		/*
		 * We reached the end of the iteration.
		 */
		return (NULL);
	} else {
		/*
		 * Continue iteration with next parent unless the current
		 * parent covers the range.
		 *
		 * (This also handles the case of a deleted, empty indirect
		 * node).
		 */
		key_beg = parent->bref.key +
			  ((hammer2_key_t)1 << parent->bref.keybits);
		if (key_beg == 0 || key_beg > key_end)
			return (NULL);
		parent = hammer2_chain_repparent(parentp, how_maybe);
	}

	/*
	 * And execute
	 */
	return (hammer2_chain_lookup(parentp, key_nextp,
				     key_beg, key_end,
				     errorp, flags));
}

/*
 * Caller wishes to iterate chains under parent, loading new chains into
 * chainp.  Caller must initialize *chainp to NULL and *firstp to 1, and
 * then call hammer2_chain_scan() repeatedly until a non-zero return.
 * During the scan, *firstp will be set to 0 and (*chainp) will be replaced
 * with the returned chain for the scan.  The returned *chainp will be
 * locked and referenced.  Any prior contents will be unlocked and dropped.
 *
 * Caller should check the return value.  A normal scan EOF will return
 * exactly HAMMER2_ERROR_EOF.  Any other non-zero value indicates an
 * error trying to access parent data.  Any error in the returned chain
 * must be tested separately by the caller.
 *
 * (*chainp) is dropped on each scan, but will only be set if the returned
 * element itself can recurse.  Leaf elements are NOT resolved, loaded, or
 * returned via *chainp.  The caller will get their bref only.
 *
 * The raw scan function is similar to lookup/next but does not seek to a key.
 * Blockrefs are iterated via first_bref = (parent, NULL) and
 * next_chain = (parent, bref).
 *
 * The passed-in parent must be locked and its data resolved.  The function
 * nominally returns a locked and referenced *chainp != NULL for chains
 * the caller might need to recurse on (and will dispose of any *chainp passed
 * in).  The caller must check the chain->bref.type either way.
 */
int
hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t **chainp,
		   hammer2_blockref_t *bref, int *firstp,
		   int flags)
{
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref_ptr;
	hammer2_key_t key;
	hammer2_key_t next_key;
	hammer2_chain_t *chain = NULL;
	int count = 0;
	int how;
	int generation;
	int maxloops = 300000;	/* runaway-loop guard */
	int error;

	error = 0;

	/*
	 * Scan flags borrowed from lookup.
	 */
	if (flags & HAMMER2_LOOKUP_ALWAYS) {
		how = HAMMER2_RESOLVE_ALWAYS;
	} else if (flags & HAMMER2_LOOKUP_NODATA) {
		how = HAMMER2_RESOLVE_NEVER;
	} else {
		how = HAMMER2_RESOLVE_MAYBE;
	}
	if (flags & HAMMER2_LOOKUP_SHARED) {
		how |= HAMMER2_RESOLVE_SHARED;
	}

	/*
	 * Calculate key to locate first/next element, unlocking the previous
	 * element as we go.  Be careful, the key calculation can overflow
	 * (wrap to 0), which indicates EOF.
	 *
	 * (also reset bref to NULL)
	 */
	if (*firstp) {
		key = 0;
		*firstp = 0;
	} else {
		key = bref->key + ((hammer2_key_t)1 << bref->keybits);
		if ((chain = *chainp) != NULL) {
			*chainp = NULL;
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
		}
		if (key == 0) {
			error |= HAMMER2_ERROR_EOF;
			goto done;
		}
	}

again:
	if (parent->error) {
		error = parent->error;
		goto done;
	}
	if (--maxloops == 0)
		panic("hammer2_chain_scan: maxloops");

	/*
	 * Locate the blockref array.  Currently we do a fully associative
	 * search through the array.
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * An inode with embedded data has no sub-chains.
		 *
		 * WARNING! Bulk scan code may pass a static chain marked
		 *	    as BREF_TYPE_INODE with a copy of the volume
		 *	    root blockset to snapshot the volume.
		 */
		if (parent->data->ipdata.meta.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			error |= HAMMER2_ERROR_EOF;
			goto done;
		}
		base = &parent->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.
		 */
		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
			base = NULL;
		} else {
			if (parent->data == NULL)
				panic("parent->data is NULL");
			base = &parent->data->npdata[0];
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &parent->data->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &parent->data->blkset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_scan: unrecognized blockref type: %d",
		      parent->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
		break;
	}

	/*
	 * Merged scan to find next candidate.
	 *
	 * hammer2_base_*() functions require the parent->core.live_* fields
	 * to be synchronized.
	 *
	 * We need to hold the spinlock to access the block array and RB tree
	 * and to interlock chain creation.
	 */
	if ((parent->flags & HAMMER2_CHAIN_COUNTEDBREFS) == 0)
		hammer2_chain_countbrefs(parent, base, count);

	next_key = 0;
	bref_ptr = NULL;
	hammer2_spin_ex(&parent->core.spin);
	chain = hammer2_combined_find(parent, base, count,
				      &next_key,
				      key, HAMMER2_KEY_MAX,
				      &bref_ptr);
	generation = parent->core.generation;

	/*
	 * Exhausted parent chain, we're done.
	 */
	if (bref_ptr == NULL) {
		hammer2_spin_unex(&parent->core.spin);
		KKASSERT(chain == NULL);
		error |= HAMMER2_ERROR_EOF;
		goto done;
	}

	/*
	 * Copy into the supplied stack-based blockref.
	 */
	*bref = *bref_ptr;

	/*
	 * Selected from blockref or in-memory chain.
	 */
	if (chain == NULL) {
		switch(bref->type) {
		case HAMMER2_BREF_TYPE_INODE:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_VOLUME:
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Recursion, always get the chain
			 */
			hammer2_spin_unex(&parent->core.spin);
			chain = hammer2_chain_get(parent, generation,
						  bref, how);
			if (chain == NULL)
				goto again;	/* lost insertion race */
			break;
		default:
			/*
			 * No recursion, do not waste time instantiating
			 * a chain, just iterate using the bref.
			 */
			hammer2_spin_unex(&parent->core.spin);
			break;
		}
	} else {
		/*
		 * Recursion or not we need the chain in order to supply
		 * the bref.
		 */
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&parent->core.spin);
		hammer2_chain_lock(chain, how);
	}
	/*
	 * Re-validate after locking; the chain may have moved or been
	 * modified between the spin-locked find and the chain lock.
	 */
	if (chain &&
	    (bcmp(bref, &chain->bref, sizeof(*bref)) ||
	     chain->parent != parent)) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		goto again;
	}

	/*
	 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
	 *
	 * NOTE: chain's key range is not relevant as there might be
	 *	 one-offs within the range that are not deleted.
	 *
	 * NOTE: XXX this could create problems with scans used in
	 *	 situations other than mount-time recovery.
	 *
	 * NOTE: Lookups can race delete-duplicate because
	 *	 delete-duplicate does not lock the parent's core
	 *	 (they just use the spinlock on the core).
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;

		key = next_key;
		if (key == 0) {
			error |= HAMMER2_ERROR_EOF;
			goto done;
		}
		goto again;
	}

done:
	/*
	 * All done, return the bref or NULL, supply chain if necessary.
	 */
	if (chain)
		*chainp = chain;
	return (error);
}

/*
 * Create and return a new hammer2 system memory structure of the specified
 * key, type and size and insert it under (*parentp).  This is a full
 * insertion, based on the supplied key/keybits, and may involve creating
 * indirect blocks and moving other chains around via delete/duplicate.
 *
 * This call can be made with parent == NULL as long as a non -1 methods
 * is supplied.  hmp must also be supplied in this situation (otherwise
 * hmp is extracted from the supplied parent).  The chain will be detached
 * from the topology.  A later call with both parent and chain can be made
 * to attach it.
 *
 * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
 * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
 * FULL.  This typically means that the caller is creating the chain after
 * doing a hammer2_chain_lookup().
 *
 * (*parentp) must be exclusive locked and may be replaced on return
 * depending on how much work the function had to do.
 *
 * (*parentp) must not be errored or this function will assert.
 *
 * (*chainp) usually starts out NULL and returns the newly created chain,
 * but if the caller desires the caller may allocate a disconnected chain
 * and pass it in instead.
 *
 * This function should NOT be used to insert INDIRECT blocks.  It is
 * typically used to create/insert inodes and data blocks.
3085 * 3086 * Caller must pass-in an exclusively locked parent the new chain is to 3087 * be inserted under, and optionally pass-in a disconnected, exclusively 3088 * locked chain to insert (else we create a new chain). The function will 3089 * adjust (*parentp) as necessary, create or connect the chain, and 3090 * return an exclusively locked chain in *chainp. 3091 * 3092 * When creating a PFSROOT inode under the super-root, pmp is typically NULL 3093 * and will be reassigned. 3094 * 3095 * NOTE: returns HAMMER_ERROR_* flags 3096 */ 3097 int 3098 hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp, 3099 hammer2_dev_t *hmp, hammer2_pfs_t *pmp, int methods, 3100 hammer2_key_t key, int keybits, int type, size_t bytes, 3101 hammer2_tid_t mtid, hammer2_off_t dedup_off, int flags) 3102 { 3103 hammer2_chain_t *chain; 3104 hammer2_chain_t *parent; 3105 hammer2_blockref_t *base; 3106 hammer2_blockref_t dummy; 3107 int allocated = 0; 3108 int error = 0; 3109 int count; 3110 int maxloops = 300000; 3111 3112 /* 3113 * Topology may be crossing a PFS boundary. 3114 */ 3115 parent = *parentp; 3116 if (parent) { 3117 KKASSERT(hammer2_mtx_owned(&parent->lock)); 3118 KKASSERT(parent->error == 0); 3119 hmp = parent->hmp; 3120 } 3121 chain = *chainp; 3122 3123 if (chain == NULL) { 3124 /* 3125 * First allocate media space and construct the dummy bref, 3126 * then allocate the in-memory chain structure. Set the 3127 * INITIAL flag for fresh chains which do not have embedded 3128 * data. 3129 */ 3130 bzero(&dummy, sizeof(dummy)); 3131 dummy.type = type; 3132 dummy.key = key; 3133 dummy.keybits = keybits; 3134 dummy.data_off = hammer2_getradix(bytes); 3135 3136 /* 3137 * Inherit methods from parent by default. Primarily used 3138 * for BREF_TYPE_DATA. Non-data types *must* be set to 3139 * a non-NONE check algorithm. 
3140 */ 3141 if (methods == HAMMER2_METH_DEFAULT) 3142 dummy.methods = parent->bref.methods; 3143 else 3144 dummy.methods = (uint8_t)methods; 3145 3146 if (type != HAMMER2_BREF_TYPE_DATA && 3147 HAMMER2_DEC_CHECK(dummy.methods) == HAMMER2_CHECK_NONE) { 3148 dummy.methods |= 3149 HAMMER2_ENC_CHECK(HAMMER2_CHECK_DEFAULT); 3150 } 3151 3152 chain = hammer2_chain_alloc(hmp, pmp, &dummy); 3153 3154 /* 3155 * Lock the chain manually, chain_lock will load the chain 3156 * which we do NOT want to do. (note: chain->refs is set 3157 * to 1 by chain_alloc() for us, but lockcnt is not). 3158 */ 3159 chain->lockcnt = 1; 3160 hammer2_mtx_ex(&chain->lock); 3161 allocated = 1; 3162 3163 /* 3164 * Set INITIAL to optimize I/O. The flag will generally be 3165 * processed when we call hammer2_chain_modify(). 3166 */ 3167 switch(type) { 3168 case HAMMER2_BREF_TYPE_VOLUME: 3169 case HAMMER2_BREF_TYPE_FREEMAP: 3170 panic("hammer2_chain_create: called with volume type"); 3171 break; 3172 case HAMMER2_BREF_TYPE_INDIRECT: 3173 panic("hammer2_chain_create: cannot be used to" 3174 "create indirect block"); 3175 break; 3176 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 3177 panic("hammer2_chain_create: cannot be used to" 3178 "create freemap root or node"); 3179 break; 3180 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 3181 KKASSERT(bytes == sizeof(chain->data->bmdata)); 3182 /* fall through */ 3183 case HAMMER2_BREF_TYPE_DIRENT: 3184 case HAMMER2_BREF_TYPE_INODE: 3185 case HAMMER2_BREF_TYPE_DATA: 3186 default: 3187 /* 3188 * leave chain->data NULL, set INITIAL 3189 */ 3190 KKASSERT(chain->data == NULL); 3191 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 3192 break; 3193 } 3194 } else { 3195 /* 3196 * We are reattaching a previously deleted chain, possibly 3197 * under a new parent and possibly with a new key/keybits. 3198 * The chain does not have to be in a modified state. The 3199 * UPDATE flag will be set later on in this routine. 3200 * 3201 * Do NOT mess with the current state of the INITIAL flag. 
3202 */ 3203 chain->bref.key = key; 3204 chain->bref.keybits = keybits; 3205 if (chain->flags & HAMMER2_CHAIN_DELETED) 3206 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED); 3207 KKASSERT(chain->parent == NULL); 3208 } 3209 3210 /* 3211 * Set the appropriate bref flag if requested. 3212 * 3213 * NOTE! Callers can call this function to move chains without 3214 * knowing about special flags, so don't clear bref flags 3215 * here! 3216 */ 3217 if (flags & HAMMER2_INSERT_PFSROOT) 3218 chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT; 3219 3220 if (parent == NULL) 3221 goto skip; 3222 3223 /* 3224 * Calculate how many entries we have in the blockref array and 3225 * determine if an indirect block is required when inserting into 3226 * the parent. 3227 */ 3228 again: 3229 if (--maxloops == 0) 3230 panic("hammer2_chain_create: maxloops"); 3231 3232 switch(parent->bref.type) { 3233 case HAMMER2_BREF_TYPE_INODE: 3234 if ((parent->data->ipdata.meta.op_flags & 3235 HAMMER2_OPFLAG_DIRECTDATA) != 0) { 3236 kprintf("hammer2: parent set for direct-data! 
" 3237 "pkey=%016jx ckey=%016jx\n", 3238 parent->bref.key, 3239 chain->bref.key); 3240 } 3241 KKASSERT((parent->data->ipdata.meta.op_flags & 3242 HAMMER2_OPFLAG_DIRECTDATA) == 0); 3243 KKASSERT(parent->data != NULL); 3244 base = &parent->data->ipdata.u.blockset.blockref[0]; 3245 count = HAMMER2_SET_COUNT; 3246 break; 3247 case HAMMER2_BREF_TYPE_INDIRECT: 3248 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 3249 if (parent->flags & HAMMER2_CHAIN_INITIAL) 3250 base = NULL; 3251 else 3252 base = &parent->data->npdata[0]; 3253 count = parent->bytes / sizeof(hammer2_blockref_t); 3254 break; 3255 case HAMMER2_BREF_TYPE_VOLUME: 3256 KKASSERT(parent->data != NULL); 3257 base = &parent->data->voldata.sroot_blockset.blockref[0]; 3258 count = HAMMER2_SET_COUNT; 3259 break; 3260 case HAMMER2_BREF_TYPE_FREEMAP: 3261 KKASSERT(parent->data != NULL); 3262 base = &parent->data->blkset.blockref[0]; 3263 count = HAMMER2_SET_COUNT; 3264 break; 3265 default: 3266 panic("hammer2_chain_create: unrecognized blockref type: %d", 3267 parent->bref.type); 3268 base = NULL; 3269 count = 0; 3270 break; 3271 } 3272 3273 /* 3274 * Make sure we've counted the brefs 3275 */ 3276 if ((parent->flags & HAMMER2_CHAIN_COUNTEDBREFS) == 0) 3277 hammer2_chain_countbrefs(parent, base, count); 3278 3279 KASSERT(parent->core.live_count >= 0 && 3280 parent->core.live_count <= count, 3281 ("bad live_count %d/%d (%02x, %d)", 3282 parent->core.live_count, count, 3283 parent->bref.type, parent->bytes)); 3284 3285 /* 3286 * If no free blockref could be found we must create an indirect 3287 * block and move a number of blockrefs into it. With the parent 3288 * locked we can safely lock each child in order to delete+duplicate 3289 * it without causing a deadlock. 3290 * 3291 * This may return the new indirect block or the old parent depending 3292 * on where the key falls. NULL is returned on error. 
3293 */ 3294 if (parent->core.live_count == count) { 3295 hammer2_chain_t *nparent; 3296 3297 KKASSERT((flags & HAMMER2_INSERT_SAMEPARENT) == 0); 3298 3299 nparent = hammer2_chain_create_indirect(parent, key, keybits, 3300 mtid, type, &error); 3301 if (nparent == NULL) { 3302 if (allocated) 3303 hammer2_chain_drop(chain); 3304 chain = NULL; 3305 goto done; 3306 } 3307 if (parent != nparent) { 3308 hammer2_chain_unlock(parent); 3309 hammer2_chain_drop(parent); 3310 parent = *parentp = nparent; 3311 } 3312 goto again; 3313 } 3314 3315 /* 3316 * fall through if parent, or skip to here if no parent. 3317 */ 3318 skip: 3319 if (chain->flags & HAMMER2_CHAIN_DELETED) 3320 kprintf("Inserting deleted chain @%016jx\n", 3321 chain->bref.key); 3322 3323 /* 3324 * Link the chain into its parent. 3325 */ 3326 if (chain->parent != NULL) 3327 panic("hammer2: hammer2_chain_create: chain already connected"); 3328 KKASSERT(chain->parent == NULL); 3329 if (parent) { 3330 KKASSERT(parent->core.live_count < count); 3331 hammer2_chain_insert(parent, chain, 3332 HAMMER2_CHAIN_INSERT_SPIN | 3333 HAMMER2_CHAIN_INSERT_LIVE, 3334 0); 3335 } 3336 3337 if (allocated) { 3338 /* 3339 * Mark the newly created chain modified. This will cause 3340 * UPDATE to be set and process the INITIAL flag. 3341 * 3342 * Device buffers are not instantiated for DATA elements 3343 * as these are handled by logical buffers. 3344 * 3345 * Indirect and freemap node indirect blocks are handled 3346 * by hammer2_chain_create_indirect() and not by this 3347 * function. 3348 * 3349 * Data for all other bref types is expected to be 3350 * instantiated (INODE, LEAF). 
3351 */ 3352 switch(chain->bref.type) { 3353 case HAMMER2_BREF_TYPE_DATA: 3354 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 3355 case HAMMER2_BREF_TYPE_DIRENT: 3356 case HAMMER2_BREF_TYPE_INODE: 3357 error = hammer2_chain_modify(chain, mtid, dedup_off, 3358 HAMMER2_MODIFY_OPTDATA); 3359 break; 3360 default: 3361 /* 3362 * Remaining types are not supported by this function. 3363 * In particular, INDIRECT and LEAF_NODE types are 3364 * handled by create_indirect(). 3365 */ 3366 panic("hammer2_chain_create: bad type: %d", 3367 chain->bref.type); 3368 /* NOT REACHED */ 3369 break; 3370 } 3371 } else { 3372 /* 3373 * When reconnecting a chain we must set UPDATE and 3374 * setflush so the flush recognizes that it must update 3375 * the bref in the parent. 3376 */ 3377 if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) 3378 atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 3379 } 3380 3381 /* 3382 * We must setflush(parent) to ensure that it recurses through to 3383 * chain. setflush(chain) might not work because ONFLUSH is possibly 3384 * already set in the chain (so it won't recurse up to set it in the 3385 * parent). 3386 */ 3387 if (parent) 3388 hammer2_chain_setflush(parent); 3389 3390 done: 3391 *chainp = chain; 3392 3393 return (error); 3394 } 3395 3396 /* 3397 * Move the chain from its old parent to a new parent. The chain must have 3398 * already been deleted or already disconnected (or never associated) with 3399 * a parent. The chain is reassociated with the new parent and the deleted 3400 * flag will be cleared (no longer deleted). The chain's modification state 3401 * is not altered. 3402 * 3403 * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (parent) TO THE INSERTION 3404 * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING 3405 * FULL. This typically means that the caller is creating the chain after 3406 * doing a hammer2_chain_lookup(). 3407 * 3408 * Neither (parent) or (chain) can be errored. 
 *
 * If (parent) is non-NULL then the chain is inserted under the parent.
 *
 * If (parent) is NULL then the newly duplicated chain is not inserted
 * anywhere, similar to if it had just been chain_alloc()'d (suitable for
 * passing into hammer2_chain_create() after this function returns).
 *
 * WARNING! This function calls create which means it can insert indirect
 *	    blocks.  This can cause other unrelated chains in the parent to
 *	    be moved to a newly inserted indirect block in addition to the
 *	    specific chain.
 */
void
hammer2_chain_rename(hammer2_chain_t **parentp, hammer2_chain_t *chain,
		     hammer2_tid_t mtid, int flags)
{
	hammer2_blockref_t *bref;
	hammer2_chain_t *parent;

	/*
	 * WARNING!  We should never resolve DATA to device buffers
	 *	     (XXX allow it if the caller did?), and since
	 *	     we currently do not have the logical buffer cache
	 *	     buffer in-hand to fix its cached physical offset
	 *	     we also force the modify code to not COW it. XXX
	 *
	 * NOTE!     We allow error'd chains to be renamed.  The bref itself
	 *	     is good and can be renamed.  The content, however, may
	 *	     be inaccessible.
	 */
	KKASSERT(chain->parent == NULL);
	/*KKASSERT(chain->error == 0); allow */
	bref = &chain->bref;

	/*
	 * If parent is not NULL the duplicated chain will be entered under
	 * the parent and the UPDATE bit set to tell flush to update
	 * the blockref.
	 *
	 * We must setflush(parent) to ensure that it recurses through to
	 * chain.  setflush(chain) might not work because ONFLUSH is possibly
	 * already set in the chain (so it won't recurse up to set it in the
	 * parent).
	 *
	 * Having both chains locked is extremely important for atomicity.
	 */
	if (parentp && (parent = *parentp) != NULL) {
		KKASSERT(hammer2_mtx_owned(&parent->lock));
		KKASSERT(parent->refs > 0);
		KKASSERT(parent->error == 0);

		/*
		 * Re-insert using the chain's existing bref key/keybits/type
		 * so the on-media identity of the chain is preserved.
		 */
		hammer2_chain_create(parentp, &chain, NULL, chain->pmp,
				     HAMMER2_METH_DEFAULT,
				     bref->key, bref->keybits, bref->type,
				     chain->bytes, mtid, 0, flags);
		KKASSERT(chain->flags & HAMMER2_CHAIN_UPDATE);
		hammer2_chain_setflush(*parentp);
	}
}

/*
 * This works in tandem with delete_obref() to install a blockref in
 * (typically) an indirect block that is associated with the chain being
 * moved to *parentp.
 *
 * The reason we need this function is that the caller needs to maintain
 * the blockref as it was, and not generate a new blockref for what might
 * be a modified chain.  Otherwise stuff will leak into the flush that
 * the flush code's FLUSH_INODE_STOP flag is unable to catch.
 *
 * It is EXTREMELY important that we properly set CHAIN_BLKMAPUPD and
 * CHAIN_UPDATE.  We must set BLKMAPUPD if the bref does not match, and
 * we must clear CHAIN_UPDATE (that was likely set by the chain_rename) if
 * it does.  Otherwise we can end up in a situation where H2 is unable to
 * clean up the in-memory chain topology.
 *
 * The reason for this is that flushes do not generally flush through
 * BREF_TYPE_INODE chains and depend on a hammer2_inode_t queued to syncq
 * or sideq to properly flush and dispose of the related inode chain's flags.
 * Situations where the inode is not actually modified by the frontend,
 * but where we have to move the related chains around as we insert or cleanup
 * indirect blocks, can leave us with a 'dirty' (non-disposable) in-memory
 * inode chain that does not have a hammer2_inode_t associated with it.
 */
static void
hammer2_chain_rename_obref(hammer2_chain_t **parentp, hammer2_chain_t *chain,
			   hammer2_tid_t mtid, int flags,
			   hammer2_blockref_t *obref)
{
	hammer2_chain_rename(parentp, chain, mtid, flags);

	if (obref->type != HAMMER2_BREF_TYPE_EMPTY) {
		hammer2_blockref_t *tbase;
		int tcount;

		/*
		 * Re-install the original (saved) blockref into the new
		 * parent's blockmap, then reconcile the BLKMAPUPD/UPDATE
		 * flags against the chain's current bref (see the header
		 * comment above for why this matters).
		 *
		 * NOTE(review): the hammer2_chain_modify() error return is
		 * not checked here -- confirm intentional.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_BLKMAPPED) == 0);
		hammer2_chain_modify(*parentp, mtid, 0, 0);
		tbase = hammer2_chain_base_and_count(*parentp, &tcount);
		hammer2_base_insert(*parentp, tbase, tcount, chain, obref);
		if (bcmp(obref, &chain->bref, sizeof(chain->bref))) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_BLKMAPUPD |
						      HAMMER2_CHAIN_UPDATE);
		} else {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
	}
}

/*
 * Helper function for deleting chains.
 *
 * The chain is removed from the live view (the RBTREE) as well as the parent's
 * blockmap.  Both chain and its parent must be locked.
 *
 * parent may not be errored.  chain can be errored.
 *
 * Returns 0 on success or a HAMMER2_ERROR_* code if the parent could not
 * be marked modified (the chain is left untouched in that case).
 */
static int
_hammer2_chain_delete_helper(hammer2_chain_t *parent, hammer2_chain_t *chain,
			     hammer2_tid_t mtid, int flags,
			     hammer2_blockref_t *obref)
{
	int error = 0;

	KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
	KKASSERT(chain->parent == parent);

	if (chain->flags & HAMMER2_CHAIN_BLKMAPPED) {
		/*
		 * Chain is blockmapped, so there must be a parent.
		 * Atomically remove the chain from the parent and remove
		 * the blockmap entry.  The parent must be set modified
		 * to remove the blockmap entry.
		 */
		hammer2_blockref_t *base;
		int count;

		KKASSERT(parent != NULL);
		KKASSERT(parent->error == 0);
		KKASSERT((parent->flags & HAMMER2_CHAIN_INITIAL) == 0);
		error = hammer2_chain_modify(parent, mtid, 0, 0);
		if (error)
			goto done;

		/*
		 * Calculate blockmap pointer.  Both spinlocks are held
		 * across the RB removal and the blockmap deletion so the
		 * two stay consistent with respect to concurrent lookups.
		 */
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
		hammer2_spin_ex(&chain->core.spin);
		hammer2_spin_ex(&parent->core.spin);

		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
		atomic_add_int(&parent->core.live_count, -1);
		++parent->core.generation;
		RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
		--parent->core.chain_count;
		chain->parent = NULL;

		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there
			 * is no block array if the inode is flagged
			 * DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base =
				   &parent->data->ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &parent->data->voldata.
					sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->blkset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("_hammer2_chain_delete_helper: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			break;
		}

		/*
		 * delete blockmapped chain from its parent.
		 */
		if (base) {
			hammer2_base_delete(parent, base, count, chain, obref);
		}
		hammer2_spin_unex(&parent->core.spin);
		hammer2_spin_unex(&chain->core.spin);
	} else if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
		/*
		 * Chain is not blockmapped but a parent is present.
		 * Atomically remove the chain from the parent.  There is
		 * no blockmap entry to remove.
		 */
		hammer2_spin_ex(&chain->core.spin);
		hammer2_spin_ex(&parent->core.spin);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
		atomic_add_int(&parent->core.live_count, -1);
		++parent->core.generation;
		RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
		--parent->core.chain_count;
		chain->parent = NULL;
		hammer2_spin_unex(&parent->core.spin);
		hammer2_spin_unex(&chain->core.spin);
	} else {
		/*
		 * Chain is not blockmapped and has no parent.  This
		 * is a degenerate case.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
	}
done:
	return error;
}

/*
 * Create an indirect block that covers one or more of the elements in the
 * current parent.  Either returns the existing parent with no locking or
 * ref changes or returns the new indirect block locked and referenced
 * and leaving the original parent lock/ref intact as well.
 *
 * If an error occurs, NULL is returned and *errorp is set to the H2 error.
 *
 * The returned chain depends on where the specified key falls.
 *
 * The key/keybits for the indirect mode only needs to follow four rules:
 *
 * (1) That all elements underneath it fit within its key space and
 *
 * (2) That all elements outside it are outside its key space.
 *
 * (3) When creating the new indirect block any elements in the current
 *     parent that fit within the new indirect block's keyspace must be
 *     moved into the new indirect block.
 *
 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
 *     keyspace than the current parent, but lookup/iteration rules will
 *     ensure (and must ensure) that rule (2) for all parents leading up
 *     to the nearest inode or the root volume header is adhered to.  This
 *     is accomplished by always recursing through matching keyspaces in
 *     the hammer2_chain_lookup() and hammer2_chain_next() API.
 *
 * The current implementation calculates the current worst-case keyspace by
 * iterating the current parent and then divides it into two halves, choosing
 * whichever half has the most elements (not necessarily the half containing
 * the requested key).
 *
 * We can also opt to use the half with the least number of elements.  This
 * causes lower-numbered keys (aka logical file offsets) to recurse through
 * fewer indirect blocks and higher-numbered keys to recurse through more.
 * This also has the risk of not moving enough elements to the new indirect
 * block and being forced to create several indirect blocks before the element
 * can be inserted.
 *
 * Must be called with an exclusively locked parent.
3686 * 3687 * NOTE: *errorp set to HAMMER_ERROR_* flags 3688 */ 3689 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent, 3690 hammer2_key_t *keyp, int keybits, 3691 hammer2_blockref_t *base, int count); 3692 static int hammer2_chain_indkey_file(hammer2_chain_t *parent, 3693 hammer2_key_t *keyp, int keybits, 3694 hammer2_blockref_t *base, int count, 3695 int ncount); 3696 static int hammer2_chain_indkey_dir(hammer2_chain_t *parent, 3697 hammer2_key_t *keyp, int keybits, 3698 hammer2_blockref_t *base, int count, 3699 int ncount); 3700 static 3701 hammer2_chain_t * 3702 hammer2_chain_create_indirect(hammer2_chain_t *parent, 3703 hammer2_key_t create_key, int create_bits, 3704 hammer2_tid_t mtid, int for_type, int *errorp) 3705 { 3706 hammer2_dev_t *hmp; 3707 hammer2_blockref_t *base; 3708 hammer2_blockref_t *bref; 3709 hammer2_blockref_t bsave; 3710 hammer2_blockref_t dummy; 3711 hammer2_chain_t *chain; 3712 hammer2_chain_t *ichain; 3713 hammer2_key_t key = create_key; 3714 hammer2_key_t key_beg; 3715 hammer2_key_t key_end; 3716 hammer2_key_t key_next; 3717 int keybits = create_bits; 3718 int count; 3719 int ncount; 3720 int nbytes; 3721 int loops; 3722 int error; 3723 int reason; 3724 int generation; 3725 int maxloops = 300000; 3726 3727 /* 3728 * Calculate the base blockref pointer or NULL if the chain 3729 * is known to be empty. We need to calculate the array count 3730 * for RB lookups either way. 3731 */ 3732 hmp = parent->hmp; 3733 KKASSERT(hammer2_mtx_owned(&parent->lock)); 3734 3735 /* 3736 * Pre-modify the parent now to avoid having to deal with error 3737 * processing if we tried to later (in the middle of our loop). 3738 * 3739 * We are going to be moving bref's around, the indirect blocks 3740 * cannot be in an initial state. Do not pass MODIFY_OPTDATA. 
3741 */ 3742 *errorp = hammer2_chain_modify(parent, mtid, 0, 0); 3743 if (*errorp) { 3744 kprintf("hammer2_chain_create_indirect: error %08x %s\n", 3745 *errorp, hammer2_error_str(*errorp)); 3746 return NULL; 3747 } 3748 KKASSERT((parent->flags & HAMMER2_CHAIN_INITIAL) == 0); 3749 3750 /*hammer2_chain_modify(&parent, HAMMER2_MODIFY_OPTDATA);*/ 3751 base = hammer2_chain_base_and_count(parent, &count); 3752 3753 /* 3754 * How big should our new indirect block be? It has to be at least 3755 * as large as its parent for splits to work properly. 3756 * 3757 * The freemap uses a specific indirect block size. The number of 3758 * levels are built dynamically and ultimately depend on the size 3759 * volume. Because freemap blocks are taken from the reserved areas 3760 * of the volume our goal is efficiency (fewer levels) and not so 3761 * much to save disk space. 3762 * 3763 * The first indirect block level for a directory usually uses 3764 * HAMMER2_IND_BYTES_MIN (4KB = 32 directory entries). Due to 3765 * the hash mechanism, this typically gives us a nominal 3766 * 32 * 4 entries with one level of indirection. 3767 * 3768 * We use HAMMER2_IND_BYTES_NOM (16KB = 128 blockrefs) for FILE 3769 * indirect blocks. The initial 4 entries in the inode gives us 3770 * 256KB. Up to 4 indirect blocks gives us 32MB. Three levels 3771 * of indirection gives us 137GB, and so forth. H2 can support 3772 * huge file sizes but they are not typical, so we try to stick 3773 * with compactness and do not use a larger indirect block size. 3774 * 3775 * We could use 64KB (PBUFSIZE), giving us 512 blockrefs, but 3776 * due to the way indirect blocks are created this usually winds 3777 * up being extremely inefficient for small files. Even though 3778 * 16KB requires more levels of indirection for very large files, 3779 * the 16KB records can be ganged together into 64KB DIOs. 
3780 */ 3781 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE || 3782 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { 3783 nbytes = HAMMER2_FREEMAP_LEVELN_PSIZE; 3784 } else if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) { 3785 if (parent->data->ipdata.meta.type == 3786 HAMMER2_OBJTYPE_DIRECTORY) 3787 nbytes = HAMMER2_IND_BYTES_MIN; /* 4KB = 32 entries */ 3788 else 3789 nbytes = HAMMER2_IND_BYTES_NOM; /* 16KB = ~8MB file */ 3790 3791 } else { 3792 nbytes = HAMMER2_IND_BYTES_NOM; 3793 } 3794 if (nbytes < count * sizeof(hammer2_blockref_t)) { 3795 KKASSERT(for_type != HAMMER2_BREF_TYPE_FREEMAP_NODE && 3796 for_type != HAMMER2_BREF_TYPE_FREEMAP_LEAF); 3797 nbytes = count * sizeof(hammer2_blockref_t); 3798 } 3799 ncount = nbytes / sizeof(hammer2_blockref_t); 3800 3801 /* 3802 * When creating an indirect block for a freemap node or leaf 3803 * the key/keybits must be fitted to static radix levels because 3804 * particular radix levels use particular reserved blocks in the 3805 * related zone. 3806 * 3807 * This routine calculates the key/radix of the indirect block 3808 * we need to create, and whether it is on the high-side or the 3809 * low-side. 3810 */ 3811 switch(for_type) { 3812 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 3813 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 3814 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits, 3815 base, count); 3816 break; 3817 case HAMMER2_BREF_TYPE_DATA: 3818 keybits = hammer2_chain_indkey_file(parent, &key, keybits, 3819 base, count, ncount); 3820 break; 3821 case HAMMER2_BREF_TYPE_DIRENT: 3822 case HAMMER2_BREF_TYPE_INODE: 3823 keybits = hammer2_chain_indkey_dir(parent, &key, keybits, 3824 base, count, ncount); 3825 break; 3826 default: 3827 panic("illegal indirect block for bref type %d", for_type); 3828 break; 3829 } 3830 3831 /* 3832 * Normalize the key for the radix being represented, keeping the 3833 * high bits and throwing away the low bits. 
3834 */ 3835 key &= ~(((hammer2_key_t)1 << keybits) - 1); 3836 3837 /* 3838 * Ok, create our new indirect block 3839 */ 3840 bzero(&dummy, sizeof(dummy)); 3841 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE || 3842 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { 3843 dummy.type = HAMMER2_BREF_TYPE_FREEMAP_NODE; 3844 } else { 3845 dummy.type = HAMMER2_BREF_TYPE_INDIRECT; 3846 } 3847 dummy.key = key; 3848 dummy.keybits = keybits; 3849 dummy.data_off = hammer2_getradix(nbytes); 3850 dummy.methods = 3851 HAMMER2_ENC_CHECK(HAMMER2_DEC_CHECK(parent->bref.methods)) | 3852 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE); 3853 3854 ichain = hammer2_chain_alloc(hmp, parent->pmp, &dummy); 3855 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL); 3856 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE); 3857 /* ichain has one ref at this point */ 3858 3859 /* 3860 * We have to mark it modified to allocate its block, but use 3861 * OPTDATA to allow it to remain in the INITIAL state. Otherwise 3862 * it won't be acted upon by the flush code. 3863 * 3864 * XXX remove OPTDATA, we need a fully initialized indirect block to 3865 * be able to move the original blockref. 3866 */ 3867 *errorp = hammer2_chain_modify(ichain, mtid, 0, 0); 3868 if (*errorp) { 3869 kprintf("hammer2_chain_create_indirect: error %08x %s\n", 3870 *errorp, hammer2_error_str(*errorp)); 3871 hammer2_chain_unlock(ichain); 3872 hammer2_chain_drop(ichain); 3873 return NULL; 3874 } 3875 KKASSERT((ichain->flags & HAMMER2_CHAIN_INITIAL) == 0); 3876 3877 /* 3878 * Iterate the original parent and move the matching brefs into 3879 * the new indirect block. 3880 * 3881 * XXX handle flushes. 3882 */ 3883 key_beg = 0; 3884 key_end = HAMMER2_KEY_MAX; 3885 key_next = 0; /* avoid gcc warnings */ 3886 hammer2_spin_ex(&parent->core.spin); 3887 loops = 0; 3888 reason = 0; 3889 3890 for (;;) { 3891 /* 3892 * Parent may have been modified, relocating its block array. 3893 * Reload the base pointer. 
3894 */ 3895 base = hammer2_chain_base_and_count(parent, &count); 3896 3897 if (++loops > 100000) { 3898 hammer2_spin_unex(&parent->core.spin); 3899 panic("excessive loops r=%d p=%p base/count %p:%d %016jx\n", 3900 reason, parent, base, count, key_next); 3901 } 3902 3903 /* 3904 * NOTE: spinlock stays intact, returned chain (if not NULL) 3905 * is not referenced or locked which means that we 3906 * cannot safely check its flagged / deletion status 3907 * until we lock it. 3908 */ 3909 chain = hammer2_combined_find(parent, base, count, 3910 &key_next, 3911 key_beg, key_end, 3912 &bref); 3913 generation = parent->core.generation; 3914 if (bref == NULL) 3915 break; 3916 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits); 3917 3918 /* 3919 * Skip keys that are not within the key/radix of the new 3920 * indirect block. They stay in the parent. 3921 */ 3922 if (rounddown2(key ^ bref->key, (hammer2_key_t)1 << keybits) != 0) { 3923 goto next_key_spinlocked; 3924 } 3925 3926 /* 3927 * Load the new indirect block by acquiring the related 3928 * chains (potentially from media as it might not be 3929 * in-memory). Then move it to the new parent (ichain). 3930 * 3931 * chain is referenced but not locked. We must lock the 3932 * chain to obtain definitive state. 3933 */ 3934 bsave = *bref; 3935 if (chain) { 3936 /* 3937 * Use chain already present in the RBTREE 3938 */ 3939 hammer2_chain_ref(chain); 3940 hammer2_spin_unex(&parent->core.spin); 3941 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER); 3942 } else { 3943 /* 3944 * Get chain for blockref element. _get returns NULL 3945 * on insertion race. 3946 */ 3947 hammer2_spin_unex(&parent->core.spin); 3948 chain = hammer2_chain_get(parent, generation, &bsave, 3949 HAMMER2_RESOLVE_NEVER); 3950 if (chain == NULL) { 3951 reason = 1; 3952 hammer2_spin_ex(&parent->core.spin); 3953 continue; 3954 } 3955 } 3956 3957 /* 3958 * This is always live so if the chain has been deleted 3959 * we raced someone and we have to retry. 
3960 * 3961 * NOTE: Lookups can race delete-duplicate because 3962 * delete-duplicate does not lock the parent's core 3963 * (they just use the spinlock on the core). 3964 * 3965 * (note reversed logic for this one) 3966 */ 3967 if (bcmp(&bsave, &chain->bref, sizeof(bsave)) || 3968 chain->parent != parent || 3969 (chain->flags & HAMMER2_CHAIN_DELETED)) { 3970 hammer2_chain_unlock(chain); 3971 hammer2_chain_drop(chain); 3972 if (hammer2_debug & 0x0040) { 3973 kprintf("LOST PARENT RETRY " 3974 "RETRY (%p,%p)->%p %08x\n", 3975 parent, chain->parent, chain, chain->flags); 3976 } 3977 hammer2_spin_ex(&parent->core.spin); 3978 continue; 3979 } 3980 3981 /* 3982 * Shift the chain to the indirect block. 3983 * 3984 * WARNING! The (parent, chain) deletion may modify the parent 3985 * and invalidate the base pointer. 3986 * 3987 * WARNING! Parent must already be marked modified, so we 3988 * can assume that chain_delete always suceeds. 3989 * 3990 * WARNING! hammer2_chain_repchange() does not have to be 3991 * called (and doesn't work anyway because we are 3992 * only doing a partial shift). A recursion that is 3993 * in-progress can continue at the current parent 3994 * and will be able to properly find its next key. 3995 */ 3996 error = hammer2_chain_delete_obref(parent, chain, mtid, 0, 3997 &bsave); 3998 KKASSERT(error == 0); 3999 hammer2_chain_rename_obref(&ichain, chain, mtid, 0, &bsave); 4000 hammer2_chain_unlock(chain); 4001 hammer2_chain_drop(chain); 4002 KKASSERT(parent->refs > 0); 4003 chain = NULL; 4004 base = NULL; /* safety */ 4005 hammer2_spin_ex(&parent->core.spin); 4006 next_key_spinlocked: 4007 if (--maxloops == 0) 4008 panic("hammer2_chain_create_indirect: maxloops"); 4009 reason = 4; 4010 if (key_next == 0 || key_next > key_end) 4011 break; 4012 key_beg = key_next; 4013 /* loop */ 4014 } 4015 hammer2_spin_unex(&parent->core.spin); 4016 4017 /* 4018 * Insert the new indirect block into the parent now that we've 4019 * cleared out some entries in the parent. 
We calculated a good 4020 * insertion index in the loop above (ichain->index). 4021 * 4022 * We don't have to set UPDATE here because we mark ichain 4023 * modified down below (so the normal modified -> flush -> set-moved 4024 * sequence applies). 4025 * 4026 * The insertion shouldn't race as this is a completely new block 4027 * and the parent is locked. 4028 */ 4029 base = NULL; /* safety, parent modify may change address */ 4030 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0); 4031 KKASSERT(parent->core.live_count < count); 4032 hammer2_chain_insert(parent, ichain, 4033 HAMMER2_CHAIN_INSERT_SPIN | 4034 HAMMER2_CHAIN_INSERT_LIVE, 4035 0); 4036 4037 /* 4038 * Make sure flushes propogate after our manual insertion. 4039 */ 4040 hammer2_chain_setflush(ichain); 4041 hammer2_chain_setflush(parent); 4042 4043 /* 4044 * Figure out what to return. 4045 */ 4046 if (rounddown2(create_key ^ key, (hammer2_key_t)1 << keybits) != 0) { 4047 /* 4048 * Key being created is outside the key range, 4049 * return the original parent. 4050 */ 4051 hammer2_chain_unlock(ichain); 4052 hammer2_chain_drop(ichain); 4053 } else { 4054 /* 4055 * Otherwise its in the range, return the new parent. 4056 * (leave both the new and old parent locked). 4057 */ 4058 parent = ichain; 4059 } 4060 4061 return(parent); 4062 } 4063 4064 /* 4065 * Do maintenance on an indirect chain. Both parent and chain are locked. 4066 * 4067 * Returns non-zero if (chain) is deleted, either due to being empty or 4068 * because its children were safely moved into the parent. 
 */
int
hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
				   hammer2_chain_t *chain)
{
	hammer2_blockref_t *chain_base;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_blockref_t bsave;
	hammer2_key_t key_next;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_chain_t *sub;
	int chain_count;
	int count;
	int error;
	int generation;

	/*
	 * Make sure we have an accurate live_count.  Counting brefs
	 * requires neither INITIAL (no media backing yet) nor an already
	 * COUNTEDBREFS chain.
	 */
	if ((chain->flags & (HAMMER2_CHAIN_INITIAL |
			     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
		base = &chain->data->npdata[0];
		count = chain->bytes / sizeof(hammer2_blockref_t);
		hammer2_chain_countbrefs(chain, base, count);
	}

	/*
	 * If the indirect block is empty we can delete it.
	 * (ignore deletion error)
	 */
	if (chain->core.live_count == 0 && RB_EMPTY(&chain->core.rbtree)) {
		hammer2_chain_delete(parent, chain,
				     chain->bref.modify_tid,
				     HAMMER2_DELETE_PERMANENT);
		hammer2_chain_repchange(parent, chain);
		return 1;
	}

	base = hammer2_chain_base_and_count(parent, &count);

	if ((parent->flags & (HAMMER2_CHAIN_INITIAL |
			      HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
		hammer2_chain_countbrefs(parent, base, count);
	}

	/*
	 * Determine if we can collapse chain into parent, calculate
	 * hysteresis for chain emptiness.
	 *
	 * The -1 accounts for chain's own bref being removed from the
	 * parent when the collapse occurs.
	 */
	if (parent->core.live_count + chain->core.live_count - 1 > count)
		return 0;
	chain_count = chain->bytes / sizeof(hammer2_blockref_t);
	if (chain->core.live_count > chain_count * 3 / 4)
		return 0;

	/*
	 * Ok, theoretically we can collapse chain's contents into
	 * parent.  chain is locked, but any in-memory children of chain
	 * are not.  For this to work, we must be able to dispose of any
	 * in-memory children of chain.
	 *
	 * For now require that there are no in-memory children of chain.
	 *
	 * WARNING! Both chain and parent must remain locked across this
	 *	    entire operation.
	 */

	/*
	 * Parent must be marked modified.  Don't try to collapse it if we
	 * can't mark it modified.  Once modified, destroy chain to make room
	 * and to get rid of what will be a conflicting key (this is included
	 * in the calculation above).  Finally, move the children of chain
	 * into chain's parent.
	 *
	 * This order creates an accounting problem for bref.embed.stats
	 * because we destroy chain before we remove its children.  Any
	 * elements whose blockref is already synchronized will be counted
	 * twice.  To deal with the problem we clean out chain's stats prior
	 * to deleting it.
	 */
	error = hammer2_chain_modify(parent, 0, 0, 0);
	if (error) {
		krateprintf(&krate_h2me, "hammer2: indirect_maint: %s\n",
			    hammer2_error_str(error));
		return 0;
	}
	error = hammer2_chain_modify(chain, chain->bref.modify_tid, 0, 0);
	if (error) {
		krateprintf(&krate_h2me, "hammer2: indirect_maint: %s\n",
			    hammer2_error_str(error));
		return 0;
	}

	chain->bref.embed.stats.inode_count = 0;
	chain->bref.embed.stats.data_count = 0;
	error = hammer2_chain_delete(parent, chain,
				     chain->bref.modify_tid,
				     HAMMER2_DELETE_PERMANENT);
	KKASSERT(error == 0);

	/*
	 * The combined_find call requires core.spin to be held.  One would
	 * think there wouldn't be any conflicts since we hold chain
	 * exclusively locked, but the caching mechanism for 0-ref children
	 * does not require a chain lock.
	 */
	hammer2_spin_ex(&chain->core.spin);

	key_next = 0;
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	for (;;) {
		/*
		 * Reload base/count each pass; chain may have been
		 * modified while the spinlock was released below.
		 */
		chain_base = &chain->data->npdata[0];
		chain_count = chain->bytes / sizeof(hammer2_blockref_t);
		sub = hammer2_combined_find(chain, chain_base, chain_count,
					    &key_next,
					    key_beg, key_end,
					    &bref);
		generation = chain->core.generation;
		if (bref == NULL)
			break;
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Acquire a ref+lock on the child.  The spinlock must be
		 * dropped before locking, so the child's state must be
		 * re-validated afterwards (below).
		 */
		bsave = *bref;
		if (sub) {
			hammer2_chain_ref(sub);
			hammer2_spin_unex(&chain->core.spin);
			hammer2_chain_lock(sub, HAMMER2_RESOLVE_NEVER);
		} else {
			hammer2_spin_unex(&chain->core.spin);
			sub = hammer2_chain_get(chain, generation, &bsave,
						HAMMER2_RESOLVE_NEVER);
			if (sub == NULL) {
				/* insertion race, retry from same key */
				hammer2_spin_ex(&chain->core.spin);
				continue;
			}
		}

		/*
		 * We raced a change to the child while the spinlock was
		 * released; drop it and retry the same key range.
		 */
		if (bcmp(&bsave, &sub->bref, sizeof(bsave)) ||
		    sub->parent != chain ||
		    (sub->flags & HAMMER2_CHAIN_DELETED)) {
			hammer2_chain_unlock(sub);
			hammer2_chain_drop(sub);
			hammer2_spin_ex(&chain->core.spin);
			sub = NULL;	/* safety */
			continue;
		}

		/*
		 * Move the child from chain into parent (the collapse).
		 */
		error = hammer2_chain_delete_obref(chain, sub,
						   sub->bref.modify_tid, 0,
						   &bsave);
		KKASSERT(error == 0);
		hammer2_chain_rename_obref(&parent, sub,
					   sub->bref.modify_tid,
					   HAMMER2_INSERT_SAMEPARENT, &bsave);
		hammer2_chain_unlock(sub);
		hammer2_chain_drop(sub);
		hammer2_spin_ex(&chain->core.spin);

		if (key_next == 0)
			break;
		key_beg = key_next;
	}
	hammer2_spin_unex(&chain->core.spin);

	hammer2_chain_repchange(parent, chain);

	return 1;
}

/*
 * Freemap indirect blocks
 *
 * Calculate the keybits and highside/lowside of the freemap node the
 * caller is creating.
 *
 * This routine will specify the next higher-level freemap key/radix
 * representing the lowest-ordered set.
By doing so, eventually all 4247 * low-ordered sets will be moved one level down. 4248 * 4249 * We have to be careful here because the freemap reserves a limited 4250 * number of blocks for a limited number of levels. So we can't just 4251 * push indiscriminately. 4252 */ 4253 int 4254 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp, 4255 int keybits, hammer2_blockref_t *base, int count) 4256 { 4257 hammer2_chain_t *chain; 4258 hammer2_blockref_t *bref; 4259 hammer2_key_t key; 4260 hammer2_key_t key_beg; 4261 hammer2_key_t key_end; 4262 hammer2_key_t key_next; 4263 int maxloops = 300000; 4264 4265 key = *keyp; 4266 keybits = 64; 4267 4268 /* 4269 * Calculate the range of keys in the array being careful to skip 4270 * slots which are overridden with a deletion. 4271 */ 4272 key_beg = 0; 4273 key_end = HAMMER2_KEY_MAX; 4274 hammer2_spin_ex(&parent->core.spin); 4275 4276 for (;;) { 4277 if (--maxloops == 0) { 4278 panic("indkey_freemap shit %p %p:%d\n", 4279 parent, base, count); 4280 } 4281 chain = hammer2_combined_find(parent, base, count, 4282 &key_next, 4283 key_beg, key_end, 4284 &bref); 4285 4286 /* 4287 * Exhausted search 4288 */ 4289 if (bref == NULL) 4290 break; 4291 4292 /* 4293 * Skip deleted chains. 4294 */ 4295 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) { 4296 if (key_next == 0 || key_next > key_end) 4297 break; 4298 key_beg = key_next; 4299 continue; 4300 } 4301 4302 /* 4303 * Use the full live (not deleted) element for the scan 4304 * iteration. HAMMER2 does not allow partial replacements. 4305 * 4306 * XXX should be built into hammer2_combined_find(). 
4307 */ 4308 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits); 4309 4310 if (keybits > bref->keybits) { 4311 key = bref->key; 4312 keybits = bref->keybits; 4313 } else if (keybits == bref->keybits && bref->key < key) { 4314 key = bref->key; 4315 } 4316 if (key_next == 0) 4317 break; 4318 key_beg = key_next; 4319 } 4320 hammer2_spin_unex(&parent->core.spin); 4321 4322 /* 4323 * Return the keybits for a higher-level FREEMAP_NODE covering 4324 * this node. 4325 */ 4326 switch(keybits) { 4327 case HAMMER2_FREEMAP_LEVEL0_RADIX: 4328 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX; 4329 break; 4330 case HAMMER2_FREEMAP_LEVEL1_RADIX: 4331 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX; 4332 break; 4333 case HAMMER2_FREEMAP_LEVEL2_RADIX: 4334 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX; 4335 break; 4336 case HAMMER2_FREEMAP_LEVEL3_RADIX: 4337 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX; 4338 break; 4339 case HAMMER2_FREEMAP_LEVEL4_RADIX: 4340 keybits = HAMMER2_FREEMAP_LEVEL5_RADIX; 4341 break; 4342 case HAMMER2_FREEMAP_LEVEL5_RADIX: 4343 panic("hammer2_chain_indkey_freemap: level too high"); 4344 break; 4345 default: 4346 panic("hammer2_chain_indkey_freemap: bad radix"); 4347 break; 4348 } 4349 *keyp = key; 4350 4351 return (keybits); 4352 } 4353 4354 /* 4355 * File indirect blocks 4356 * 4357 * Calculate the key/keybits for the indirect block to create by scanning 4358 * existing keys. The key being created is also passed in *keyp and can be 4359 * inside or outside the indirect block. Regardless, the indirect block 4360 * must hold at least two keys in order to guarantee sufficient space. 4361 * 4362 * We use a modified version of the freemap's fixed radix tree, but taylored 4363 * for file data. Basically we configure an indirect block encompassing the 4364 * smallest key. 
 */
static int
hammer2_chain_indkey_file(hammer2_chain_t *parent, hammer2_key_t *keyp,
			  int keybits, hammer2_blockref_t *base, int count,
			  int ncount)
{
	hammer2_chain_t *chain;
	hammer2_blockref_t *bref;
	hammer2_key_t key;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_key_t key_next;
	int nradix;
	int maxloops = 300000;

	key = *keyp;
	keybits = 64;		/* wider than any real element radix */

	/*
	 * Calculate the range of keys in the array being careful to skip
	 * slots which are overridden with a deletion.
	 *
	 * Locate the smallest key.
	 */
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	hammer2_spin_ex(&parent->core.spin);

	for (;;) {
		if (--maxloops == 0) {
			panic("indkey_freemap shit %p %p:%d\n",
			      parent, base, count);
		}
		chain = hammer2_combined_find(parent, base, count,
					      &key_next,
					      key_beg, key_end,
					      &bref);

		/*
		 * Exhausted search
		 */
		if (bref == NULL)
			break;

		/*
		 * Skip deleted chains.
		 */
		if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
			if (key_next == 0 || key_next > key_end)
				break;
			key_beg = key_next;
			continue;
		}

		/*
		 * Use the full live (not deleted) element for the scan
		 * iteration.  HAMMER2 does not allow partial replacements.
		 *
		 * XXX should be built into hammer2_combined_find().
		 */
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Track the smallest (key, keybits): smallest radix wins,
		 * and among equal radices the smallest key wins.
		 */
		if (keybits > bref->keybits) {
			key = bref->key;
			keybits = bref->keybits;
		} else if (keybits == bref->keybits && bref->key < key) {
			key = bref->key;
		}
		if (key_next == 0)
			break;
		key_beg = key_next;
	}
	hammer2_spin_unex(&parent->core.spin);

	/*
	 * Calculate the static keybits for a higher-level indirect block
	 * that contains the key.
	 */
	*keyp = key;

	switch(ncount) {
	case HAMMER2_IND_COUNT_MIN:
		nradix = HAMMER2_IND_RADIX_MIN - HAMMER2_BLOCKREF_RADIX;
		break;
	case HAMMER2_IND_COUNT_NOM:
		nradix = HAMMER2_IND_RADIX_NOM - HAMMER2_BLOCKREF_RADIX;
		break;
	case HAMMER2_IND_COUNT_MAX:
		nradix = HAMMER2_IND_RADIX_MAX - HAMMER2_BLOCKREF_RADIX;
		break;
	default:
		panic("bad ncount %d\n", ncount);
		nradix = 0;
		break;
	}

	/*
	 * The largest radix that can be returned for an indirect block is
	 * 63 bits.  (The largest practical indirect block radix is actually
	 * 62 bits because the top-level inode or volume root contains four
	 * entries, but allow 63 to be returned).
	 */
	if (nradix >= 64)
		nradix = 63;

	return keybits + nradix;
}

#if 1

/*
 * Directory indirect blocks.
 *
 * Covers both the inode index (directory of inodes), and directory contents
 * (filenames hardlinked to inodes).
 *
 * Because directory keys are hashed we generally try to cut the space in
 * half.  We accommodate the inode index (which tends to have linearly
 * increasing inode numbers) by ensuring that the keyspace is at least large
 * enough to fill up the indirect block being created.
 */
static int
hammer2_chain_indkey_dir(hammer2_chain_t *parent, hammer2_key_t *keyp,
			 int keybits, hammer2_blockref_t *base, int count,
			 int ncount)
{
	hammer2_blockref_t *bref;
	hammer2_chain_t	*chain;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_key_t key_next;
	hammer2_key_t key;
	int nkeybits;
	int locount;
	int hicount;
	int maxloops = 300000;

	/*
	 * NOTE: We can't take a shortcut here anymore for inodes because
	 *	 the root directory can contain a mix of inodes and directory
	 *	 entries (we used to just return 63 if parent->bref.type was
	 *	 HAMMER2_BREF_TYPE_INODE.
	 */
	key = *keyp;
	locount = 0;
	hicount = 0;

	/*
	 * Calculate the range of keys in the array being careful to skip
	 * slots which are overridden with a deletion.
	 */
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	hammer2_spin_ex(&parent->core.spin);

	for (;;) {
		if (--maxloops == 0) {
			panic("indkey_freemap shit %p %p:%d\n",
			      parent, base, count);
		}
		chain = hammer2_combined_find(parent, base, count,
					      &key_next,
					      key_beg, key_end,
					      &bref);

		/*
		 * Exhausted search
		 */
		if (bref == NULL)
			break;

		/*
		 * Deleted object
		 */
		if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
			if (key_next == 0 || key_next > key_end)
				break;
			key_beg = key_next;
			continue;
		}

		/*
		 * Use the full live (not deleted) element for the scan
		 * iteration.  HAMMER2 does not allow partial replacements.
		 *
		 * XXX should be built into hammer2_combined_find().
		 */
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Expand our calculated key range (key, keybits) to fit
		 * the scanned key.  nkeybits represents the full range
		 * that we will later cut in half (two halves @ nkeybits - 1).
		 */
		nkeybits = keybits;
		if (nkeybits < bref->keybits) {
			if (bref->keybits > 64) {
				kprintf("bad bref chain %p bref %p\n",
					chain, bref);
				Debugger("fubar");
			}
			nkeybits = bref->keybits;
		}
		while (nkeybits < 64 &&
		       rounddown2(key ^ bref->key, (hammer2_key_t)1 << nkeybits) != 0) {
			++nkeybits;
		}

		/*
		 * If the new key range is larger we have to determine
		 * which side of the new key range the existing keys fall
		 * under by checking the high bit, then collapsing the
		 * locount into the hicount or vice-versa.
		 */
		if (keybits != nkeybits) {
			if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
				hicount += locount;
				locount = 0;
			} else {
				locount += hicount;
				hicount = 0;
			}
			keybits = nkeybits;
		}

		/*
		 * The newly scanned key will be in the lower half or the
		 * upper half of the (new) key range.
		 */
		if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
			++hicount;
		else
			++locount;

		if (key_next == 0)
			break;
		key_beg = key_next;
	}
	hammer2_spin_unex(&parent->core.spin);
	bref = NULL;	/* now invalid (safety) */

	/*
	 * Adjust keybits to represent half of the full range calculated
	 * above (radix 63 max) for our new indirect block.
	 */
	--keybits;

	/*
	 * Expand keybits to hold at least ncount elements.  ncount will be
	 * a power of 2.  This is to try to completely fill leaf nodes (at
	 * least for keys which are not hashes).
	 *
	 * We aren't counting 'in' or 'out', we are counting 'high side'
	 * and 'low side' based on the bit at (1LL << keybits).  We want
	 * everything to be inside in these cases so shift it all to
	 * the low or high side depending on the new high bit.
	 */
	while (((hammer2_key_t)1 << keybits) < ncount) {
		++keybits;
		if (key & ((hammer2_key_t)1 << keybits)) {
			hicount += locount;
			locount = 0;
		} else {
			locount += hicount;
			hicount = 0;
		}
	}

	/*
	 * NOTE(review): due to C precedence the else-clause computes
	 * (~(hammer2_key_t)1) << keybits, clearing bit keybits AND all
	 * lower bits, not just bit keybits.  The caller masks the low
	 * bits off anyway, so this appears benign — confirm before
	 * "fixing" the parenthesization.
	 */
	if (hicount > locount)
		key |= (hammer2_key_t)1 << keybits;
	else
		key &= ~(hammer2_key_t)1 << keybits;

	*keyp = key;

	return (keybits);
}

#else

/*
 * Directory indirect blocks.
 *
 * Covers both the inode index (directory of inodes), and directory contents
 * (filenames hardlinked to inodes).
 *
 * Because directory keys are hashed we generally try to cut the space in
 * half.
 * We accommodate the inode index (which tends to have linearly
 * increasing inode numbers) by ensuring that the keyspace is at least large
 * enough to fill up the indirect block being created.
 */
static int
hammer2_chain_indkey_dir(hammer2_chain_t *parent, hammer2_key_t *keyp,
			 int keybits, hammer2_blockref_t *base, int count,
			 int ncount)
{
	hammer2_blockref_t *bref;
	hammer2_chain_t	*chain;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_key_t key_next;
	hammer2_key_t key;
	int nkeybits;
	int locount;
	int hicount;
	int maxloops = 300000;

	/*
	 * Shortcut if the parent is the inode.  In this situation the
	 * parent has 4+1 directory entries and we are creating an indirect
	 * block capable of holding many more.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
		return 63;
	}

	key = *keyp;
	locount = 0;
	hicount = 0;

	/*
	 * Calculate the range of keys in the array being careful to skip
	 * slots which are overridden with a deletion.
	 */
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	hammer2_spin_ex(&parent->core.spin);

	for (;;) {
		if (--maxloops == 0) {
			panic("indkey_freemap shit %p %p:%d\n",
			      parent, base, count);
		}
		chain = hammer2_combined_find(parent, base, count,
					      &key_next,
					      key_beg, key_end,
					      &bref);

		/*
		 * Exhausted search
		 */
		if (bref == NULL)
			break;

		/*
		 * Deleted object
		 */
		if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
			if (key_next == 0 || key_next > key_end)
				break;
			key_beg = key_next;
			continue;
		}

		/*
		 * Use the full live (not deleted) element for the scan
		 * iteration.  HAMMER2 does not allow partial replacements.
		 *
		 * XXX should be built into hammer2_combined_find().
		 */
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Expand our calculated key range (key, keybits) to fit
		 * the scanned key.  nkeybits represents the full range
		 * that we will later cut in half (two halves @ nkeybits - 1).
		 */
		nkeybits = keybits;
		if (nkeybits < bref->keybits) {
			if (bref->keybits > 64) {
				kprintf("bad bref chain %p bref %p\n",
					chain, bref);
				Debugger("fubar");
			}
			nkeybits = bref->keybits;
		}
		while (nkeybits < 64 &&
		       (~(((hammer2_key_t)1 << nkeybits) - 1) &
		        (key ^ bref->key)) != 0) {
			++nkeybits;
		}

		/*
		 * If the new key range is larger we have to determine
		 * which side of the new key range the existing keys fall
		 * under by checking the high bit, then collapsing the
		 * locount into the hicount or vice-versa.
		 */
		if (keybits != nkeybits) {
			if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
				hicount += locount;
				locount = 0;
			} else {
				locount += hicount;
				hicount = 0;
			}
			keybits = nkeybits;
		}

		/*
		 * The newly scanned key will be in the lower half or the
		 * upper half of the (new) key range.
		 */
		if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
			++hicount;
		else
			++locount;

		if (key_next == 0)
			break;
		key_beg = key_next;
	}
	hammer2_spin_unex(&parent->core.spin);
	bref = NULL;	/* now invalid (safety) */

	/*
	 * Adjust keybits to represent half of the full range calculated
	 * above (radix 63 max) for our new indirect block.
	 */
	--keybits;

	/*
	 * Expand keybits to hold at least ncount elements.  ncount will be
	 * a power of 2.  This is to try to completely fill leaf nodes (at
	 * least for keys which are not hashes).
	 *
	 * We aren't counting 'in' or 'out', we are counting 'high side'
	 * and 'low side' based on the bit at (1LL << keybits).  We want
	 * everything to be inside in these cases so shift it all to
	 * the low or high side depending on the new high bit.
	 */
	while (((hammer2_key_t)1 << keybits) < ncount) {
		++keybits;
		if (key & ((hammer2_key_t)1 << keybits)) {
			hicount += locount;
			locount = 0;
		} else {
			locount += hicount;
			hicount = 0;
		}
	}

	if (hicount > locount)
		key |= (hammer2_key_t)1 << keybits;
	else
		key &= ~(hammer2_key_t)1 << keybits;

	*keyp = key;

	return (keybits);
}

#endif

/*
 * Sets CHAIN_DELETED and remove the chain's blockref from the parent if
 * it exists.
 *
 * Both parent and chain must be locked exclusively.
 *
 * This function will modify the parent if the blockref requires removal
 * from the parent's block table.
 *
 * This function is NOT recursive.  Any entity already pushed into the
 * chain (such as an inode) may still need visibility into its contents,
 * as well as the ability to read and modify the contents.  For example,
 * for an unlinked file which is still open.
 *
 * Also note that the flusher is responsible for cleaning up empty
 * indirect blocks.
 *
 * NOTE: hammer2_chain_delete_obref() below is an intentional near-copy
 *	 of this function which additionally returns the original
 *	 blockref to the caller; keep the two in sync.
 */
int
hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
		     hammer2_tid_t mtid, int flags)
{
	int error = 0;

	KKASSERT(hammer2_mtx_owned(&chain->lock));

	/*
	 * Nothing to do if already marked.
	 *
	 * We need the spinlock on the core whose RBTREE contains chain
	 * to protect against races.
	 */
	if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0 &&
			 chain->parent == parent);
		error = _hammer2_chain_delete_helper(parent, chain,
						     mtid, flags, NULL);
	}

	/*
	 * Permanent deletions mark the chain as destroyed.
	 *
	 * NOTE: We do not setflush the chain unless the deletion is
	 *	 permanent, since the deletion of a chain does not actually
	 *	 require it to be flushed.
	 */
	if (error == 0) {
		if (flags & HAMMER2_DELETE_PERMANENT) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
			hammer2_chain_setflush(chain);
		}
	}

	return error;
}

/*
 * Variant of hammer2_chain_delete() which also returns a copy of the
 * original (pre-deletion) blockref in *obref, or sets obref->type to
 * HAMMER2_BREF_TYPE_EMPTY if the chain was already deleted.
 */
static int
hammer2_chain_delete_obref(hammer2_chain_t *parent, hammer2_chain_t *chain,
			   hammer2_tid_t mtid, int flags,
			   hammer2_blockref_t *obref)
{
	int error = 0;

	KKASSERT(hammer2_mtx_owned(&chain->lock));

	/*
	 * Nothing to do if already marked.
	 *
	 * We need the spinlock on the core whose RBTREE contains chain
	 * to protect against races.
	 */
	obref->type = HAMMER2_BREF_TYPE_EMPTY;
	if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0 &&
			 chain->parent == parent);
		error = _hammer2_chain_delete_helper(parent, chain,
						     mtid, flags, obref);
	}

	/*
	 * Permanent deletions mark the chain as destroyed.
	 *
	 * NOTE: We do not setflush the chain unless the deletion is
	 *	 permanent, since the deletion of a chain does not actually
	 *	 require it to be flushed.
	 */
	if (error == 0) {
		if (flags & HAMMER2_DELETE_PERMANENT) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
			hammer2_chain_setflush(chain);
		}
	}

	return error;
}

/*
 * Returns the index of the nearest element in the blockref array >= elm.
 * Returns (count) if no element could be found.
 *
 * Sets *key_nextp to the next key for loop purposes but does not modify
 * it if the next key would be higher than the current value of *key_nextp.
 * Note that *key_nextp can overflow to 0, which should be tested by the
 * caller.
 *
 * WARNING!
 *	    Must be called with parent's spinlock held.  Spinlock remains
 *	    held through the operation.
 */
static int
hammer2_base_find(hammer2_chain_t *parent,
		  hammer2_blockref_t *base, int count,
		  hammer2_key_t *key_nextp,
		  hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_blockref_t *scan;
	hammer2_key_t scan_end;
	int i;
	int limit;

	/*
	 * Require the live chain's already have their core's counted
	 * so we can optimize operations.
	 */
	KKASSERT(parent->flags & HAMMER2_CHAIN_COUNTEDBREFS);

	/*
	 * Degenerate case
	 */
	if (count == 0 || base == NULL)
		return(count);

	/*
	 * Sequential optimization using parent->cache_index.  This is
	 * the most likely scenario.
	 *
	 * We can avoid trailing empty entries on live chains, otherwise
	 * we might have to check the whole block array.
	 */
	i = parent->cache_index;	/* SMP RACE OK */
	cpu_ccfence();
	limit = parent->core.live_zero;
	if (i >= limit)
		i = limit - 1;
	if (i < 0)
		i = 0;
	KKASSERT(i < count);

	/*
	 * Search backwards from the cached index until we are at or
	 * before the first element that could match key_beg.
	 */
	scan = &base[i];
	while (i > 0 && (scan->type == HAMMER2_BREF_TYPE_EMPTY ||
	       scan->key > key_beg)) {
		--scan;
		--i;
	}
	parent->cache_index = i;

	/*
	 * Search forwards, stop when we find a scan element which
	 * encloses the key or until we know that there are no further
	 * elements.
	 */
	while (i < count) {
		if (scan->type != HAMMER2_BREF_TYPE_EMPTY) {
			scan_end = scan->key +
				   ((hammer2_key_t)1 << scan->keybits) - 1;
			/* element is past key_beg or encloses it */
			if (scan->key > key_beg || scan_end >= key_beg)
				break;
		}
		if (i >= limit)
			return (count);
		++scan;
		++i;
	}
	if (i != count) {
		parent->cache_index = i;
		if (i >= limit) {
			i = count;
		} else {
			/*
			 * Tighten *key_nextp to the end of this element,
			 * but never loosen it, and leave it alone when
			 * scan_end overflowed to 0.
			 */
			scan_end = scan->key +
				   ((hammer2_key_t)1 << scan->keybits);
			if (scan_end && (*key_nextp > scan_end ||
					 *key_nextp == 0)) {
				*key_nextp = scan_end;
			}
		}
	}
	return (i);
}

/*
 * Do a combined search and return the next match either from the blockref
 * array or from the in-memory chain.  Sets *brefp to the returned bref in
 * both cases, or sets it to NULL if the search exhausted.  Only returns
 * a non-NULL chain if the search matched from the in-memory chain.
 *
 * When no in-memory chain matched, the non-NULL bref returned in *brefp
 * points into the parent's media block array; otherwise it points at the
 * matching chain's embedded bref.
 *
 * The returned chain is not locked or referenced.  Use the returned bref
 * to determine if the search exhausted or not.  Iterate if the base find
 * is chosen but matches a deleted chain.
 *
 * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
 *	     held through the operation.
 */
static hammer2_chain_t *
hammer2_combined_find(hammer2_chain_t *parent,
		      hammer2_blockref_t *base, int count,
		      hammer2_key_t *key_nextp,
		      hammer2_key_t key_beg, hammer2_key_t key_end,
		      hammer2_blockref_t **brefp)
{
	hammer2_blockref_t *bref;
	hammer2_chain_t *chain;
	int i;

	/*
	 * Lookup in block array and in rbtree.
	 */
	*key_nextp = key_end + 1;
	i = hammer2_base_find(parent, base, count, key_nextp,
			      key_beg, key_end);
	chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);

	/*
	 * Neither matched
	 */
	if (i == count && chain == NULL) {
		*brefp = NULL;
		return(NULL);
	}

	/*
	 * Only chain matched.
	 */
	if (i == count) {
		bref = &chain->bref;
		goto found;
	}

	/*
	 * Only blockref matched.
	 */
	if (chain == NULL) {
		bref = &base[i];
		goto found;
	}

	/*
	 * Both in-memory and blockref matched, select the nearer element.
	 *
	 * If both are flush with the left-hand side or both are the
	 * same distance away, select the chain.  In this situation the
	 * chain must have been loaded from the matching blockmap.
	 */
	if ((chain->bref.key <= key_beg && base[i].key <= key_beg) ||
	    chain->bref.key == base[i].key) {
		KKASSERT(chain->bref.key == base[i].key);
		bref = &chain->bref;
		goto found;
	}

	/*
	 * Select the nearer key
	 */
	if (chain->bref.key < base[i].key) {
		bref = &chain->bref;
	} else {
		bref = &base[i];
		chain = NULL;
	}

	/*
	 * If the bref is out of bounds we've exhausted our search.
	 */
found:
	if (bref->key > key_end) {
		*brefp = NULL;
		chain = NULL;
	} else {
		*brefp = bref;
	}
	return(chain);
}

/*
 * Locate the specified block array element and delete it.  The element
 * must exist.
 *
 * The spin lock on the related chain must be held.
 *
 * NOTE: live_count was adjusted when the chain was deleted, so it does not
 *	 need to be adjusted when we commit the media change.
 */
void
hammer2_base_delete(hammer2_chain_t *parent,
		    hammer2_blockref_t *base, int count,
		    hammer2_chain_t *chain,
		    hammer2_blockref_t *obref)
{
	hammer2_blockref_t *elm = &chain->bref;	/* element to remove */
	hammer2_blockref_t *scan;
	hammer2_key_t key_next;
	int i;

	/*
	 * Delete element.  Expect the element to exist.
	 *
	 * XXX see caller, flush code not yet sophisticated enough to prevent
	 *     re-flushed in some cases.
	 */
	key_next = 0; /* max range */
	i = hammer2_base_find(parent, base, count, &key_next,
			      elm->key, elm->key);
	scan = &base[i];
	/*
	 * Sanity check the located slot.  keybits may legitimately differ
	 * when a blockmap update is pending (BLKMAPUPD), so only compare
	 * keybits when that flag is clear.
	 */
	if (i == count || scan->type == HAMMER2_BREF_TYPE_EMPTY ||
	    scan->key != elm->key ||
	    ((chain->flags & HAMMER2_CHAIN_BLKMAPUPD) == 0 &&
	     scan->keybits != elm->keybits)) {
		hammer2_spin_unex(&parent->core.spin);
		panic("delete base %p element not found at %d/%d elm %p\n",
		      base, i, count, elm);
		return;
	}

	/*
	 * Update stats and zero the entry.
	 *
	 * NOTE: Handle radix == 0 (0 bytes) case.
	 */
	if ((int)(scan->data_off & HAMMER2_OFF_MASK_RADIX)) {
		parent->bref.embed.stats.data_count -= (hammer2_off_t)1 <<
			(int)(scan->data_off & HAMMER2_OFF_MASK_RADIX);
	}
	switch(scan->type) {
	case HAMMER2_BREF_TYPE_INODE:
		--parent->bref.embed.stats.inode_count;
		/* fall through */
	case HAMMER2_BREF_TYPE_DATA:
		if (parent->bref.leaf_count == HAMMER2_BLOCKREF_LEAF_MAX) {
			/* saturated; can only hint, not decrement */
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_HINT_LEAF_COUNT);
		} else {
			if (parent->bref.leaf_count)
				--parent->bref.leaf_count;
		}
		/* fall through */
	case HAMMER2_BREF_TYPE_INDIRECT:
		if (scan->type != HAMMER2_BREF_TYPE_DATA) {
			parent->bref.embed.stats.data_count -=
				scan->embed.stats.data_count;
			parent->bref.embed.stats.inode_count -=
				scan->embed.stats.inode_count;
		}
		if (scan->type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent->bref.leaf_count == HAMMER2_BLOCKREF_LEAF_MAX) {
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_HINT_LEAF_COUNT);
		} else {
			/* clamp at zero rather than underflowing */
			if (parent->bref.leaf_count <= scan->leaf_count)
				parent->bref.leaf_count = 0;
			else
				parent->bref.leaf_count -= scan->leaf_count;
		}
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
		if (parent->bref.leaf_count == HAMMER2_BLOCKREF_LEAF_MAX) {
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_HINT_LEAF_COUNT);
		} else {
			if (parent->bref.leaf_count)
				--parent->bref.leaf_count;
		}
		/* fall through */
	default:
		break;
	}

	/* optionally return the old blockref before zeroing the slot */
	if (obref)
		*obref = *scan;
	bzero(scan, sizeof(*scan));

	/*
	 * We can only optimize parent->core.live_zero for live chains.
	 * Pull live_zero back over any trailing EMPTY slots.
	 */
	if (parent->core.live_zero == i + 1) {
		while (--i >= 0 && base[i].type == HAMMER2_BREF_TYPE_EMPTY)
			;
		parent->core.live_zero = i + 1;
	}

	/*
	 * Clear appropriate blockmap flags in chain.
	 */
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BLKMAPPED |
					HAMMER2_CHAIN_BLKMAPUPD);
}

/*
 * Insert the specified element.  The block array must not already have the
 * element and must have space available for the insertion.
 *
 * The spin lock on the related chain must be held.
 *
 * NOTE: live_count was adjusted when the chain was deleted, so it does not
 *	 need to be adjusted when we commit the media change.
 */
void
hammer2_base_insert(hammer2_chain_t *parent,
		    hammer2_blockref_t *base, int count,
		    hammer2_chain_t *chain, hammer2_blockref_t *elm)
{
	hammer2_key_t key_next;
	hammer2_key_t xkey;
	int i;
	int j;		/* leftward probe for an empty slot */
	int k;		/* rightward probe for an empty slot */
	int l;
	int u = 1;	/* which strategy filled the slot (debug aid) */

	/*
	 * Insert new element.  Expect the element to not already exist
	 * unless we are replacing it.
	 *
	 * XXX see caller, flush code not yet sophisticated enough to prevent
	 *     re-flushed in some cases.
	 */
	key_next = 0; /* max range */
	i = hammer2_base_find(parent, base, count, &key_next,
			      elm->key, elm->key);

	/*
	 * Shortcut fill optimization, typical ordered insertion(s) may not
	 * require a search.
	 */
	KKASSERT(i >= 0 && i <= count);

	/*
	 * Set appropriate blockmap flags in chain (if not NULL)
	 */
	if (chain)
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_BLKMAPPED);

	/*
	 * Update stats and zero the entry
	 */
	if ((int)(elm->data_off & HAMMER2_OFF_MASK_RADIX)) {
		parent->bref.embed.stats.data_count += (hammer2_off_t)1 <<
			(int)(elm->data_off & HAMMER2_OFF_MASK_RADIX);
	}
	switch(elm->type) {
	case HAMMER2_BREF_TYPE_INODE:
		++parent->bref.embed.stats.inode_count;
		/* fall through */
	case HAMMER2_BREF_TYPE_DATA:
		if (parent->bref.leaf_count != HAMMER2_BLOCKREF_LEAF_MAX)
			++parent->bref.leaf_count;
		/* fall through */
	case HAMMER2_BREF_TYPE_INDIRECT:
		if (elm->type != HAMMER2_BREF_TYPE_DATA) {
			parent->bref.embed.stats.data_count +=
				elm->embed.stats.data_count;
			parent->bref.embed.stats.inode_count +=
				elm->embed.stats.inode_count;
		}
		if (elm->type == HAMMER2_BREF_TYPE_INODE)
			break;
		/* saturate leaf_count at LEAF_MAX */
		if (parent->bref.leaf_count + elm->leaf_count <
		    HAMMER2_BLOCKREF_LEAF_MAX) {
			parent->bref.leaf_count += elm->leaf_count;
		} else {
			parent->bref.leaf_count = HAMMER2_BLOCKREF_LEAF_MAX;
		}
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
		if (parent->bref.leaf_count != HAMMER2_BLOCKREF_LEAF_MAX)
			++parent->bref.leaf_count;
		break;
	default:
		break;
	}

	/*
	 * We can only optimize parent->core.live_zero for live chains.
	 * Appending past the live area needs no shifting at all.
	 */
	if (i == count && parent->core.live_zero < count) {
		i = parent->core.live_zero++;
		base[i] = *elm;
		return;
	}

	/* xkey = inclusive end of the new element's keyspace */
	xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
	if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
		hammer2_spin_unex(&parent->core.spin);
		panic("insert base %p overlapping elements at %d elm %p\n",
		      base, i, elm);
	}

	/*
	 * Try to find an empty slot before or after.  Elements between the
	 * empty slot and the insertion point are shifted to make room.
	 */
	j = i;
	k = i;
	while (j > 0 || k < count) {
		--j;
		if (j >= 0 && base[j].type == HAMMER2_BREF_TYPE_EMPTY) {
			if (j == i - 1) {
				/* adjacent empty slot; no shift needed */
				base[j] = *elm;
			} else {
				bcopy(&base[j+1], &base[j],
				      (i - j - 1) * sizeof(*base));
				base[i - 1] = *elm;
			}
			goto validate;
		}
		++k;
		if (k < count && base[k].type == HAMMER2_BREF_TYPE_EMPTY) {
			bcopy(&base[i], &base[i+1],
			      (k - i) * sizeof(hammer2_blockref_t));
			base[i] = *elm;

			/*
			 * We can only update parent->core.live_zero for live
			 * chains.
			 */
			if (parent->core.live_zero <= k)
				parent->core.live_zero = k + 1;
			u = 2;
			goto validate;
		}
	}
	panic("hammer2_base_insert: no room!");

	/*
	 * Debugging: verify the array remains sorted with no overlapping
	 * keyspaces after the insertion.
	 */
validate:
	key_next = 0;
	for (l = 0; l < count; ++l) {
		if (base[l].type != HAMMER2_BREF_TYPE_EMPTY) {
			key_next = base[l].key +
				   ((hammer2_key_t)1 << base[l].keybits) - 1;
			break;
		}
	}
	while (++l < count) {
		if (base[l].type != HAMMER2_BREF_TYPE_EMPTY) {
			if (base[l].key <= key_next)
				panic("base_insert %d %d,%d,%d fail %p:%d", u, i, j, k, base, l);
			key_next = base[l].key +
				   ((hammer2_key_t)1 << base[l].keybits) - 1;
		}
	}
}

#if 0

/*
 * Sort the blockref array for the chain.  Used by the flush code to
 * sort the blockref[] array.
 *
 * The chain must be exclusively locked AND spin-locked.
 */
typedef hammer2_blockref_t *hammer2_blockref_p;

/*
 * qsort comparator: empty elements sort to the end, otherwise ascending
 * by key.
 */
static
int
hammer2_base_sort_callback(const void *v1, const void *v2)
{
	hammer2_blockref_p bref1 = *(const hammer2_blockref_p *)v1;
	hammer2_blockref_p bref2 = *(const hammer2_blockref_p *)v2;

	/*
	 * Make sure empty elements are placed at the end of the array
	 */
	if (bref1->type == HAMMER2_BREF_TYPE_EMPTY) {
		if (bref2->type == HAMMER2_BREF_TYPE_EMPTY)
			return(0);
		return(1);
	} else if (bref2->type == HAMMER2_BREF_TYPE_EMPTY) {
		return(-1);
	}

	/*
	 * Sort by key
	 */
	if (bref1->key < bref2->key)
		return(-1);
	if (bref1->key > bref2->key)
		return(1);
	return(0);
}

void
hammer2_base_sort(hammer2_chain_t *chain)
{
	hammer2_blockref_t *base;
	int count;

	/*
	 * Locate the blockref array embedded in this chain's media data;
	 * its location depends on the bref type.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Special shortcut for embedded data returns the inode
		 * itself.  Callers must detect this condition and access
		 * the embedded data (the strategy code does this for us).
		 *
		 * This is only applicable to regular files and softlinks.
		 */
		if (chain->data->ipdata.meta.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			return;
		}
		base = &chain->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
		base = &chain->data->npdata[0];
		count = chain->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &chain->data->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &chain->data->blkset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_base_sort: unrecognized "
		      "blockref(A) type: %d",
		      chain->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
		break;
	}
	kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
}

#endif

/*
 * Set the check data for a chain.  This can be a heavy-weight operation
 * and typically only runs on-flush.  For file data check data is calculated
 * when the logical buffers are flushed.
 *
 * The check method is selected by the CHECK field encoded in
 * chain->bref.methods; bdata is the media data to checksum.
 */
void
hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata)
{
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_NOTTESTED);

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_NONE:
		break;
	case HAMMER2_CHECK_DISABLED:
		break;
	case HAMMER2_CHECK_ISCSI32:
		chain->bref.check.iscsi32.value =
			hammer2_icrc32(bdata, chain->bytes);
		break;
	case HAMMER2_CHECK_XXHASH64:
		chain->bref.check.xxhash64.value =
			XXH64(bdata, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_SHA192:
		/* SHA192 not wired up in this port; original kernel code
		 * kept below for reference. */
		assert(0);	/* XXX unsupported */
		/*
		{
			SHA256_CTX hash_ctx;
			union {
				uint8_t digest[SHA256_DIGEST_LENGTH];
				uint64_t digest64[SHA256_DIGEST_LENGTH/8];
			} u;

			SHA256_Init(&hash_ctx);
			SHA256_Update(&hash_ctx, bdata, chain->bytes);
			SHA256_Final(u.digest, &hash_ctx);
			u.digest64[2] ^= u.digest64[3];
			bcopy(u.digest,
			      chain->bref.check.sha192.data,
			      sizeof(chain->bref.check.sha192.data));
		}
		*/
		break;
	case HAMMER2_CHECK_FREEMAP:
		chain->bref.check.freemap.icrc32 =
			hammer2_icrc32(bdata, chain->bytes);
		break;
	default:
		kprintf("hammer2_chain_setcheck: unknown check type %02x\n",
			chain->bref.methods);
		break;
	}
}

/*
 * Characterize a failed check code and try to trace back to the inode.
 * check holds the freshly computed value (32 or 64 bits wide per 'bits');
 * the stored value comes from chain->bref.check.
 */
static void
hammer2_characterize_failed_chain(hammer2_chain_t *chain, uint64_t check,
				  int bits)
{
	hammer2_chain_t *lchain;
	hammer2_chain_t *ochain;
	int did;

	/* rate-limited; if suppressed, skip the rest of the report too */
	did = krateprintf(&krate_h2chk,
			  "chain %016jx.%02x (%s) meth=%02x CHECK FAIL "
			  "(flags=%08x, bref/data ",
			  chain->bref.data_off,
			  chain->bref.type,
			  hammer2_bref_type_str(chain->bref.type),
			  chain->bref.methods,
			  chain->flags);
	if (did == 0)
		return;

	if (bits == 32) {
		kprintf("%08x/%08x)\n",
			chain->bref.check.iscsi32.value,
			(uint32_t)check);
	} else {
		kprintf("%016jx/%016jx)\n",
			chain->bref.check.xxhash64.value,
			check);
	}

	/*
	 * Run up the chains to try to find the governing inode so we
	 * can report it.
	 *
	 * XXX This error reporting is not really MPSAFE
	 */
	ochain = chain;
	lchain = chain;
	while (chain && chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		lchain = chain;
		chain = chain->parent;
	}

	if (chain && chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    ((chain->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) == 0 ||
	     (lchain->bref.key & HAMMER2_DIRHASH_VISIBLE))) {
		kprintf(" Resides at/in inode %ld\n",
			(long)chain->bref.key);
	} else if (chain && chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		kprintf(" Resides in inode index - CRITICAL!!!\n");
	} else {
		kprintf(" Resides in root index - CRITICAL!!!\n");
	}
	if (ochain->hmp) {
		const char *pfsname = "UNKNOWN";
		int i;

		/* map the device (hmp) back to a PFS name if possible */
		if (ochain->pmp) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (ochain->pmp->pfs_hmps[i] == ochain->hmp &&
				    ochain->pmp->pfs_names[i]) {
					pfsname = ochain->pmp->pfs_names[i];
					break;
				}
			}
		}
		kprintf(" In pfs %s on device %s\n",
			pfsname, ochain->hmp->devrepname);
	}
}

/*
 * Returns non-zero on success, 0 on failure.
5599 */ 5600 int 5601 hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata) 5602 { 5603 uint32_t check32; 5604 uint64_t check64; 5605 int r; 5606 5607 if (chain->flags & HAMMER2_CHAIN_NOTTESTED) 5608 return 1; 5609 5610 switch(HAMMER2_DEC_CHECK(chain->bref.methods)) { 5611 case HAMMER2_CHECK_NONE: 5612 r = 1; 5613 break; 5614 case HAMMER2_CHECK_DISABLED: 5615 r = 1; 5616 break; 5617 case HAMMER2_CHECK_ISCSI32: 5618 check32 = hammer2_icrc32(bdata, chain->bytes); 5619 r = (chain->bref.check.iscsi32.value == check32); 5620 if (r == 0) { 5621 hammer2_characterize_failed_chain(chain, check32, 32); 5622 } 5623 hammer2_process_icrc32 += chain->bytes; 5624 break; 5625 case HAMMER2_CHECK_XXHASH64: 5626 check64 = XXH64(bdata, chain->bytes, XXH_HAMMER2_SEED); 5627 r = (chain->bref.check.xxhash64.value == check64); 5628 if (r == 0) { 5629 hammer2_characterize_failed_chain(chain, check64, 64); 5630 } 5631 hammer2_process_xxhash64 += chain->bytes; 5632 break; 5633 case HAMMER2_CHECK_SHA192: 5634 assert(0); /* XXX unsupported */ 5635 /* 5636 { 5637 SHA256_CTX hash_ctx; 5638 union { 5639 uint8_t digest[SHA256_DIGEST_LENGTH]; 5640 uint64_t digest64[SHA256_DIGEST_LENGTH/8]; 5641 } u; 5642 5643 SHA256_Init(&hash_ctx); 5644 SHA256_Update(&hash_ctx, bdata, chain->bytes); 5645 SHA256_Final(u.digest, &hash_ctx); 5646 u.digest64[2] ^= u.digest64[3]; 5647 if (bcmp(u.digest, 5648 chain->bref.check.sha192.data, 5649 sizeof(chain->bref.check.sha192.data)) == 0) { 5650 r = 1; 5651 } else { 5652 r = 0; 5653 krateprintf(&krate_h2chk, 5654 "chain %016jx.%02x meth=%02x " 5655 "CHECK FAIL\n", 5656 chain->bref.data_off, 5657 chain->bref.type, 5658 chain->bref.methods); 5659 } 5660 } 5661 */ 5662 break; 5663 case HAMMER2_CHECK_FREEMAP: 5664 r = (chain->bref.check.freemap.icrc32 == 5665 hammer2_icrc32(bdata, chain->bytes)); 5666 if (r == 0) { 5667 int did; 5668 5669 did = krateprintf(&krate_h2chk, 5670 "chain %016jx.%02x meth=%02x " 5671 "CHECK FAIL\n", 5672 chain->bref.data_off, 5673 
chain->bref.type, 5674 chain->bref.methods); 5675 if (did) { 5676 kprintf("freemap.icrc %08x icrc32 %08x (%d)\n", 5677 chain->bref.check.freemap.icrc32, 5678 hammer2_icrc32(bdata, chain->bytes), 5679 chain->bytes); 5680 if (chain->dio) { 5681 kprintf("dio %p buf %016jx,%ld " 5682 "bdata %p/%p\n", 5683 chain->dio, 5684 (intmax_t)chain->dio->bp->b_loffset, 5685 chain->dio->bp->b_bufsize, 5686 bdata, 5687 chain->dio->bp->b_data); 5688 } 5689 } 5690 } 5691 break; 5692 default: 5693 kprintf("hammer2_chain_testcheck: unknown check type %02x\n", 5694 chain->bref.methods); 5695 r = 1; 5696 break; 5697 } 5698 return r; 5699 } 5700 5701 /* 5702 * Acquire the chain and parent representing the specified inode for the 5703 * device at the specified cluster index. 5704 * 5705 * The flags passed in are LOOKUP flags, not RESOLVE flags. 5706 * 5707 * If we are unable to locate the inode, HAMMER2_ERROR_EIO or HAMMER2_ERROR_CHECK 5708 * is returned. In case of error, *chainp and/or *parentp may still be returned 5709 * non-NULL. 5710 * 5711 * The caller may pass-in a locked *parentp and/or *chainp, or neither. 5712 * They will be unlocked and released by this function. The *parentp and 5713 * *chainp representing the located inode are returned locked. 5714 * 5715 * The returned error includes any error on the returned chain in addition to 5716 * errors incurred while trying to lookup the inode. However, a chain->error 5717 * might not be recognized if HAMMER2_LOOKUP_NODATA is passed. This flag may 5718 * not be passed to this function. 5719 */ 5720 int 5721 hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum, 5722 int clindex, int flags, 5723 hammer2_chain_t **parentp, hammer2_chain_t **chainp) 5724 { 5725 hammer2_chain_t *parent; 5726 hammer2_chain_t *rchain; 5727 hammer2_key_t key_dummy; 5728 hammer2_inode_t *ip; 5729 int resolve_flags; 5730 int error; 5731 5732 KKASSERT((flags & HAMMER2_LOOKUP_NODATA) == 0); 5733 5734 resolve_flags = (flags & HAMMER2_LOOKUP_SHARED) ? 
5735 HAMMER2_RESOLVE_SHARED : 0; 5736 5737 /* 5738 * Caller expects us to replace these. 5739 */ 5740 if (*chainp) { 5741 hammer2_chain_unlock(*chainp); 5742 hammer2_chain_drop(*chainp); 5743 *chainp = NULL; 5744 } 5745 if (*parentp) { 5746 hammer2_chain_unlock(*parentp); 5747 hammer2_chain_drop(*parentp); 5748 *parentp = NULL; 5749 } 5750 5751 /* 5752 * Be very careful, this is a backend function and we CANNOT 5753 * lock any frontend inode structure we find. But we have to 5754 * look the inode up this way first in case it exists but is 5755 * detached from the radix tree. 5756 */ 5757 ip = hammer2_inode_lookup(pmp, inum); 5758 if (ip) { 5759 *chainp = hammer2_inode_chain_and_parent(ip, clindex, 5760 parentp, 5761 resolve_flags); 5762 hammer2_inode_drop(ip); 5763 if (*chainp) 5764 return (*chainp)->error; 5765 hammer2_chain_unlock(*chainp); 5766 hammer2_chain_drop(*chainp); 5767 *chainp = NULL; 5768 if (*parentp) { 5769 hammer2_chain_unlock(*parentp); 5770 hammer2_chain_drop(*parentp); 5771 *parentp = NULL; 5772 } 5773 } 5774 5775 /* 5776 * Inodes hang off of the iroot (bit 63 is clear, differentiating 5777 * inodes from root directory entries in the key lookup). 5778 */ 5779 parent = hammer2_inode_chain(pmp->iroot, clindex, resolve_flags); 5780 rchain = NULL; 5781 if (parent) { 5782 /* 5783 * NOTE: rchain can be returned as NULL even if error == 0 5784 * (i.e. not found) 5785 */ 5786 rchain = hammer2_chain_lookup(&parent, &key_dummy, 5787 inum, inum, 5788 &error, flags); 5789 /* 5790 * Propagate a chain-specific error to caller. 5791 * 5792 * If the chain is not errored, we must still validate that the inode 5793 * number is correct, because all hell will break loose if it isn't 5794 * correct. It should always be correct so print to the console and 5795 * simulate a CHECK error if it is not. 
5796 */ 5797 if (error == 0 && rchain) { 5798 error = rchain->error; 5799 if (error == 0 && rchain->data) { 5800 if (inum != rchain->data->ipdata.meta.inum) { 5801 kprintf("hammer2_chain_inode_find: lookup inum %ld, " 5802 "got valid inode but with inum %ld\n", 5803 (long)inum, (long)rchain->data->ipdata.meta.inum); 5804 error = HAMMER2_ERROR_CHECK; 5805 rchain->error = error; 5806 } 5807 } 5808 } 5809 } else { 5810 error = HAMMER2_ERROR_EIO; 5811 } 5812 *parentp = parent; 5813 *chainp = rchain; 5814 5815 return error; 5816 } 5817 5818 /* 5819 * Used by the bulkscan code to snapshot the synchronized storage for 5820 * a volume, allowing it to be scanned concurrently against normal 5821 * operation. 5822 */ 5823 hammer2_chain_t * 5824 hammer2_chain_bulksnap(hammer2_dev_t *hmp) 5825 { 5826 hammer2_chain_t *copy; 5827 5828 copy = hammer2_chain_alloc(hmp, hmp->spmp, &hmp->vchain.bref); 5829 copy->data = kmalloc(sizeof(copy->data->voldata), 5830 hmp->mmsg, M_WAITOK | M_ZERO); 5831 hammer2_voldata_lock(hmp); 5832 copy->data->voldata = hmp->volsync; 5833 hammer2_voldata_unlock(hmp); 5834 5835 return copy; 5836 } 5837 5838 void 5839 hammer2_chain_bulkdrop(hammer2_chain_t *copy) 5840 { 5841 KKASSERT(copy->bref.type == HAMMER2_BREF_TYPE_VOLUME); 5842 KKASSERT(copy->data); 5843 kfree(copy->data, copy->hmp->mmsg); 5844 copy->data = NULL; 5845 hammer2_chain_drop(copy); 5846 } 5847 5848 /* 5849 * Returns non-zero if the chain (INODE or DIRENT) matches the 5850 * filename. 
 */
int
hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
			  size_t name_len)
{
	const hammer2_inode_data_t *ripdata;

	/* inodes store the filename in their media data */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		ripdata = &chain->data->ipdata;
		if (ripdata->meta.name_len == name_len &&
		    bcmp(ripdata->filename, name, name_len) == 0) {
			return 1;
		}
	}
	/*
	 * Dirents store short names directly in bref.check.buf; longer
	 * names spill into the dirent's data block (chain->data->buf).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_DIRENT &&
	    chain->bref.embed.dirent.namlen == name_len) {
		if (name_len > sizeof(chain->bref.check.buf) &&
		    bcmp(chain->data->buf, name, name_len) == 0) {
			return 1;
		}
		if (name_len <= sizeof(chain->bref.check.buf) &&
		    bcmp(chain->bref.check.buf, name, name_len) == 0) {
			return 1;
		}
	}
	return 0;
}

/*
 * Debugging: recursively dump a chain and its in-memory children.
 * *countp limits the total number of chains printed; tab is the current
 * indent, bi the child index within the parent, pfx a caller-chosen
 * marker character, and flags selects which children to recurse into
 * ((u_int)-1 means all).
 */
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int bi, int *countp,
		   char pfx, u_int flags)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p %s.%-3d %016jx %016jx/%-2d mir=%016jx\n",
		tab, tab, "", pfx, chain,
		hammer2_bref_type_str(chain->bref.type), bi,
		chain->bref.data_off, chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		  chain->data) ?
		 (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent)
		kprintf("\n%*.*s p=%p [pflags %08x prefs %d]",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		/* NOTE: this bi intentionally shadows the parameter */
		int bi = 0;
		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) {
			if ((scan->flags & flags) || flags == (u_int)-1) {
				hammer2_dump_chain(scan, tab + 4, bi, countp,
						   'a', flags);
			}
			bi++;
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}

/*
 * Debugging: dump both topology trees (volume and freemap) of a device,
 * each capped at 50 chains.
 */
void
hammer2_dump_chains(hammer2_dev_t *hmp, char vpfx, char fpfx)
{
	int dumpcnt;

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, vpfx, (u_int)-1);

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, fpfx, (u_int)-1);
}