/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	10	/* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	int		diddeferral;
	int		cache_index;
	int		flags;
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}
94 * 95 * 0 - Normal transaction, interlocked against flush 96 * transaction. 97 * 98 * TRANS_ISFLUSH - Flush transaction, interlocked against normal 99 * transaction. 100 * 101 * TRANS_BUFCACHE - Buffer cache transaction, no interlock. 102 * 103 * Initializing a new transaction allocates a transaction ID. Typically 104 * passed a pmp (hmp passed as NULL), indicating a cluster transaction. Can 105 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single 106 * media target. The latter mode is used by the recovery code. 107 * 108 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the 109 * other is a set of any number of concurrent filesystem operations. We 110 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops> 111 * or we can have <running_flush> + <concurrent_fs_ops>. 112 * 113 * During a flush, new fs_ops are only blocked until the fs_ops prior to 114 * the flush complete. The new fs_ops can then run concurrent with the flush. 115 * 116 * Buffer-cache transactions operate as fs_ops but never block. A 117 * buffer-cache flush will run either before or after the current pending 118 * flush depending on its state. 119 */ 120 void 121 hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags) 122 { 123 uint32_t oflags; 124 uint32_t nflags; 125 int dowait; 126 127 for (;;) { 128 oflags = pmp->trans.flags; 129 cpu_ccfence(); 130 dowait = 0; 131 132 if (flags & HAMMER2_TRANS_ISFLUSH) { 133 /* 134 * Requesting flush transaction. Wait for all 135 * currently running transactions to finish. 136 */ 137 if (oflags & HAMMER2_TRANS_MASK) { 138 nflags = oflags | HAMMER2_TRANS_FPENDING | 139 HAMMER2_TRANS_WAITING; 140 dowait = 1; 141 } else { 142 nflags = (oflags | flags) + 1; 143 } 144 } else if (flags & HAMMER2_TRANS_BUFCACHE) { 145 /* 146 * Requesting strategy transaction. Generally 147 * allowed in all situations unless a flush 148 * is running without the preflush flag. 149 */ 150 if ((oflags & (HAMMER2_TRANS_ISFLUSH | 151 HAMMER2_TRANS_PREFLUSH)) == 152 HAMMER2_TRANS_ISFLUSH) { 153 nflags = oflags | HAMMER2_TRANS_WAITING; 154 dowait = 1; 155 } else { 156 nflags = (oflags | flags) + 1; 157 } 158 } else { 159 /* 160 * Requesting normal transaction. Wait for any 161 * flush to finish before allowing. 162 */ 163 if (oflags & HAMMER2_TRANS_ISFLUSH) { 164 nflags = oflags | HAMMER2_TRANS_WAITING; 165 dowait = 1; 166 } else { 167 nflags = (oflags | flags) + 1; 168 } 169 } 170 if (dowait) 171 tsleep_interlock(&pmp->trans.sync_wait, 0); 172 if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) { 173 if (dowait == 0) 174 break; 175 tsleep(&pmp->trans.sync_wait, PINTERLOCKED, 176 "h2trans", hz); 177 } else { 178 cpu_pause(); 179 } 180 /* retry */ 181 } 182 } 183 184 /* 185 * Start a sub-transaction, there is no 'subdone' function. This will 186 * issue a new modify_tid (mtid) for the current transaction, which is a 187 * CLC (cluster level change) id and not a per-node id. 188 * 189 * This function must be called for each XOP when multiple XOPs are run in 190 * sequence within a transaction. 191 * 192 * Callers typically update the inode with the transaction mtid manually 193 * to enforce sequencing. 194 */ 195 hammer2_tid_t 196 hammer2_trans_sub(hammer2_pfs_t *pmp) 197 { 198 hammer2_tid_t mtid; 199 200 mtid = atomic_fetchadd_64(&pmp->modify_tid, 1); 201 202 return (mtid); 203 } 204 205 /* 206 * Clears the PREFLUSH stage, called during a flush transaction after all 207 * logical buffer I/O has completed. 
208 */ 209 void 210 hammer2_trans_clear_preflush(hammer2_pfs_t *pmp) 211 { 212 atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH); 213 } 214 215 void 216 hammer2_trans_done(hammer2_pfs_t *pmp) 217 { 218 uint32_t oflags; 219 uint32_t nflags; 220 221 for (;;) { 222 oflags = pmp->trans.flags; 223 cpu_ccfence(); 224 KKASSERT(oflags & HAMMER2_TRANS_MASK); 225 if ((oflags & HAMMER2_TRANS_MASK) == 1) { 226 /* 227 * This was the last transaction 228 */ 229 nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH | 230 HAMMER2_TRANS_BUFCACHE | 231 HAMMER2_TRANS_PREFLUSH | 232 HAMMER2_TRANS_FPENDING | 233 HAMMER2_TRANS_WAITING); 234 } else { 235 /* 236 * Still transactions pending 237 */ 238 nflags = oflags - 1; 239 } 240 if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) { 241 if ((nflags & HAMMER2_TRANS_MASK) == 0 && 242 (oflags & HAMMER2_TRANS_WAITING)) { 243 wakeup(&pmp->trans.sync_wait); 244 } 245 break; 246 } else { 247 cpu_pause(); 248 } 249 /* retry */ 250 } 251 } 252 253 /* 254 * Obtain new, unique inode number (not serialized by caller). 255 */ 256 hammer2_tid_t 257 hammer2_trans_newinum(hammer2_pfs_t *pmp) 258 { 259 hammer2_tid_t tid; 260 261 tid = atomic_fetchadd_64(&pmp->inode_tid, 1); 262 263 return tid; 264 } 265 266 /* 267 * Assert that a strategy call is ok here. Strategy calls are legal 268 * 269 * (1) In a normal transaction. 270 * (2) In a flush transaction only if PREFLUSH is also set. 271 */ 272 void 273 hammer2_trans_assert_strategy(hammer2_pfs_t *pmp) 274 { 275 KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 || 276 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH)); 277 } 278 279 280 /* 281 * Chains undergoing destruction are removed from the in-memory topology. 282 * To avoid getting lost these chains are placed on the delayed flush 283 * queue which will properly dispose of them. 284 * 285 * We do this instead of issuing an immediate flush in order to give 286 * recursive deletions (rm -rf, etc) a chance to remove more of the 287 * hierarchy, potentially allowing an enormous amount of write I/O to 288 * be avoided. 289 */ 290 void 291 hammer2_delayed_flush(hammer2_chain_t *chain) 292 { 293 if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) { 294 hammer2_spin_ex(&chain->hmp->list_spin); 295 if ((chain->flags & (HAMMER2_CHAIN_DELAYED | 296 HAMMER2_CHAIN_DEFERRED)) == 0) { 297 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED | 298 HAMMER2_CHAIN_DEFERRED); 299 TAILQ_INSERT_TAIL(&chain->hmp->flushq, 300 chain, flush_node); 301 hammer2_chain_ref(chain); 302 } 303 hammer2_spin_unex(&chain->hmp->list_spin); 304 } 305 } 306 307 /* 308 * Flush the chain and all modified sub-chains through the specified 309 * synchronization point, propagating blockref updates back up. As 310 * part of this propagation, mirror_tid and inode/data usage statistics 311 * propagates back upward. 312 * 313 * modify_tid (clc - cluster level change) is not propagated. 314 * 315 * update_tid (clc) is used for validation and is not propagated by this 316 * function. 317 * 318 * This routine can be called from several places but the most important 319 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend). 320 * 321 * chain is locked on call and will remain locked on return. The chain's 322 * UPDATE flag indicates that its parent's block table (which is not yet 323 * part of the flush) should be updated. The chain may be replaced by 324 * the call if it was modified. 
325 */ 326 void 327 hammer2_flush(hammer2_chain_t *chain, int flags) 328 { 329 hammer2_chain_t *scan; 330 hammer2_flush_info_t info; 331 hammer2_dev_t *hmp; 332 int loops; 333 334 /* 335 * Execute the recursive flush and handle deferrals. 336 * 337 * Chains can be ridiculously long (thousands deep), so to 338 * avoid blowing out the kernel stack the recursive flush has a 339 * depth limit. Elements at the limit are placed on a list 340 * for re-execution after the stack has been popped. 341 */ 342 bzero(&info, sizeof(info)); 343 TAILQ_INIT(&info.flushq); 344 info.cache_index = -1; 345 info.flags = flags & ~HAMMER2_FLUSH_TOP; 346 347 /* 348 * Calculate parent (can be NULL), if not NULL the flush core 349 * expects the parent to be referenced so it can easily lock/unlock 350 * it without it getting ripped up. 351 */ 352 if ((info.parent = chain->parent) != NULL) 353 hammer2_chain_ref(info.parent); 354 355 /* 356 * Extra ref needed because flush_core expects it when replacing 357 * chain. 358 */ 359 hammer2_chain_ref(chain); 360 hmp = chain->hmp; 361 loops = 0; 362 363 for (;;) { 364 /* 365 * Move hmp->flushq to info.flushq if non-empty so it can 366 * be processed. 367 */ 368 if (TAILQ_FIRST(&hmp->flushq) != NULL) { 369 hammer2_spin_ex(&chain->hmp->list_spin); 370 TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node); 371 hammer2_spin_unex(&chain->hmp->list_spin); 372 } 373 374 /* 375 * Unwind deep recursions which had been deferred. This 376 * can leave the FLUSH_* bits set for these chains, which 377 * will be handled when we [re]flush chain after the unwind. 378 */ 379 while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) { 380 KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED); 381 TAILQ_REMOVE(&info.flushq, scan, flush_node); 382 atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED | 383 HAMMER2_CHAIN_DELAYED); 384 385 /* 386 * Now that we've popped back up we can do a secondary 387 * recursion on the deferred elements. 388 * 389 * NOTE: hammer2_flush() may replace scan. 390 */ 391 if (hammer2_debug & 0x0040) 392 kprintf("deferred flush %p\n", scan); 393 hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE); 394 hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP); 395 hammer2_chain_unlock(scan); 396 hammer2_chain_drop(scan); /* ref from deferral */ 397 } 398 399 /* 400 * [re]flush chain. 401 */ 402 info.diddeferral = 0; 403 hammer2_flush_core(&info, chain, flags); 404 405 /* 406 * Only loop if deep recursions have been deferred. 407 */ 408 if (TAILQ_EMPTY(&info.flushq)) 409 break; 410 411 if (++loops % 1000 == 0) { 412 kprintf("hammer2_flush: excessive loops on %p\n", 413 chain); 414 if (hammer2_debug & 0x100000) 415 Debugger("hell4"); 416 } 417 } 418 hammer2_chain_drop(chain); 419 if (info.parent) 420 hammer2_chain_drop(info.parent); 421 } 422 423 /* 424 * This is the core of the chain flushing code. The chain is locked by the 425 * caller and must also have an extra ref on it by the caller, and remains 426 * locked and will have an extra ref on return. Upon return, the caller can 427 * test the UPDATE bit on the child to determine if the parent needs updating. 428 * 429 * (1) Determine if this node is a candidate for the flush, return if it is 430 * not. fchain and vchain are always candidates for the flush. 431 * 432 * (2) If we recurse too deep the chain is entered onto the deferral list and 433 * the current flush stack is aborted until after the deferral list is 434 * run. 435 * 436 * (3) Recursively flush live children (rbtree). This can create deferrals. 
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int diddeferral;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;
	diddeferral = info->diddeferral;
	parent = info->parent;		/* can be NULL */

	/*
	 * Downward search recursion
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0) {
		/*
		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
		 * handled by the related pmp's (whether mounted or not,
		 * including during recovery).
		 *
		 * But we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * Note that the volume root, vchain, does not set this flag.
		 * Note the logic here requires that this test be done before
		 * the depth-limit test, else it might become the top on a
		 * flushq iteration.
		 */
		;
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races,
		 * which we want so the scan finds us again in the next flush.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		info->parent = chain;
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);
	}
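	/*
	 * NOTE: If the scan above deferred any children, diddeferral is
	 *	 non-zero and hammer2_chain_setflush() re-flagged this
	 *	 chain, guaranteeing that the deferral unwind in
	 *	 hammer2_flush() will find it again.
	 */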
536 * 537 * Do not update chain if lower layers were deferred. 538 */ 539 if (info->diddeferral) 540 goto done; 541 542 /* 543 * Propagate the DESTROY flag downwards. This dummies up the flush 544 * code and tries to invalidate related buffer cache buffers to 545 * avoid the disk write. 546 */ 547 if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY)) 548 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY); 549 550 /* 551 * Chain was already modified or has become modified, flush it out. 552 */ 553 again: 554 if ((hammer2_debug & 0x200) && 555 info->debug && 556 (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) { 557 hammer2_chain_t *scan = chain; 558 559 kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain); 560 while (scan) { 561 kprintf(" chain %p [%08x] bref=%016jx:%02x\n", 562 scan, scan->flags, 563 scan->bref.key, scan->bref.type); 564 if (scan == info->debug) 565 break; 566 scan = scan->parent; 567 } 568 } 569 570 if (chain->flags & HAMMER2_CHAIN_MODIFIED) { 571 /* 572 * Dispose of the modified bit. 573 * 574 * UPDATE should already be set. 575 * bref.mirror_tid should already be set. 576 */ 577 KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) || 578 chain == &hmp->vchain); 579 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED); 580 581 /* 582 * Manage threads waiting for excessive dirty memory to 583 * be retired. 584 */ 585 if (chain->pmp) 586 hammer2_pfs_memory_wakeup(chain->pmp); 587 588 if ((chain->flags & HAMMER2_CHAIN_UPDATE) || 589 chain == &hmp->vchain || 590 chain == &hmp->fchain) { 591 /* 592 * Drop the ref from the MODIFIED bit we cleared, 593 * net -1 ref. 594 */ 595 hammer2_chain_drop(chain); 596 } else { 597 /* 598 * Drop the ref from the MODIFIED bit we cleared and 599 * set a ref for the UPDATE bit we are setting. Net 600 * 0 refs. 601 */ 602 atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 603 } 604 605 /* 606 * Issue the flush. This is indirect via the DIO. 607 * 608 * NOTE: A DELETED node that reaches this point must be 609 * flushed for synchronization point consistency. 610 * 611 * NOTE: Even though MODIFIED was already set, the related DIO 612 * might not be dirty due to a system buffer cache 613 * flush and must be set dirty if we are going to make 614 * further modifications to the buffer. Chains with 615 * embedded data don't need this. 616 */ 617 if (hammer2_debug & 0x1000) { 618 kprintf("Flush %p.%d %016jx/%d data=%016jx", 619 chain, chain->bref.type, 620 (uintmax_t)chain->bref.key, 621 chain->bref.keybits, 622 (uintmax_t)chain->bref.data_off); 623 } 624 if (hammer2_debug & 0x2000) { 625 Debugger("Flush hell"); 626 } 627 628 /* 629 * Update chain CRCs for flush. 630 * 631 * NOTE: Volume headers are NOT flushed here as they require 632 * special processing. 633 */ 634 switch(chain->bref.type) { 635 case HAMMER2_BREF_TYPE_FREEMAP: 636 /* 637 * Update the volume header's freemap_tid to the 638 * freemap's flushing mirror_tid. 
639 * 640 * (note: embedded data, do not call setdirty) 641 */ 642 KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED); 643 KKASSERT(chain == &hmp->fchain); 644 hmp->voldata.freemap_tid = chain->bref.mirror_tid; 645 if (hammer2_debug & 0x8000) { 646 /* debug only, avoid syslogd loop */ 647 kprintf("sync freemap mirror_tid %08jx\n", 648 (intmax_t)chain->bref.mirror_tid); 649 } 650 651 /* 652 * The freemap can be flushed independently of the 653 * main topology, but for the case where it is 654 * flushed in the same transaction, and flushed 655 * before vchain (a case we want to allow for 656 * performance reasons), make sure modifications 657 * made during the flush under vchain use a new 658 * transaction id. 659 * 660 * Otherwise the mount recovery code will get confused. 661 */ 662 ++hmp->voldata.mirror_tid; 663 break; 664 case HAMMER2_BREF_TYPE_VOLUME: 665 /* 666 * The free block table is flushed by 667 * hammer2_vfs_sync() before it flushes vchain. 668 * We must still hold fchain locked while copying 669 * voldata to volsync, however. 670 * 671 * (note: embedded data, do not call setdirty) 672 */ 673 hammer2_chain_lock(&hmp->fchain, 674 HAMMER2_RESOLVE_ALWAYS); 675 hammer2_voldata_lock(hmp); 676 if (hammer2_debug & 0x8000) { 677 /* debug only, avoid syslogd loop */ 678 kprintf("sync volume mirror_tid %08jx\n", 679 (intmax_t)chain->bref.mirror_tid); 680 } 681 682 /* 683 * Update the volume header's mirror_tid to the 684 * main topology's flushing mirror_tid. It is 685 * possible that voldata.mirror_tid is already 686 * beyond bref.mirror_tid due to the bump we made 687 * above in BREF_TYPE_FREEMAP. 688 */ 689 if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) { 690 hmp->voldata.mirror_tid = 691 chain->bref.mirror_tid; 692 } 693 694 /* 695 * The volume header is flushed manually by the 696 * syncer, not here. All we do here is adjust the 697 * crc's. 698 */ 699 KKASSERT(chain->data != NULL); 700 KKASSERT(chain->dio == NULL); 701 702 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]= 703 hammer2_icrc32( 704 (char *)&hmp->voldata + 705 HAMMER2_VOLUME_ICRC1_OFF, 706 HAMMER2_VOLUME_ICRC1_SIZE); 707 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]= 708 hammer2_icrc32( 709 (char *)&hmp->voldata + 710 HAMMER2_VOLUME_ICRC0_OFF, 711 HAMMER2_VOLUME_ICRC0_SIZE); 712 hmp->voldata.icrc_volheader = 713 hammer2_icrc32( 714 (char *)&hmp->voldata + 715 HAMMER2_VOLUME_ICRCVH_OFF, 716 HAMMER2_VOLUME_ICRCVH_SIZE); 717 718 if (hammer2_debug & 0x8000) { 719 /* debug only, avoid syslogd loop */ 720 kprintf("syncvolhdr %016jx %016jx\n", 721 hmp->voldata.mirror_tid, 722 hmp->vchain.bref.mirror_tid); 723 } 724 hmp->volsync = hmp->voldata; 725 atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC); 726 hammer2_voldata_unlock(hmp); 727 hammer2_chain_unlock(&hmp->fchain); 728 break; 729 case HAMMER2_BREF_TYPE_DATA: 730 /* 731 * Data elements have already been flushed via the 732 * logical file buffer cache. Their hash was set in 733 * the bref by the vop_write code. Do not re-dirty. 734 * 735 * Make sure any device buffer(s) have been flushed 736 * out here (there aren't usually any to flush) XXX. 737 */ 738 break; 739 case HAMMER2_BREF_TYPE_INDIRECT: 740 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 741 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 742 /* 743 * Buffer I/O will be cleaned up when the volume is 744 * flushed (but the kernel is free to flush it before 745 * then, as well). 
746 */ 747 KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0); 748 hammer2_chain_setcheck(chain, chain->data); 749 break; 750 case HAMMER2_BREF_TYPE_INODE: 751 /* 752 * NOTE: We must call io_setdirty() to make any late 753 * changes to the inode data, the system might 754 * have already flushed the buffer. 755 */ 756 if (chain->data->ipdata.meta.op_flags & 757 HAMMER2_OPFLAG_PFSROOT) { 758 /* 759 * non-NULL pmp if mounted as a PFS. We must 760 * sync fields cached in the pmp? XXX 761 */ 762 hammer2_inode_data_t *ipdata; 763 764 hammer2_io_setdirty(chain->dio); 765 ipdata = &chain->data->ipdata; 766 if (chain->pmp) { 767 ipdata->meta.pfs_inum = 768 chain->pmp->inode_tid; 769 } 770 } else { 771 /* can't be mounted as a PFS */ 772 } 773 774 KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0); 775 hammer2_chain_setcheck(chain, chain->data); 776 break; 777 default: 778 KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED); 779 panic("hammer2_flush_core: unsupported " 780 "embedded bref %d", 781 chain->bref.type); 782 /* NOT REACHED */ 783 } 784 785 /* 786 * If the chain was destroyed try to avoid unnecessary I/O. 787 * (this only really works if the DIO system buffer is the 788 * same size as chain->bytes). 789 */ 790 if ((chain->flags & HAMMER2_CHAIN_DESTROY) && chain->dio) { 791 hammer2_io_setinval(chain->dio, chain->bytes); 792 } 793 } 794 795 /* 796 * If UPDATE is set the parent block table may need to be updated. 797 * 798 * NOTE: UPDATE may be set on vchain or fchain in which case 799 * parent could be NULL. It's easiest to allow the case 800 * and test for NULL. parent can also wind up being NULL 801 * due to a deletion so we need to handle the case anyway. 802 * 803 * If no parent exists we can just clear the UPDATE bit. If the 804 * chain gets reattached later on the bit will simply get set 805 * again. 806 */ 807 if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL) { 808 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 809 hammer2_chain_drop(chain); 810 } 811 812 /* 813 * The chain may need its blockrefs updated in the parent. This 814 * requires some fancy footwork. 815 */ 816 if (chain->flags & HAMMER2_CHAIN_UPDATE) { 817 hammer2_blockref_t *base; 818 int count; 819 820 /* 821 * Both parent and chain must be locked. This requires 822 * temporarily unlocking the chain. We have to deal with 823 * the case where the chain might be reparented or modified 824 * while it was unlocked. 825 */ 826 hammer2_chain_unlock(chain); 827 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 828 hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE); 829 if (chain->parent != parent) { 830 kprintf("PARENT MISMATCH ch=%p p=%p/%p\n", 831 chain, chain->parent, parent); 832 hammer2_chain_unlock(parent); 833 goto done; 834 } 835 836 /* 837 * Check race condition. If someone got in and modified 838 * it again while it was unlocked, we have to loop up. 839 */ 840 if (chain->flags & HAMMER2_CHAIN_MODIFIED) { 841 hammer2_chain_unlock(parent); 842 kprintf("hammer2_flush: chain %p flush-mod race\n", 843 chain); 844 goto again; 845 } 846 847 /* 848 * Clear UPDATE flag, mark parent modified, update its 849 * modify_tid if necessary, and adjust the parent blockmap. 850 */ 851 if (chain->flags & HAMMER2_CHAIN_UPDATE) { 852 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE); 853 hammer2_chain_drop(chain); 854 } 855 856 /* 857 * (optional code) 858 * 859 * Avoid actually modifying and updating the parent if it 860 * was flagged for destruction. 
		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		if (chain->flags & HAMMER2_CHAIN_UPDATE) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			hammer2_chain_drop(chain);
		}

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap; the parent must
		 * be set modified.
		 */
		hammer2_chain_modify(parent, 0, 0);
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count,
						    &info->cache_index, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    &info->cache_index, chain);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
		hammer2_chain_unlock(parent);
	}
skipupdate:
	;

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}
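/*
 * NOTE: In the blocktable code above, BMAPPED means the chain currently
 *	 has an entry in the parent's block table and BMAPUPD means that
 *	 entry is stale.  A stale entry is deleted first (base_delete
 *	 clears both bits), after which any chain not BMAPPED is
 *	 (re)inserted, so an UPDATE normally leaves the parent's table
 *	 pointing at the chain's current bref.
 */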
/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 * needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	/*
	 * Recurse and collect deferral data.  We're in the media flush;
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

	/*
	 * Relock to continue the loop
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

/*
 * flush helper (direct)
 *
 * Quickly flushes any dirty chains for a device.  This will update our
 * concept of the volume root but does NOT flush the actual volume root
 * and does not flush dirty device buffers.
 *
 * This function is primarily used by the bulkfree code to allow it to
 * create a snapshot for the pass.  It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
	hammer2_chain_t *chain;

	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_ALL);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
}
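/*
 * Illustrative sketch (not compiled in): how a media scan such as
 * bulkfree might use hammer2_flush_quick() to get a stable view of the
 * topology before iterating it.  The helper name and the scan body are
 * hypothetical; hammer2_flush_quick() is the real entry point above.
 */
#if 0
static void
example_bulk_scan(hammer2_dev_t *hmp)
{
	hammer2_flush_quick(hmp);	/* stabilize vchain's view */
	/* ... iterate the now-consistent topology here ... */
}
#endif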
/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int error = 0;
	int total_error = 0;
	int j;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
			parent = chain->parent;
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Flush volume roots.  Avoid replication; we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * so mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	error = 0;

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);
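	/*
	 * NOTE: Ordering is critical here.  The volume header is the root
	 *	 of all media references, so everything it points to must
	 *	 reach stable storage before the header itself is rewritten.
	 */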
1182 * 1183 * XXX synchronize the flag & data with only this flush XXX 1184 */ 1185 if (error == 0 && 1186 (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) { 1187 struct buf *bp; 1188 1189 /* 1190 * Synchronize the disk before flushing the volume 1191 * header. 1192 */ 1193 bp = getpbuf(NULL); 1194 bp->b_bio1.bio_offset = 0; 1195 bp->b_bufsize = 0; 1196 bp->b_bcount = 0; 1197 bp->b_cmd = BUF_CMD_FLUSH; 1198 bp->b_bio1.bio_done = biodone_sync; 1199 bp->b_bio1.bio_flags |= BIO_SYNC; 1200 vn_strategy(hmp->devvp, &bp->b_bio1); 1201 biowait(&bp->b_bio1, "h2vol"); 1202 relpbuf(bp, NULL); 1203 1204 /* 1205 * Then we can safely flush the version of the 1206 * volume header synchronized by the flush code. 1207 */ 1208 j = hmp->volhdrno + 1; 1209 if (j >= HAMMER2_NUM_VOLHDRS) 1210 j = 0; 1211 if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE > 1212 hmp->volsync.volu_size) { 1213 j = 0; 1214 } 1215 if (hammer2_debug & 0x8000) { 1216 /* debug only, avoid syslogd loop */ 1217 kprintf("sync volhdr %d %jd\n", 1218 j, (intmax_t)hmp->volsync.volu_size); 1219 } 1220 bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64, 1221 HAMMER2_PBUFSIZE, 0, 0); 1222 atomic_clear_int(&hmp->vchain.flags, 1223 HAMMER2_CHAIN_VOLUMESYNC); 1224 bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE); 1225 bawrite(bp); 1226 hmp->volhdrno = j; 1227 } 1228 if (error) 1229 total_error = error; 1230 1231 hammer2_trans_done(hmp->spmp); /* spmp trans */ 1232 skip: 1233 error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error); 1234 } 1235