/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <vm/vm_page2.h>

#include "hammer.h"

static int	hammer_unload_inode(hammer_inode_t ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);
static hammer_inode_t __hammer_find_inode(hammer_transaction_t trans,
					int64_t obj_id, hammer_tid_t asof,
					uint32_t localization);

struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}

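/*
 * NOTE (added summary): hammer_ino_rb_compare() orders inodes by
 * (localization, obj_id, obj_asof), with obj_asof as the lowest-priority
 * field, so all as-of snapshots of a given object are adjacent in the
 * tree.  The snapshot and PFS scan helpers below rely on that ordering.
 */
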
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	uint32_t localization = *(uint32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
		hammer_pfs_rb_compare, uint32_t, localization);

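/*
 * NOTE (added summary): the RB_GENERATE*() macros above expand the
 * comparison routines into the tree implementations used throughout this
 * file.  hammer_ino_rb_tree additionally gets an INFO-keyed lookup
 * (hammer_ino_rb_tree_RB_LOOKUP_INFO and the RB_SCAN variant used below),
 * and hammer_pfs_rb_tree can be looked up directly by its localization
 * field via RB_LOOKUP().
 */
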
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);
	hammer_mount_t hmp;

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		lwkt_reltoken(&hmp->fs_token);
		vrecycle(ap->a_vp);
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer_inode_t ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaims;
			++hmp->count_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		vclrisdirty(vp);
		hammer_rel_inode(ip, 1);
		lwkt_reltoken(&hmp->fs_token);
	}
	return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(hammer_inode_t ip)
{
	struct vnode *vp;

	if ((ip->flags & HAMMER_INODE_MODMASK) &&
	    (vp = ip->vp) != NULL &&
	    (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
		vsetisdirty(vp);
	}
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(hammer_inode_t ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	uint8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT) {
				if (ip->obj_asof == hmp->asof) {
					if (ip->obj_localization ==
					    HAMMER_DEF_LOCALIZATION)
						vsetflags(vp, VROOT);
					else
						vsetflags(vp, VPFSROOT);
				} else {
					vsetflags(vp, VPFSROOT);
				}
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			vx_downgrade(vp);
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
hammer_inode_t
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, uint32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_cursor cursor;
	hammer_inode_t ip;


	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
loop:
	*errorp = 0;
	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		HAMMER_MAX_KEY;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization | HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = asof;
	cursor.flags = HAMMER_CURSOR_GET_DATA | HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);

	/*
	 * NEWINODE is only set if the inode becomes dirty later,
	 * setting it here just leads to unnecessary stalls.
	 *
	 * trans->flags |= HAMMER_TRANSF_NEWINODE;
	 */
	return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
hammer_inode_t
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, uint32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
loop:
	*errorp = 0;
	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		HAMMER_MAX_KEY;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 * Dummy inodes do not count.
 */
hammer_inode_t
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, uint32_t localization)
{
	hammer_inode_t ip;

	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}

/*
 * Return an inode only if it is in our inode cache.  This function does
 * not add a reference to the inode.
 */
static hammer_inode_t
__hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		    hammer_tid_t asof, uint32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	hammer_inode_t ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);

	return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a non-root PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, hammer_inode_t *ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	uint32_t dummy;

	hmp = trans->hmp;

	/*
	 * Disallow the creation of new inodes in directories which
	 * have been deleted.  In HAMMER, this will cause a record
	 * syncing assertion later on in the flush code.
	 */
	if (dip && dip->ino_data.nlinks == 0) {
		*ipp = NULL;
		return (EINVAL);
	}

	/*
	 * Allocate inode
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != HAMMER_DEF_LOCALIZATION);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_direntry_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = HAMMER_MAX_KEY;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization |
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}
	if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIRHASH_ALG1;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid if dip exists.
	 * The inode is probably a PFS root if dip is NULL.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hpanic("duplicate obj_id %jx", (intmax_t)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp;

	hmp = ip->hmp;
	KKASSERT(hammer_oneref(&ip->lock));
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     uint32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (hammer_is_pfs_deleted(&cursor.data->pfsd)) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).
	 */
	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization |
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 *
 * Make sure a caller isn't creating a PFS from a non-root PFS.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		if (lo_to_pfs(dip->obj_localization) != HAMMER_ROOT_PFSID) {
			hmkprintf(trans->hmp,
				"Warning: creating a PFS from non-root PFS "
				"is not allowed\n");
			return(EINVAL);
		}
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->vp && (ip->vp->v_flag & VPFSROOT)) {
		/*
		 * The hammer pfs-upgrade directive itself might have the
		 * root of the pfs open.  Just allow it.
		 */
		res = 0;
	} else {
		/*
		 * Don't allow any subdirectories or files to be open.
		 */
		if (hammer_isactive(&ip->lock) == 2 && ip->vp)
			vclean_unlocked(ip->vp);	/* might not succeed */
		if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
			res = 0;
		else
			res = -1;	/* stop, someone is using the inode */
	}
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_rel(&pfsm->lock);
	if (hammer_norefs(&pfsm->lock)) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization |
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			hdkprintf("IPDEL %p %08x %d\n", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				hdkprintf("error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				hdkprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				hdkprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				hdkprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				hdkprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					hdkprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}

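/*
 * NOTE (added summary): hammer_update_inode() above implements the
 * historical update by delete-marking the existing on-disk INODE record at
 * trans->tid and then laying down a fresh record whose create_tid is that
 * same tid, so as-of lookups at older TIDs continue to see the previous
 * version.  Whether the old record is physically destroyed depends on the
 * retention policy, as noted above.
 */
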
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization |
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     &cursor->data->inode.mtime,
				     sizeof(cursor->data->inode.atime) +
				     sizeof(cursor->data->inode.mtime));
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer_noundo(trans, cursor->data_buffer);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(hammer_inode_t ip, int flush)
{
	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (hammer_oneref(&ip->lock)) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (hammer_oneref(&ip->lock)) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(hammer_isactive(&ip->lock) >= 1);
			if (hammer_isactive(&ip->lock) > 1) {
				hammer_rel(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(hammer_oneref(&ip->lock),
		("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(hammer_oneref(&rec->lock));
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_rel(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		hdkprintf("Unexpected vnode association ip %p vp %p\n",
			ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	/*
	 * Set the NEWINODE flag in the transaction if the inode
	 * transitions to a dirty state.  This is used to track
	 * the load on the inode cache.
	 */
	if (trans &&
	    (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (flags & HAMMER_INODE_MODMASK)) {
		trans->flags |= HAMMER_TRANSF_NEWINODE;
	}
	if (flags & HAMMER_INODE_MODMASK)
		hammer_inode_dirty(ip);
	ip->flags |= flags;
}

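/*
 * Usage sketch (added, illustration only): a setattr-style operation that
 * changes ip->ino_data.mode on the frontend would follow the update with
 *
 *	hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
 *
 * so the change is tracked via HAMMER_INODE_MODMASK and written out when
 * the inode is eventually flushed.  hammer_mkroot_pseudofs() above uses
 * the same pattern after bumping nlinks.
 */
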
/*
 * Attempt to quickly update the atime for a hammer inode.  Return 0 on
 * success, -1 on failure.
 *
 * We attempt to update the atime with only the ip lock and not the
 * whole filesystem lock in order to improve concurrency.  We can only
 * do this safely if the ATIME flag is already pending on the inode.
 *
 * This function is called via a vnops path (ip pointer is stable) without
 * fs_token held.
 */
int
hammer_update_atime_quick(hammer_inode_t ip)
{
	struct timeval tv;
	int res = -1;

	if ((ip->flags & HAMMER_INODE_RO) ||
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
		/*
		 * Silently indicate success on read-only mount/snap
		 */
		res = 0;
	} else if (ip->flags & HAMMER_INODE_ATIME) {
		/*
		 * Double check with inode lock held against backend.  This
		 * is only safe if all we need to do is update
		 * ino_data.atime.
		 */
		getmicrotime(&tv);
		hammer_lock_ex(&ip->lock);
		if (ip->flags & HAMMER_INODE_ATIME) {
			ip->ino_data.atime =
			    (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
			res = 0;
		}
		hammer_unlock(&ip->lock);
	}
	return res;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * fill_flush_group is the first flush group we may be able to
	 * continue filling, it may be open or closed but it will always
	 * be past the currently flushing (running) flg.
	 *
	 * next_flush_group is the next open flush group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->fill_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
		    flg->total_count <= hammer_autoflush) {
			break;
		}
		hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		flg->seq = hmp->flusher.next++;
		if (hmp->next_flush_group == NULL)
			hmp->next_flush_group = flg;
		if (hmp->fill_flush_group == NULL)
			hmp->fill_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}

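/*
 * Summary of the inode flush states used above (added, as implemented in
 * this file):
 *
 *	HAMMER_FST_IDLE  - the inode is not queued for flushing and has no
 *			   flush dependencies attached.
 *	HAMMER_FST_SETUP - dirty records/dependencies exist but the inode
 *			   has not yet been placed in a flush group.
 *	HAMMER_FST_FLUSH - the inode is attached to a flush group and will
 *			   be synced by the flusher.
 *
 * The SETUP and FLUSH states each hold an extra reference on the inode.
 */
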
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		if (hammer_debug_general & 0x10000)
			hkrateprintf(&hammer_gen_krate,
			    "Warning: depth limit reached on "
			    "setup recursion, inode %p %016jx\n",
			    ip, (intmax_t)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}

/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record		= record in question (*rec in below)
 * record->ip		= parent inode (*pip in below)
 * record->target_ip	= child inode (*ip in below)
 *
 *	*pip--------------\
 *	 ^		   \rec_tree
 *	  \		    \
 *	   \ip		    /\\\\\ rbtree of recs from parent inode's view
 *	    \		   //\\\\\\
 *	     \		  / ........
 *	      \		 /
 *	       \------*rec------target_ip------>*ip
 *		   ...target_entry<----...----->target_list<---...
 *				list of recs from inode's view
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
1960 */ 1961 if (record->flush_state == HAMMER_FST_FLUSH) { 1962 /* 1963 * If not in our flush group ask the parent to reflush 1964 * us as soon as possible. 1965 */ 1966 if (record->flush_group != flg) { 1967 pip->flags |= HAMMER_INODE_REFLUSH; 1968 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1969 return(-1); 1970 } 1971 1972 /* 1973 * If in our flush group everything is already set up, 1974 * just return whether the record will improve our 1975 * visibility or not. 1976 */ 1977 if (record->type == HAMMER_MEM_RECORD_ADD) 1978 return(1); 1979 return(0); 1980 } 1981 1982 /* 1983 * It must be a setup record. Try to resolve the setup dependancies 1984 * by recursing upwards so we can place ip on the flush list. 1985 * 1986 * Limit ourselves to 20 levels of recursion to avoid blowing out 1987 * the kernel stack. If we hit the recursion limit we can't flush 1988 * until the parent flushes. The parent will flush independantly 1989 * on its own and ultimately a deep recursion will be resolved. 1990 */ 1991 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1992 1993 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1994 1995 /* 1996 * If good < 0 the parent has no connectivity and we cannot safely 1997 * flush the directory entry, which also means we can't flush our 1998 * ip. Flag us for downward recursion once the parent's 1999 * connectivity is resolved. Flag the parent for [re]flush or it 2000 * may not check for downward recursions. 2001 */ 2002 if (good < 0) { 2003 pip->flags |= HAMMER_INODE_REFLUSH; 2004 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2005 return(good); 2006 } 2007 2008 /* 2009 * We are go, place the parent inode in a flushing state so we can 2010 * place its record in a flushing state. Note that the parent 2011 * may already be flushing. The record must be in the same flush 2012 * group as the parent. 2013 */ 2014 if (pip->flush_state != HAMMER_FST_FLUSH) 2015 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 2016 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 2017 2018 /* 2019 * It is possible for a rename to create a loop in the recursion 2020 * and revisit a record. This will result in the record being 2021 * placed in a flush state unexpectedly. This check deals with 2022 * the case. 2023 */ 2024 if (record->flush_state == HAMMER_FST_FLUSH) { 2025 if (record->type == HAMMER_MEM_RECORD_ADD) 2026 return(1); 2027 return(0); 2028 } 2029 2030 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 2031 2032 #if 0 2033 if (record->type == HAMMER_MEM_RECORD_DEL && 2034 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 2035 /* 2036 * Regardless of flushing state we cannot sync this path if the 2037 * record represents a delete-on-disk but the target inode 2038 * is not ready to sync its own deletion. 2039 * 2040 * XXX need to count effective nlinks to determine whether 2041 * the flush is ok, otherwise removing a hardlink will 2042 * just leave the DEL record to rot. 2043 */ 2044 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 2045 return(-1); 2046 } else 2047 #endif 2048 if (pip->flush_group == flg) { 2049 /* 2050 * Because we have not calculated nlinks yet we can just 2051 * set records to the flush state if the parent is in 2052 * the same flush group as we are. 2053 */ 2054 record->flush_state = HAMMER_FST_FLUSH; 2055 record->flush_group = flg; 2056 ++record->flush_group->refs; 2057 hammer_ref(&record->lock); 2058 2059 /* 2060 * A general directory-add contributes to our visibility. 
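		 * Returning 1 tells the child it has connectivity and can
		 * be placed in the same flush group.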
2061 * 2062 * Otherwise it is probably a directory-delete or 2063 * delete-on-disk record and does not contribute to our 2064 * visbility (but we can still flush it). 2065 */ 2066 if (record->type == HAMMER_MEM_RECORD_ADD) 2067 return(1); 2068 return(0); 2069 } else { 2070 /* 2071 * If the parent is not in our flush group we cannot 2072 * flush this record yet, there is no visibility. 2073 * We tell the parent to reflush and mark ourselves 2074 * so the parent knows it should flush us too. 2075 */ 2076 pip->flags |= HAMMER_INODE_REFLUSH; 2077 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2078 return(-1); 2079 } 2080 } 2081 2082 /* 2083 * This is the core routine placing an inode into the FST_FLUSH state. 2084 */ 2085 static void 2086 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags) 2087 { 2088 hammer_mount_t hmp = ip->hmp; 2089 int go_count; 2090 2091 /* 2092 * Set flush state and prevent the flusher from cycling into 2093 * the next flush group. Do not place the ip on the list yet. 2094 * Inodes not in the idle state get an extra reference. 2095 */ 2096 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH); 2097 if (ip->flush_state == HAMMER_FST_IDLE) 2098 hammer_ref(&ip->lock); 2099 ip->flush_state = HAMMER_FST_FLUSH; 2100 ip->flush_group = flg; 2101 ++hmp->flusher.group_lock; 2102 ++hmp->count_iqueued; 2103 ++hammer_count_iqueued; 2104 ++flg->total_count; 2105 hammer_redo_fifo_start_flush(ip); 2106 2107 #if 0 2108 /* 2109 * We need to be able to vfsync/truncate from the backend. 2110 * 2111 * XXX Any truncation from the backend will acquire the vnode 2112 * independently. 2113 */ 2114 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0); 2115 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) { 2116 ip->flags |= HAMMER_INODE_VHELD; 2117 vref(ip->vp); 2118 } 2119 #endif 2120 2121 /* 2122 * Figure out how many in-memory records we can actually flush 2123 * (not including inode meta-data, buffers, etc). 2124 */ 2125 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0); 2126 if (flags & HAMMER_FLUSH_RECURSION) { 2127 /* 2128 * If this is a upwards recursion we do not want to 2129 * recurse down again! 2130 */ 2131 go_count = 1; 2132 #if 0 2133 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2134 /* 2135 * No new records are added if we must complete a flush 2136 * from a previous cycle, but we do have to move the records 2137 * from the previous cycle to the current one. 2138 */ 2139 #if 0 2140 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2141 hammer_syncgrp_child_callback, NULL); 2142 #endif 2143 go_count = 1; 2144 #endif 2145 } else { 2146 /* 2147 * Normal flush, scan records and bring them into the flush. 2148 * Directory adds and deletes are usually skipped (they are 2149 * grouped with the related inode rather then with the 2150 * directory). 2151 * 2152 * go_count can be negative, which means the scan aborted 2153 * due to the flush group being over-full and we should 2154 * flush what we have. 2155 */ 2156 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2157 hammer_setup_child_callback, NULL); 2158 } 2159 2160 /* 2161 * This is a more involved test that includes go_count. If we 2162 * can't flush, flag the inode and return. If go_count is 0 we 2163 * were are unable to flush any records in our rec_tree and 2164 * must ignore the XDIRTY flag. 
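	 * Only a go_count of 0 combined with no other dirty state
	 * (HAMMER_INODE_MODMASK_NOXDIRTY) backs the inode out to SETUP
	 * below; a negative go_count still flushes whatever was marked.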
2165 */ 2166 if (go_count == 0) { 2167 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) { 2168 --hmp->count_iqueued; 2169 --hammer_count_iqueued; 2170 2171 --flg->total_count; 2172 ip->flush_state = HAMMER_FST_SETUP; 2173 ip->flush_group = NULL; 2174 if (flags & HAMMER_FLUSH_SIGNAL) { 2175 ip->flags |= HAMMER_INODE_REFLUSH | 2176 HAMMER_INODE_RESIGNAL; 2177 } else { 2178 ip->flags |= HAMMER_INODE_REFLUSH; 2179 } 2180 #if 0 2181 if (ip->flags & HAMMER_INODE_VHELD) { 2182 ip->flags &= ~HAMMER_INODE_VHELD; 2183 vrele(ip->vp); 2184 } 2185 #endif 2186 2187 /* 2188 * REFLUSH is needed to trigger dependancy wakeups 2189 * when an inode is in SETUP. 2190 */ 2191 ip->flags |= HAMMER_INODE_REFLUSH; 2192 if (--hmp->flusher.group_lock == 0) 2193 wakeup(&hmp->flusher.group_lock); 2194 return; 2195 } 2196 } 2197 2198 /* 2199 * Snapshot the state of the inode for the backend flusher. 2200 * 2201 * We continue to retain save_trunc_off even when all truncations 2202 * have been resolved as an optimization to determine if we can 2203 * skip the B-Tree lookup for overwrite deletions. 2204 * 2205 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 2206 * and stays in ip->flags. Once set, it stays set until the 2207 * inode is destroyed. 2208 */ 2209 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2210 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 2211 ip->sync_trunc_off = ip->trunc_off; 2212 ip->trunc_off = HAMMER_MAX_KEY; 2213 ip->flags &= ~HAMMER_INODE_TRUNCATED; 2214 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 2215 2216 /* 2217 * The save_trunc_off used to cache whether the B-Tree 2218 * holds any records past that point is not used until 2219 * after the truncation has succeeded, so we can safely 2220 * set it now. 2221 */ 2222 if (ip->save_trunc_off > ip->sync_trunc_off) 2223 ip->save_trunc_off = ip->sync_trunc_off; 2224 } 2225 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 2226 ~HAMMER_INODE_TRUNCATED); 2227 ip->sync_ino_leaf = ip->ino_leaf; 2228 ip->sync_ino_data = ip->ino_data; 2229 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 2230 2231 /* 2232 * The flusher list inherits our inode and reference. 2233 */ 2234 KKASSERT(flg->running == 0); 2235 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip); 2236 if (--hmp->flusher.group_lock == 0) 2237 wakeup(&hmp->flusher.group_lock); 2238 2239 /* 2240 * Auto-flush the group if it grows too large. Make sure the 2241 * inode reclaim wait pipeline continues to work. 2242 */ 2243 if (flg->total_count >= hammer_autoflush || 2244 flg->total_count >= hammer_limit_reclaims / 4) { 2245 if (hmp->fill_flush_group == flg) 2246 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 2247 hammer_flusher_async(hmp, flg); 2248 } 2249 } 2250 2251 /* 2252 * Callback for scan of ip->rec_tree. Try to include each record in our 2253 * flush. ip->flush_group has been set but the inode has not yet been 2254 * moved into a flushing state. 2255 * 2256 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 2257 * both inodes. 2258 * 2259 * We return 1 for any record placed or found in FST_FLUSH, which prevents 2260 * the caller from shortcutting the flush. 2261 */ 2262 static int 2263 hammer_setup_child_callback(hammer_record_t rec, void *data) 2264 { 2265 hammer_flush_group_t flg; 2266 hammer_inode_t target_ip; 2267 hammer_inode_t ip; 2268 int r; 2269 2270 /* 2271 * Records deleted or committed by the backend are ignored. 2272 * Note that the flush detects deleted frontend records at 2273 * multiple points to deal with races. 
This is just the first 2274 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot 2275 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it 2276 * messes up link-count calculations. 2277 * 2278 * NOTE: Don't get confused between record deletion and, say, 2279 * directory entry deletion. The deletion of a directory entry 2280 * which is on-media has nothing to do with the record deletion 2281 * flags. 2282 */ 2283 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE | 2284 HAMMER_RECF_COMMITTED)) { 2285 if (rec->flush_state == HAMMER_FST_FLUSH) { 2286 KKASSERT(rec->flush_group == rec->ip->flush_group); 2287 r = 1; 2288 } else { 2289 r = 0; 2290 } 2291 return(r); 2292 } 2293 2294 /* 2295 * If the record is in an idle state it has no dependancies and 2296 * can be flushed. 2297 */ 2298 ip = rec->ip; 2299 flg = ip->flush_group; 2300 r = 0; 2301 2302 switch(rec->flush_state) { 2303 case HAMMER_FST_IDLE: 2304 /* 2305 * The record has no setup dependancy, we can flush it. 2306 */ 2307 KKASSERT(rec->target_ip == NULL); 2308 rec->flush_state = HAMMER_FST_FLUSH; 2309 rec->flush_group = flg; 2310 ++flg->refs; 2311 hammer_ref(&rec->lock); 2312 r = 1; 2313 break; 2314 case HAMMER_FST_SETUP: 2315 /* 2316 * The record has a setup dependancy. These are typically 2317 * directory entry adds and deletes. Such entries will be 2318 * flushed when their inodes are flushed so we do not 2319 * usually have to add them to the flush here. However, 2320 * if the target_ip has set HAMMER_INODE_CONN_DOWN then 2321 * it is asking us to flush this record (and it). 2322 */ 2323 target_ip = rec->target_ip; 2324 KKASSERT(target_ip != NULL); 2325 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE); 2326 2327 /* 2328 * If the target IP is already flushing in our group 2329 * we could associate the record, but target_ip has 2330 * already synced ino_data to sync_ino_data and we 2331 * would also have to adjust nlinks. Plus there are 2332 * ordering issues for adds and deletes. 2333 * 2334 * Reflush downward if this is an ADD, and upward if 2335 * this is a DEL. 2336 */ 2337 if (target_ip->flush_state == HAMMER_FST_FLUSH) { 2338 if (rec->type == HAMMER_MEM_RECORD_ADD) 2339 ip->flags |= HAMMER_INODE_REFLUSH; 2340 else 2341 target_ip->flags |= HAMMER_INODE_REFLUSH; 2342 break; 2343 } 2344 2345 /* 2346 * Target IP is not yet flushing. This can get complex 2347 * because we have to be careful about the recursion. 2348 * 2349 * Directories create an issue for us in that if a flush 2350 * of a directory is requested the expectation is to flush 2351 * any pending directory entries, but this will cause the 2352 * related inodes to recursively flush as well. We can't 2353 * really defer the operation so just get as many as we 2354 * can and 2355 */ 2356 #if 0 2357 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 && 2358 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) { 2359 /* 2360 * We aren't reclaiming and the target ip was not 2361 * previously prevented from flushing due to this 2362 * record dependancy. Do not flush this record. 2363 */ 2364 /*r = 0;*/ 2365 } else 2366 #endif 2367 if (flg->total_count + flg->refs > 2368 ip->hmp->undo_rec_limit) { 2369 /* 2370 * Our flush group is over-full and we risk blowing 2371 * out the UNDO FIFO. Stop the scan, flush what we 2372 * have, then reflush the directory. 2373 * 2374 * The directory may be forced through multiple 2375 * flush groups before it can be completely 2376 * flushed. 
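			 * Returning -1 terminates the RB_SCAN; the caller
			 * sees a negative go_count and flushes what has
			 * already been marked.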
2377 */ 2378 ip->flags |= HAMMER_INODE_RESIGNAL | 2379 HAMMER_INODE_REFLUSH; 2380 r = -1; 2381 } else if (rec->type == HAMMER_MEM_RECORD_ADD) { 2382 /* 2383 * If the target IP is not flushing we can force 2384 * it to flush, even if it is unable to write out 2385 * any of its own records we have at least one in 2386 * hand that we CAN deal with. 2387 */ 2388 rec->flush_state = HAMMER_FST_FLUSH; 2389 rec->flush_group = flg; 2390 ++flg->refs; 2391 hammer_ref(&rec->lock); 2392 hammer_flush_inode_core(target_ip, flg, 2393 HAMMER_FLUSH_RECURSION); 2394 r = 1; 2395 } else { 2396 /* 2397 * General or delete-on-disk record. 2398 * 2399 * XXX this needs help. If a delete-on-disk we could 2400 * disconnect the target. If the target has its own 2401 * dependancies they really need to be flushed. 2402 * 2403 * XXX 2404 */ 2405 rec->flush_state = HAMMER_FST_FLUSH; 2406 rec->flush_group = flg; 2407 ++flg->refs; 2408 hammer_ref(&rec->lock); 2409 hammer_flush_inode_core(target_ip, flg, 2410 HAMMER_FLUSH_RECURSION); 2411 r = 1; 2412 } 2413 break; 2414 case HAMMER_FST_FLUSH: 2415 /* 2416 * The record could be part of a previous flush group if the 2417 * inode is a directory (the record being a directory entry). 2418 * Once the flush group was closed a hammer_test_inode() 2419 * function can cause a new flush group to be setup, placing 2420 * the directory inode itself in a new flush group. 2421 * 2422 * When associated with a previous flush group we count it 2423 * as if it were in our current flush group, since it will 2424 * effectively be flushed by the time we flush our current 2425 * flush group. 2426 */ 2427 KKASSERT( 2428 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY || 2429 rec->flush_group == flg); 2430 r = 1; 2431 break; 2432 } 2433 return(r); 2434 } 2435 2436 #if 0 2437 /* 2438 * This version just moves records already in a flush state to the new 2439 * flush group and that is it. 2440 */ 2441 static int 2442 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2443 { 2444 hammer_inode_t ip = rec->ip; 2445 2446 switch(rec->flush_state) { 2447 case HAMMER_FST_FLUSH: 2448 KKASSERT(rec->flush_group == ip->flush_group); 2449 break; 2450 default: 2451 break; 2452 } 2453 return(0); 2454 } 2455 #endif 2456 2457 /* 2458 * Wait for a previously queued flush to complete. 2459 * 2460 * If a critical error occured we don't try to wait. 2461 */ 2462 void 2463 hammer_wait_inode(hammer_inode_t ip) 2464 { 2465 /* 2466 * The inode can be in a SETUP state in which case RESIGNAL 2467 * should be set. If RESIGNAL is not set then the previous 2468 * flush completed and a later operation placed the inode 2469 * in a passive setup state again, so we're done. 2470 * 2471 * The inode can be in a FLUSH state in which case we 2472 * can just wait for completion. 2473 */ 2474 while (ip->flush_state == HAMMER_FST_FLUSH || 2475 (ip->flush_state == HAMMER_FST_SETUP && 2476 (ip->flags & HAMMER_INODE_RESIGNAL))) { 2477 /* 2478 * Don't try to flush on a critical error 2479 */ 2480 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) 2481 break; 2482 2483 /* 2484 * If the inode was already being flushed its flg 2485 * may not have been queued to the backend. We 2486 * have to make sure it gets queued or we can wind 2487 * up blocked or deadlocked (particularly if we are 2488 * the vnlru thread). 
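		 * A flush group whose ->closed flag is still 0 has not
		 * been handed to the flusher yet, so signal it ourselves
		 * before going to sleep.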
2489 */ 2490 if (ip->flush_state == HAMMER_FST_FLUSH) { 2491 KKASSERT(ip->flush_group); 2492 if (ip->flush_group->closed == 0) { 2493 if (hammer_debug_inode) { 2494 hkprintf("debug: forcing " 2495 "async flush ip %016jx\n", 2496 (intmax_t)ip->obj_id); 2497 } 2498 hammer_flusher_async(ip->hmp, ip->flush_group); 2499 continue; /* retest */ 2500 } 2501 } 2502 2503 /* 2504 * In a flush state with the flg queued to the backend 2505 * or in a setup state with RESIGNAL set, we can safely 2506 * wait. 2507 */ 2508 ip->flags |= HAMMER_INODE_FLUSHW; 2509 tsleep(&ip->flags, 0, "hmrwin", 0); 2510 } 2511 2512 #if 0 2513 /* 2514 * The inode may have been in a passive setup state, 2515 * call flush to make sure we get signaled. 2516 */ 2517 if (ip->flush_state == HAMMER_FST_SETUP) 2518 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2519 #endif 2520 2521 } 2522 2523 /* 2524 * Called by the backend code when a flush has been completed. 2525 * The inode has already been removed from the flush list. 2526 * 2527 * A pipelined flush can occur, in which case we must re-enter the 2528 * inode on the list and re-copy its fields. 2529 */ 2530 void 2531 hammer_sync_inode_done(hammer_inode_t ip, int error) 2532 { 2533 hammer_mount_t hmp; 2534 int dorel; 2535 2536 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH); 2537 2538 hmp = ip->hmp; 2539 2540 /* 2541 * Auto-reflush if the backend could not completely flush 2542 * the inode. This fixes a case where a deferred buffer flush 2543 * could cause fsync to return early. 2544 */ 2545 if (ip->sync_flags & HAMMER_INODE_MODMASK) 2546 ip->flags |= HAMMER_INODE_REFLUSH; 2547 2548 /* 2549 * Merge left-over flags back into the frontend and fix the state. 2550 * Incomplete truncations are retained by the backend. 2551 */ 2552 ip->error = error; 2553 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED; 2554 ip->sync_flags &= HAMMER_INODE_TRUNCATED; 2555 2556 /* 2557 * The backend may have adjusted nlinks, so if the adjusted nlinks 2558 * does not match the fronttend set the frontend's DDIRTY flag again. 2559 */ 2560 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks) 2561 ip->flags |= HAMMER_INODE_DDIRTY; 2562 2563 /* 2564 * Fix up the dirty buffer status. 2565 */ 2566 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) { 2567 ip->flags |= HAMMER_INODE_BUFS; 2568 } 2569 hammer_redo_fifo_end_flush(ip); 2570 2571 /* 2572 * Re-set the XDIRTY flag if some of the inode's in-memory records 2573 * could not be flushed. 2574 */ 2575 KKASSERT((RB_EMPTY(&ip->rec_tree) && 2576 (ip->flags & HAMMER_INODE_XDIRTY) == 0) || 2577 (!RB_EMPTY(&ip->rec_tree) && 2578 (ip->flags & HAMMER_INODE_XDIRTY) != 0)); 2579 2580 /* 2581 * Do not lose track of inodes which no longer have vnode 2582 * assocations, otherwise they may never get flushed again. 2583 * 2584 * The reflush flag can be set superfluously, causing extra pain 2585 * for no reason. If the inode is no longer modified it no longer 2586 * needs to be flushed. 2587 */ 2588 if (ip->flags & HAMMER_INODE_MODMASK) { 2589 if (ip->vp == NULL) 2590 ip->flags |= HAMMER_INODE_REFLUSH; 2591 } else { 2592 ip->flags &= ~HAMMER_INODE_REFLUSH; 2593 } 2594 2595 /* 2596 * The fs token is held but the inode lock is not held. Because this 2597 * is a backend flush it is possible that the vnode has no references 2598 * and cause a reclaim race inside vsetisdirty() if/when it blocks. 2599 * 2600 * Therefore, we must lock the inode around this particular dirtying 2601 * operation. 
We don't have to around other dirtying operations 2602 * where the vnode is implicitly or explicitly held. 2603 */ 2604 if (ip->flags & HAMMER_INODE_MODMASK) { 2605 hammer_lock_ex(&ip->lock); 2606 hammer_inode_dirty(ip); 2607 hammer_unlock(&ip->lock); 2608 } 2609 2610 /* 2611 * Adjust the flush state. 2612 */ 2613 if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2614 /* 2615 * We were unable to flush out all our records, leave the 2616 * inode in a flush state and in the current flush group. 2617 * The flush group will be re-run. 2618 * 2619 * This occurs if the UNDO block gets too full or there is 2620 * too much dirty meta-data and allows the flusher to 2621 * finalize the UNDO block and then re-flush. 2622 */ 2623 ip->flags &= ~HAMMER_INODE_WOULDBLOCK; 2624 dorel = 0; 2625 } else { 2626 /* 2627 * Remove from the flush_group 2628 */ 2629 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 2630 ip->flush_group = NULL; 2631 2632 #if 0 2633 /* 2634 * Clean up the vnode ref and tracking counts. 2635 */ 2636 if (ip->flags & HAMMER_INODE_VHELD) { 2637 ip->flags &= ~HAMMER_INODE_VHELD; 2638 vrele(ip->vp); 2639 } 2640 #endif 2641 --hmp->count_iqueued; 2642 --hammer_count_iqueued; 2643 2644 /* 2645 * And adjust the state. 2646 */ 2647 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) { 2648 ip->flush_state = HAMMER_FST_IDLE; 2649 dorel = 1; 2650 } else { 2651 ip->flush_state = HAMMER_FST_SETUP; 2652 dorel = 0; 2653 } 2654 2655 /* 2656 * If the frontend is waiting for a flush to complete, 2657 * wake it up. 2658 */ 2659 if (ip->flags & HAMMER_INODE_FLUSHW) { 2660 ip->flags &= ~HAMMER_INODE_FLUSHW; 2661 wakeup(&ip->flags); 2662 } 2663 2664 /* 2665 * If the frontend made more changes and requested another 2666 * flush, then try to get it running. 2667 * 2668 * Reflushes are aborted when the inode is errored out. 2669 */ 2670 if (ip->flags & HAMMER_INODE_REFLUSH) { 2671 ip->flags &= ~HAMMER_INODE_REFLUSH; 2672 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2673 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2674 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2675 } else { 2676 hammer_flush_inode(ip, 0); 2677 } 2678 } 2679 } 2680 2681 /* 2682 * If we have no parent dependancies we can clear CONN_DOWN 2683 */ 2684 if (TAILQ_EMPTY(&ip->target_list)) 2685 ip->flags &= ~HAMMER_INODE_CONN_DOWN; 2686 2687 /* 2688 * If the inode is now clean drop the space reservation. 2689 */ 2690 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 && 2691 (ip->flags & HAMMER_INODE_RSV_INODES)) { 2692 ip->flags &= ~HAMMER_INODE_RSV_INODES; 2693 --hmp->rsv_inodes; 2694 } 2695 2696 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH; 2697 2698 if (dorel) 2699 hammer_rel_inode(ip, 0); 2700 } 2701 2702 /* 2703 * Called from hammer_sync_inode() to synchronize in-memory records 2704 * to the media. 2705 */ 2706 static int 2707 hammer_sync_record_callback(hammer_record_t record, void *data) 2708 { 2709 hammer_cursor_t cursor = data; 2710 hammer_transaction_t trans = cursor->trans; 2711 hammer_mount_t hmp = trans->hmp; 2712 int error; 2713 2714 /* 2715 * Skip records that do not belong to the current flush. 
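	 * Records still in IDLE or SETUP are left alone and will be
	 * picked up by a later flush group.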
2716 */ 2717 ++hammer_stats_record_iterations; 2718 if (record->flush_state != HAMMER_FST_FLUSH) 2719 return(0); 2720 2721 if (record->flush_group != record->ip->flush_group) { 2722 hdkprintf("rec %p ip %p bad flush group %p %p\n", 2723 record, 2724 record->ip, 2725 record->flush_group, 2726 record->ip->flush_group); 2727 if (hammer_debug_critical) 2728 Debugger("blah2"); 2729 return(0); 2730 } 2731 KKASSERT(record->flush_group == record->ip->flush_group); 2732 2733 /* 2734 * Interlock the record using the BE flag. Once BE is set the 2735 * frontend cannot change the state of FE. 2736 * 2737 * NOTE: If FE is set prior to us setting BE we still sync the 2738 * record out, but the flush completion code converts it to 2739 * a delete-on-disk record instead of destroying it. 2740 */ 2741 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2742 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2743 2744 /* 2745 * The backend has already disposed of the record. 2746 */ 2747 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) { 2748 error = 0; 2749 goto done; 2750 } 2751 2752 /* 2753 * If the whole inode is being deleted and all on-disk records will 2754 * be deleted very soon, we can't sync any new records to disk 2755 * because they will be deleted in the same transaction they were 2756 * created in (delete_tid == create_tid), which will assert. 2757 * 2758 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2759 * that we currently panic on. 2760 */ 2761 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2762 switch(record->type) { 2763 case HAMMER_MEM_RECORD_DATA: 2764 /* 2765 * We don't have to do anything, if the record was 2766 * committed the space will have been accounted for 2767 * in the blockmap. 2768 */ 2769 /* fall through */ 2770 case HAMMER_MEM_RECORD_GENERAL: 2771 /* 2772 * Set deleted-by-backend flag. Do not set the 2773 * backend committed flag, because we are throwing 2774 * the record away. 2775 */ 2776 record->flags |= HAMMER_RECF_DELETED_BE; 2777 ++record->ip->rec_generation; 2778 error = 0; 2779 goto done; 2780 case HAMMER_MEM_RECORD_ADD: 2781 hpanic("illegal add during inode deletion record %p", 2782 record); 2783 break; /* NOT REACHED */ 2784 case HAMMER_MEM_RECORD_INODE: 2785 hpanic("attempt to sync inode record %p?", record); 2786 break; /* NOT REACHED */ 2787 case HAMMER_MEM_RECORD_DEL: 2788 /* 2789 * Follow through and issue the on-disk deletion 2790 */ 2791 break; 2792 } 2793 } 2794 2795 /* 2796 * If DELETED_FE is set special handling is needed for directory 2797 * entries. Dependant pieces related to the directory entry may 2798 * have already been synced to disk. If this occurs we have to 2799 * sync the directory entry and then change the in-memory record 2800 * from an ADD to a DELETE to cover the fact that it's been 2801 * deleted by the frontend. 2802 * 2803 * A directory delete covering record (MEM_RECORD_DEL) can never 2804 * be deleted by the frontend. 2805 * 2806 * Any other record type (aka DATA) can be deleted by the frontend. 2807 * XXX At the moment the flusher must skip it because there may 2808 * be another data record in the flush group for the same block, 2809 * meaning that some frontend data changes can leak into the backend's 2810 * synchronization point. 2811 */ 2812 if (record->flags & HAMMER_RECF_DELETED_FE) { 2813 if (record->type == HAMMER_MEM_RECORD_ADD) { 2814 /* 2815 * Convert a front-end deleted directory-add to 2816 * a directory-delete entry later. 
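			 * The conversion itself is handled by the flush
			 * completion code (see the NOTE above regarding the
			 * FE/BE interlock).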
2817 */ 2818 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2819 } else { 2820 /* 2821 * Dispose of the record (race case). Mark as 2822 * deleted by backend (and not committed). 2823 */ 2824 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2825 record->flags |= HAMMER_RECF_DELETED_BE; 2826 ++record->ip->rec_generation; 2827 error = 0; 2828 goto done; 2829 } 2830 } 2831 2832 /* 2833 * Assign the create_tid for new records. Deletions already 2834 * have the record's entire key properly set up. 2835 */ 2836 if (record->type != HAMMER_MEM_RECORD_DEL) { 2837 record->leaf.base.create_tid = trans->tid; 2838 record->leaf.create_ts = trans->time32; 2839 } 2840 2841 /* 2842 * This actually moves the record to the on-media B-Tree. We 2843 * must also generate REDO_TERM entries in the UNDO/REDO FIFO 2844 * indicating that the related REDO_WRITE(s) have been committed. 2845 * 2846 * During recovery any REDO_TERM's within the nominal recovery span 2847 * are ignored since the related meta-data is being undone, causing 2848 * any matching REDO_WRITEs to execute. The REDO_TERMs outside 2849 * the nominal recovery span will match against REDO_WRITEs and 2850 * prevent them from being executed (because the meta-data has 2851 * already been synchronized). 2852 */ 2853 if (record->flags & HAMMER_RECF_REDO) { 2854 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA); 2855 hammer_generate_redo(trans, record->ip, 2856 record->leaf.base.key - 2857 record->leaf.data_len, 2858 HAMMER_REDO_TERM_WRITE, 2859 NULL, 2860 record->leaf.data_len); 2861 } 2862 2863 for (;;) { 2864 error = hammer_ip_sync_record_cursor(cursor, record); 2865 if (error != EDEADLK) 2866 break; 2867 hammer_done_cursor(cursor); 2868 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2869 record->ip); 2870 if (error) 2871 break; 2872 } 2873 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2874 2875 if (error) 2876 error = -error; 2877 done: 2878 hammer_flush_record_done(record, error); 2879 2880 /* 2881 * Do partial finalization if we have built up too many dirty 2882 * buffers. Otherwise a buffer cache deadlock can occur when 2883 * doing things like creating tens of thousands of tiny files. 2884 * 2885 * We must release our cursor lock to avoid a 3-way deadlock 2886 * due to the exclusive sync lock the finalizer must get. 2887 * 2888 * WARNING: See warnings in hammer_unlock_cursor() function. 2889 */ 2890 if (hammer_flusher_meta_limit(hmp) || 2891 vm_page_count_severe()) { 2892 hammer_unlock_cursor(cursor); 2893 hammer_flusher_finalize(trans, 0); 2894 hammer_lock_cursor(cursor); 2895 } 2896 return(error); 2897 } 2898 2899 /* 2900 * Backend function called by the flusher to sync an inode to media. 2901 */ 2902 int 2903 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip) 2904 { 2905 struct hammer_cursor cursor; 2906 hammer_node_t tmp_node; 2907 hammer_record_t depend; 2908 hammer_record_t next; 2909 int error, tmp_error; 2910 uint64_t nlinks; 2911 2912 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0) 2913 return(0); 2914 2915 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 2916 if (error) 2917 goto done; 2918 2919 /* 2920 * Any directory records referencing this inode which are not in 2921 * our current flush group must adjust our nlink count for the 2922 * purposes of synchronizating to disk. 2923 * 2924 * Records which are in our flush group can be unlinked from our 2925 * inode now, potentially allowing the inode to be physically 2926 * deleted. 2927 * 2928 * This cannot block. 
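	 *
	 * An un-flushed ADD represents a link not yet on media, so it
	 * is subtracted; an un-flushed DEL represents a link still on
	 * media, so it is added back.  The adjusted count is written
	 * to sync_ino_data below only if it actually changed.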
2929 */ 2930 nlinks = ip->ino_data.nlinks; 2931 next = TAILQ_FIRST(&ip->target_list); 2932 while ((depend = next) != NULL) { 2933 next = TAILQ_NEXT(depend, target_entry); 2934 if (depend->flush_state == HAMMER_FST_FLUSH && 2935 depend->flush_group == ip->flush_group) { 2936 /* 2937 * If this is an ADD that was deleted by the frontend 2938 * the frontend nlinks count will have already been 2939 * decremented, but the backend is going to sync its 2940 * directory entry and must account for it. The 2941 * record will be converted to a delete-on-disk when 2942 * it gets synced. 2943 * 2944 * If the ADD was not deleted by the frontend we 2945 * can remove the dependancy from our target_list. 2946 */ 2947 if (depend->flags & HAMMER_RECF_DELETED_FE) { 2948 ++nlinks; 2949 } else { 2950 TAILQ_REMOVE(&ip->target_list, depend, 2951 target_entry); 2952 depend->target_ip = NULL; 2953 } 2954 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) { 2955 /* 2956 * Not part of our flush group and not deleted by 2957 * the front-end, adjust the link count synced to 2958 * the media (undo what the frontend did when it 2959 * queued the record). 2960 */ 2961 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0); 2962 switch(depend->type) { 2963 case HAMMER_MEM_RECORD_ADD: 2964 --nlinks; 2965 break; 2966 case HAMMER_MEM_RECORD_DEL: 2967 ++nlinks; 2968 break; 2969 default: 2970 break; 2971 } 2972 } 2973 } 2974 2975 /* 2976 * Set dirty if we had to modify the link count. 2977 */ 2978 if (ip->sync_ino_data.nlinks != nlinks) { 2979 KKASSERT((int64_t)nlinks >= 0); 2980 ip->sync_ino_data.nlinks = nlinks; 2981 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2982 } 2983 2984 /* 2985 * If there is a trunction queued destroy any data past the (aligned) 2986 * truncation point. Userland will have dealt with the buffer 2987 * containing the truncation point for us. 2988 * 2989 * We don't flush pending frontend data buffers until after we've 2990 * dealt with the truncation. 2991 */ 2992 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2993 /* 2994 * Interlock trunc_off. The VOP front-end may continue to 2995 * make adjustments to it while we are blocked. 2996 */ 2997 off_t trunc_off; 2998 off_t aligned_trunc_off; 2999 int blkmask; 3000 3001 trunc_off = ip->sync_trunc_off; 3002 blkmask = hammer_blocksize(trunc_off) - 1; 3003 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask; 3004 3005 /* 3006 * Delete any whole blocks on-media. The front-end has 3007 * already cleaned out any partial block and made it 3008 * pending. The front-end may have updated trunc_off 3009 * while we were blocked so we only use sync_trunc_off. 3010 * 3011 * This operation can blow out the buffer cache, EWOULDBLOCK 3012 * means we were unable to complete the deletion. The 3013 * deletion will update sync_trunc_off in that case. 3014 */ 3015 error = hammer_ip_delete_range(&cursor, ip, 3016 aligned_trunc_off, 3017 HAMMER_MAX_KEY, 2); 3018 if (error == EWOULDBLOCK) { 3019 ip->flags |= HAMMER_INODE_WOULDBLOCK; 3020 error = 0; 3021 goto defer_buffer_flush; 3022 } 3023 3024 if (error) 3025 goto done; 3026 3027 /* 3028 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO. 3029 * 3030 * XXX we do this even if we did not previously generate 3031 * a REDO_TRUNC record. This operation may enclosed the 3032 * range for multiple prior truncation entries in the REDO 3033 * log. 
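		 *
		 * The entry is only emitted on version 4+ volumes and only
		 * while RDIRTY indicates REDO records may exist for this
		 * inode.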
3034 */ 3035 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR && 3036 (ip->flags & HAMMER_INODE_RDIRTY)) { 3037 hammer_generate_redo(trans, ip, aligned_trunc_off, 3038 HAMMER_REDO_TERM_TRUNC, 3039 NULL, 0); 3040 } 3041 3042 /* 3043 * Clear the truncation flag on the backend after we have 3044 * completed the deletions. Backend data is now good again 3045 * (including new records we are about to sync, below). 3046 * 3047 * Leave sync_trunc_off intact. As we write additional 3048 * records the backend will update sync_trunc_off. This 3049 * tells the backend whether it can skip the overwrite 3050 * test. This should work properly even when the backend 3051 * writes full blocks where the truncation point straddles 3052 * the block because the comparison is against the base 3053 * offset of the record. 3054 */ 3055 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3056 /* ip->sync_trunc_off = HAMMER_MAX_KEY; */ 3057 } else { 3058 error = 0; 3059 } 3060 3061 /* 3062 * Now sync related records. These will typically be directory 3063 * entries, records tracking direct-writes, or delete-on-disk records. 3064 */ 3065 if (error == 0) { 3066 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 3067 hammer_sync_record_callback, &cursor); 3068 if (tmp_error < 0) 3069 tmp_error = -error; 3070 if (tmp_error) 3071 error = tmp_error; 3072 } 3073 hammer_cache_node(&ip->cache[1], cursor.node); 3074 3075 /* 3076 * Re-seek for inode update, assuming our cache hasn't been ripped 3077 * out from under us. 3078 */ 3079 if (error == 0) { 3080 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error); 3081 if (tmp_node) { 3082 hammer_cursor_downgrade(&cursor); 3083 hammer_lock_sh(&tmp_node->lock); 3084 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0) 3085 hammer_cursor_seek(&cursor, tmp_node, 0); 3086 hammer_unlock(&tmp_node->lock); 3087 hammer_rel_node(tmp_node); 3088 } 3089 error = 0; 3090 } 3091 3092 /* 3093 * If we are deleting the inode the frontend had better not have 3094 * any active references on elements making up the inode. 3095 * 3096 * The call to hammer_ip_delete_clean() cleans up auxillary records 3097 * but not DB or DATA records. Those must have already been deleted 3098 * by the normal truncation mechanic. 3099 */ 3100 if (error == 0 && ip->sync_ino_data.nlinks == 0 && 3101 RB_EMPTY(&ip->rec_tree) && 3102 (ip->sync_flags & HAMMER_INODE_DELETING) && 3103 (ip->flags & HAMMER_INODE_DELETED) == 0) { 3104 int count1 = 0; 3105 3106 error = hammer_ip_delete_clean(&cursor, ip, &count1); 3107 if (error == 0) { 3108 ip->flags |= HAMMER_INODE_DELETED; 3109 ip->sync_flags &= ~HAMMER_INODE_DELETING; 3110 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3111 KKASSERT(RB_EMPTY(&ip->rec_tree)); 3112 3113 /* 3114 * Set delete_tid in both the frontend and backend 3115 * copy of the inode record. The DELETED flag handles 3116 * this, do not set DDIRTY. 
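			 * hammer_update_inode() keys off HAMMER_INODE_DELETED
			 * to remove the on-media inode record without writing
			 * a replacement.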
3117 */ 3118 ip->ino_leaf.base.delete_tid = trans->tid; 3119 ip->sync_ino_leaf.base.delete_tid = trans->tid; 3120 ip->ino_leaf.delete_ts = trans->time32; 3121 ip->sync_ino_leaf.delete_ts = trans->time32; 3122 3123 3124 /* 3125 * Adjust the inode count in the volume header 3126 */ 3127 hammer_sync_lock_sh(trans); 3128 if (ip->flags & HAMMER_INODE_ONDISK) { 3129 hammer_modify_volume_field(trans, 3130 trans->rootvol, 3131 vol0_stat_inodes); 3132 --ip->hmp->rootvol->ondisk->vol0_stat_inodes; 3133 hammer_modify_volume_done(trans->rootvol); 3134 } 3135 hammer_sync_unlock(trans); 3136 } 3137 } 3138 3139 if (error) 3140 goto done; 3141 ip->sync_flags &= ~HAMMER_INODE_BUFS; 3142 3143 defer_buffer_flush: 3144 /* 3145 * Now update the inode's on-disk inode-data and/or on-disk record. 3146 * DELETED and ONDISK are managed only in ip->flags. 3147 * 3148 * In the case of a defered buffer flush we still update the on-disk 3149 * inode to satisfy visibility requirements if there happen to be 3150 * directory dependancies. 3151 */ 3152 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) { 3153 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK: 3154 /* 3155 * If deleted and on-disk, don't set any additional flags. 3156 * the delete flag takes care of things. 3157 * 3158 * Clear flags which may have been set by the frontend. 3159 */ 3160 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3161 HAMMER_INODE_SDIRTY | 3162 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3163 HAMMER_INODE_DELETING); 3164 break; 3165 case HAMMER_INODE_DELETED: 3166 /* 3167 * Take care of the case where a deleted inode was never 3168 * flushed to the disk in the first place. 3169 * 3170 * Clear flags which may have been set by the frontend. 3171 */ 3172 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3173 HAMMER_INODE_SDIRTY | 3174 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3175 HAMMER_INODE_DELETING); 3176 while (RB_ROOT(&ip->rec_tree)) { 3177 hammer_record_t record = RB_ROOT(&ip->rec_tree); 3178 hammer_ref(&record->lock); 3179 KKASSERT(hammer_oneref(&record->lock)); 3180 record->flags |= HAMMER_RECF_DELETED_BE; 3181 ++record->ip->rec_generation; 3182 hammer_rel_mem_record(record); 3183 } 3184 break; 3185 case HAMMER_INODE_ONDISK: 3186 /* 3187 * If already on-disk, do not set any additional flags. 3188 */ 3189 break; 3190 default: 3191 /* 3192 * If not on-disk and not deleted, set DDIRTY to force 3193 * an initial record to be written. 3194 * 3195 * Also set the create_tid in both the frontend and backend 3196 * copy of the inode record. 3197 */ 3198 ip->ino_leaf.base.create_tid = trans->tid; 3199 ip->ino_leaf.create_ts = trans->time32; 3200 ip->sync_ino_leaf.base.create_tid = trans->tid; 3201 ip->sync_ino_leaf.create_ts = trans->time32; 3202 ip->sync_flags |= HAMMER_INODE_DDIRTY; 3203 break; 3204 } 3205 3206 /* 3207 * If DDIRTY or SDIRTY is set, write out a new record. 3208 * If the inode is already on-disk the old record is marked as 3209 * deleted. 3210 * 3211 * If DELETED is set hammer_update_inode() will delete the existing 3212 * record without writing out a new one. 
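	 *
	 * Pure timestamp updates (only ATIME/MTIME dirty) take the
	 * cheaper hammer_update_itimes() path below.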
3213 */ 3214 if (ip->flags & HAMMER_INODE_DELETED) { 3215 error = hammer_update_inode(&cursor, ip); 3216 } else 3217 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) && 3218 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 3219 error = hammer_update_itimes(&cursor, ip); 3220 } else 3221 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY | 3222 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 3223 error = hammer_update_inode(&cursor, ip); 3224 } 3225 done: 3226 if (ip->flags & HAMMER_INODE_MODMASK) 3227 hammer_inode_dirty(ip); 3228 if (error) { 3229 hammer_critical_error(ip->hmp, ip, error, 3230 "while syncing inode"); 3231 } 3232 hammer_done_cursor(&cursor); 3233 return(error); 3234 } 3235 3236 /* 3237 * This routine is called when the OS is no longer actively referencing 3238 * the inode (but might still be keeping it cached), or when releasing 3239 * the last reference to an inode. 3240 * 3241 * At this point if the inode's nlinks count is zero we want to destroy 3242 * it, which may mean destroying it on-media too. 3243 */ 3244 void 3245 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 3246 { 3247 struct vnode *vp; 3248 3249 /* 3250 * Set the DELETING flag when the link count drops to 0 and the 3251 * OS no longer has any opens on the inode. 3252 * 3253 * The backend will clear DELETING (a mod flag) and set DELETED 3254 * (a state flag) when it is actually able to perform the 3255 * operation. 3256 * 3257 * Don't reflag the deletion if the flusher is currently syncing 3258 * one that was already flagged. A previously set DELETING flag 3259 * may bounce around flags and sync_flags until the operation is 3260 * completely done. 3261 * 3262 * Do not attempt to modify a snapshot inode (one set to read-only). 3263 */ 3264 if (ip->ino_data.nlinks == 0 && 3265 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 3266 ip->flags |= HAMMER_INODE_DELETING; 3267 ip->flags |= HAMMER_INODE_TRUNCATED; 3268 ip->trunc_off = 0; 3269 vp = NULL; 3270 if (getvp) { 3271 if (hammer_get_vnode(ip, &vp) != 0) 3272 return; 3273 } 3274 3275 /* 3276 * Final cleanup 3277 */ 3278 if (ip->vp) 3279 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0); 3280 if (ip->flags & HAMMER_INODE_MODMASK) 3281 hammer_inode_dirty(ip); 3282 if (getvp) 3283 vput(vp); 3284 } 3285 } 3286 3287 /* 3288 * After potentially resolving a dependancy the inode is tested 3289 * to determine whether it needs to be reflushed. 3290 */ 3291 void 3292 hammer_test_inode(hammer_inode_t ip) 3293 { 3294 if (ip->flags & HAMMER_INODE_REFLUSH) { 3295 ip->flags &= ~HAMMER_INODE_REFLUSH; 3296 hammer_ref(&ip->lock); 3297 if (ip->flags & HAMMER_INODE_RESIGNAL) { 3298 ip->flags &= ~HAMMER_INODE_RESIGNAL; 3299 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 3300 } else { 3301 hammer_flush_inode(ip, 0); 3302 } 3303 hammer_rel_inode(ip, 0); 3304 } 3305 } 3306 3307 /* 3308 * Clear the RECLAIM flag on an inode. This occurs when the inode is 3309 * reassociated with a vp or just before it gets freed. 3310 * 3311 * Pipeline wakeups to threads blocked due to an excessive number of 3312 * detached inodes. This typically occurs when atime updates accumulate 3313 * while scanning a directory tree. 
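 *
 * The blocked threads are parked in hammer_inode_waitreclaims() below.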
3314 */ 3315 static void 3316 hammer_inode_wakereclaims(hammer_inode_t ip) 3317 { 3318 struct hammer_reclaim *reclaim; 3319 hammer_mount_t hmp = ip->hmp; 3320 3321 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 3322 return; 3323 3324 --hammer_count_reclaims; 3325 --hmp->count_reclaims; 3326 ip->flags &= ~HAMMER_INODE_RECLAIM; 3327 3328 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) { 3329 KKASSERT(reclaim->count > 0); 3330 if (--reclaim->count == 0) { 3331 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 3332 wakeup(reclaim); 3333 } 3334 } 3335 } 3336 3337 /* 3338 * Setup our reclaim pipeline. We only let so many detached (and dirty) 3339 * inodes build up before we start blocking. This routine is called 3340 * if a new inode is created or an inode is loaded from media. 3341 * 3342 * When we block we don't care *which* inode has finished reclaiming, 3343 * as long as one does. 3344 * 3345 * The reclaim pipeline is primarily governed by the auto-flush which is 3346 * 1/4 hammer_limit_reclaims. We don't want to block if the count is 3347 * less than 1/2 hammer_limit_reclaims. From 1/2 to full count is 3348 * dynamically governed. 3349 */ 3350 void 3351 hammer_inode_waitreclaims(hammer_transaction_t trans) 3352 { 3353 hammer_mount_t hmp = trans->hmp; 3354 struct hammer_reclaim reclaim; 3355 int lower_limit; 3356 3357 /* 3358 * Track inode load, delay if the number of reclaiming inodes is 3359 * between 2/4 and 4/4 hammer_limit_reclaims, depending. 3360 */ 3361 if (curthread->td_proc) { 3362 struct hammer_inostats *stats; 3363 3364 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid); 3365 ++stats->count; 3366 3367 if (stats->count > hammer_limit_reclaims / 2) 3368 stats->count = hammer_limit_reclaims / 2; 3369 lower_limit = hammer_limit_reclaims - stats->count; 3370 if (hammer_debug_general & 0x10000) { 3371 hdkprintf("pid %5d limit %d\n", 3372 (int)curthread->td_proc->p_pid, lower_limit); 3373 } 3374 } else { 3375 lower_limit = hammer_limit_reclaims * 3 / 4; 3376 } 3377 if (hmp->count_reclaims >= lower_limit) { 3378 reclaim.count = 1; 3379 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 3380 tsleep(&reclaim, 0, "hmrrcm", hz); 3381 if (reclaim.count > 0) 3382 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 3383 } 3384 } 3385 3386 /* 3387 * Keep track of reclaim statistics on a per-pid basis using a loose 3388 * 4-way set associative hash table. Collisions inherit the count of 3389 * the previous entry. 3390 * 3391 * NOTE: We want to be careful here to limit the chain size. If the chain 3392 * size is too large a pid will spread its stats out over too many 3393 * entries under certain types of heavy filesystem activity and 3394 * wind up not delaying long enough. 3395 */ 3396 static 3397 struct hammer_inostats * 3398 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid) 3399 { 3400 struct hammer_inostats *stats; 3401 int delta; 3402 int chain; 3403 static volatile int iterator; /* we don't care about MP races */ 3404 3405 /* 3406 * Chain up to 4 times to find our entry. 3407 */ 3408 for (chain = 0; chain < 4; ++chain) { 3409 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK]; 3410 if (stats->pid == pid) 3411 break; 3412 } 3413 3414 /* 3415 * Replace one of the four chaining entries with our new entry. 
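	 * The iterator pseudo-randomly selects which of the four slots
	 * to evict; the evicted entry's count is inherited rather than
	 * reset (see the NOTE above).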
3416 */ 3417 if (chain == 4) { 3418 stats = &hmp->inostats[(pid + (iterator++ & 3)) & 3419 HAMMER_INOSTATS_HMASK]; 3420 stats->pid = pid; 3421 } 3422 3423 /* 3424 * Decay the entry 3425 */ 3426 if (stats->count && stats->ltick != ticks) { 3427 delta = ticks - stats->ltick; 3428 stats->ltick = ticks; 3429 if (delta <= 0 || delta > hz * 60) 3430 stats->count = 0; 3431 else 3432 stats->count = stats->count * hz / (hz + delta); 3433 } 3434 if (hammer_debug_general & 0x10000) 3435 hdkprintf("pid %5d stats %d\n", (int)pid, stats->count); 3436 return (stats); 3437 } 3438 3439 #if 0 3440 3441 /* 3442 * XXX not used, doesn't work very well due to the large batching nature 3443 * of flushes. 3444 * 3445 * A larger then normal backlog of inodes is sitting in the flusher, 3446 * enforce a general slowdown to let it catch up. This routine is only 3447 * called on completion of a non-flusher-related transaction which 3448 * performed B-Tree node I/O. 3449 * 3450 * It is possible for the flusher to stall in a continuous load. 3451 * blogbench -i1000 -o seems to do a good job generating this sort of load. 3452 * If the flusher is unable to catch up the inode count can bloat until 3453 * we run out of kvm. 3454 * 3455 * This is a bit of a hack. 3456 */ 3457 void 3458 hammer_inode_waithard(hammer_mount_t hmp) 3459 { 3460 /* 3461 * Hysteresis. 3462 */ 3463 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 3464 if (hmp->count_reclaims < hammer_limit_reclaims / 2 && 3465 hmp->count_iqueued < hmp->count_inodes / 20) { 3466 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 3467 return; 3468 } 3469 } else { 3470 if (hmp->count_reclaims < hammer_limit_reclaims || 3471 hmp->count_iqueued < hmp->count_inodes / 10) { 3472 return; 3473 } 3474 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 3475 } 3476 3477 /* 3478 * Block for one flush cycle. 3479 */ 3480 hammer_flusher_wait_next(hmp); 3481 } 3482 3483 #endif 3484
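
/*
 * Illustrative sketch only, kept inside #if 0 like the other dead code in
 * this file so it is never compiled: roughly how a frontend path is
 * expected to drive the flush machinery above -- queue the inode with a
 * signal and then block until the flusher has run its group.  The real
 * fsync/unmount paths add their own locking and error handling; the helper
 * name below is hypothetical and not part of HAMMER.
 */
#if 0
static int
hammer_example_flush_and_wait(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Queue the inode to a flush group and ask the flusher to run
	 * it (HAMMER_FLUSH_SIGNAL), but only if it is actually dirty.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK)
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);

	/*
	 * Block until the queued flush (if any) completes.  See
	 * hammer_wait_inode() for the FLUSHW/RESIGNAL interplay.
	 */
	hammer_wait_inode(ip);
	lwkt_reltoken(&hmp->fs_token);

	/*
	 * ip->error is set by hammer_sync_inode_done() on the backend.
	 */
	return (ip->error);
}
#endif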