1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #include "hammer.h" 36 #include <vm/vm_extern.h> 37 38 static int hammer_unload_inode(struct hammer_inode *ip); 39 static void hammer_free_inode(hammer_inode_t ip); 40 static void hammer_flush_inode_core(hammer_inode_t ip, 41 hammer_flush_group_t flg, int flags); 42 static int hammer_setup_child_callback(hammer_record_t rec, void *data); 43 #if 0 44 static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data); 45 #endif 46 static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth, 47 hammer_flush_group_t flg); 48 static int hammer_setup_parent_inodes_helper(hammer_record_t record, 49 int depth, hammer_flush_group_t flg); 50 static void hammer_inode_wakereclaims(hammer_inode_t ip); 51 static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp, 52 pid_t pid); 53 54 #ifdef DEBUG_TRUNCATE 55 extern struct hammer_inode *HammerTruncIp; 56 #endif 57 58 struct krate hammer_gen_krate = { 1 }; 59 60 /* 61 * RB-Tree support for inode structures 62 */ 63 int 64 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 65 { 66 if (ip1->obj_localization < ip2->obj_localization) 67 return(-1); 68 if (ip1->obj_localization > ip2->obj_localization) 69 return(1); 70 if (ip1->obj_id < ip2->obj_id) 71 return(-1); 72 if (ip1->obj_id > ip2->obj_id) 73 return(1); 74 if (ip1->obj_asof < ip2->obj_asof) 75 return(-1); 76 if (ip1->obj_asof > ip2->obj_asof) 77 return(1); 78 return(0); 79 } 80 81 int 82 hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 83 { 84 if (ip1->redo_fifo_start < ip2->redo_fifo_start) 85 return(-1); 86 if (ip1->redo_fifo_start > ip2->redo_fifo_start) 87 return(1); 88 return(0); 89 } 90 91 /* 92 * RB-Tree support for inode structures / special LOOKUP_INFO 93 */ 94 static int 95 
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip) 96 { 97 if (info->obj_localization < ip->obj_localization) 98 return(-1); 99 if (info->obj_localization > ip->obj_localization) 100 return(1); 101 if (info->obj_id < ip->obj_id) 102 return(-1); 103 if (info->obj_id > ip->obj_id) 104 return(1); 105 if (info->obj_asof < ip->obj_asof) 106 return(-1); 107 if (info->obj_asof > ip->obj_asof) 108 return(1); 109 return(0); 110 } 111 112 /* 113 * Used by hammer_scan_inode_snapshots() to locate all of an object's 114 * snapshots. Note that the asof field is not tested, which we can get 115 * away with because it is the lowest-priority field. 116 */ 117 static int 118 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data) 119 { 120 hammer_inode_info_t info = data; 121 122 if (ip->obj_localization > info->obj_localization) 123 return(1); 124 if (ip->obj_localization < info->obj_localization) 125 return(-1); 126 if (ip->obj_id > info->obj_id) 127 return(1); 128 if (ip->obj_id < info->obj_id) 129 return(-1); 130 return(0); 131 } 132 133 /* 134 * Used by hammer_unload_pseudofs() to locate all inodes associated with 135 * a particular PFS. 136 */ 137 static int 138 hammer_inode_pfs_cmp(hammer_inode_t ip, void *data) 139 { 140 u_int32_t localization = *(u_int32_t *)data; 141 if (ip->obj_localization > localization) 142 return(1); 143 if (ip->obj_localization < localization) 144 return(-1); 145 return(0); 146 } 147 148 /* 149 * RB-Tree support for pseudofs structures 150 */ 151 static int 152 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2) 153 { 154 if (p1->localization < p2->localization) 155 return(-1); 156 if (p1->localization > p2->localization) 157 return(1); 158 return(0); 159 } 160 161 162 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare); 163 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, 164 hammer_inode_info_cmp, hammer_inode_info_t); 165 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, 166 hammer_pfs_rb_compare, u_int32_t, localization); 167 168 /* 169 * The kernel is not actively referencing this vnode but is still holding 170 * it cached. 171 * 172 * This is called from the frontend. 173 * 174 * MPALMOSTSAFE 175 */ 176 int 177 hammer_vop_inactive(struct vop_inactive_args *ap) 178 { 179 struct hammer_inode *ip = VTOI(ap->a_vp); 180 hammer_mount_t hmp; 181 182 /* 183 * Degenerate case 184 */ 185 if (ip == NULL) { 186 vrecycle(ap->a_vp); 187 return(0); 188 } 189 190 /* 191 * If the inode no longer has visibility in the filesystem try to 192 * recycle it immediately, even if the inode is dirty. Recycling 193 * it quickly allows the system to reclaim buffer cache and VM 194 * resources which can matter a lot in a heavily loaded system. 195 * 196 * This can deadlock in vfsync() if we aren't careful. 197 * 198 * Do not queue the inode to the flusher if we still have visibility, 199 * otherwise namespace calls such as chmod will unnecessarily generate 200 * multiple inode updates. 201 */ 202 if (ip->ino_data.nlinks == 0) { 203 hmp = ip->hmp; 204 lwkt_gettoken(&hmp->fs_token); 205 hammer_inode_unloadable_check(ip, 0); 206 if (ip->flags & HAMMER_INODE_MODMASK) 207 hammer_flush_inode(ip, 0); 208 lwkt_reltoken(&hmp->fs_token); 209 vrecycle(ap->a_vp); 210 } 211 return(0); 212 } 213 214 /* 215 * Release the vnode association. This is typically (but not always) 216 * the last reference on the inode. 
217 * 218 * Once the association is lost we are on our own with regards to 219 * flushing the inode. 220 * 221 * We must interlock ip->vp so hammer_get_vnode() can avoid races. 222 */ 223 int 224 hammer_vop_reclaim(struct vop_reclaim_args *ap) 225 { 226 struct hammer_inode *ip; 227 hammer_mount_t hmp; 228 struct vnode *vp; 229 230 vp = ap->a_vp; 231 232 if ((ip = vp->v_data) != NULL) { 233 hmp = ip->hmp; 234 lwkt_gettoken(&hmp->fs_token); 235 hammer_lock_ex(&ip->lock); 236 vp->v_data = NULL; 237 ip->vp = NULL; 238 239 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) { 240 ++hammer_count_reclaims; 241 ++hmp->count_reclaims; 242 ip->flags |= HAMMER_INODE_RECLAIM; 243 } 244 hammer_unlock(&ip->lock); 245 vclrisdirty(vp); 246 hammer_rel_inode(ip, 1); 247 lwkt_reltoken(&hmp->fs_token); 248 } 249 return(0); 250 } 251 252 /* 253 * Inform the kernel that the inode is dirty. This will be checked 254 * by vn_unlock(). 255 */ 256 void 257 hammer_inode_dirty(struct hammer_inode *ip) 258 { 259 struct vnode *vp; 260 261 if ((ip->flags & HAMMER_INODE_MODMASK) && 262 (vp = ip->vp) != NULL) { 263 vsetisdirty(vp); 264 } 265 } 266 267 /* 268 * Return a locked vnode for the specified inode. The inode must be 269 * referenced but NOT LOCKED on entry and will remain referenced on 270 * return. 271 * 272 * Called from the frontend. 273 */ 274 int 275 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp) 276 { 277 hammer_mount_t hmp; 278 struct vnode *vp; 279 int error = 0; 280 u_int8_t obj_type; 281 282 hmp = ip->hmp; 283 284 for (;;) { 285 if ((vp = ip->vp) == NULL) { 286 error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0); 287 if (error) 288 break; 289 hammer_lock_ex(&ip->lock); 290 if (ip->vp != NULL) { 291 hammer_unlock(&ip->lock); 292 vp = *vpp; 293 vp->v_type = VBAD; 294 vx_put(vp); 295 continue; 296 } 297 hammer_ref(&ip->lock); 298 vp = *vpp; 299 ip->vp = vp; 300 301 obj_type = ip->ino_data.obj_type; 302 vp->v_type = hammer_get_vnode_type(obj_type); 303 304 hammer_inode_wakereclaims(ip); 305 306 switch(ip->ino_data.obj_type) { 307 case HAMMER_OBJTYPE_CDEV: 308 case HAMMER_OBJTYPE_BDEV: 309 vp->v_ops = &hmp->mp->mnt_vn_spec_ops; 310 addaliasu(vp, ip->ino_data.rmajor, 311 ip->ino_data.rminor); 312 break; 313 case HAMMER_OBJTYPE_FIFO: 314 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops; 315 break; 316 case HAMMER_OBJTYPE_REGFILE: 317 break; 318 default: 319 break; 320 } 321 322 /* 323 * Only mark as the root vnode if the ip is not 324 * historical, otherwise the VFS cache will get 325 * confused. The other half of the special handling 326 * is in hammer_vop_nlookupdotdot(). 327 * 328 * Pseudo-filesystem roots can be accessed via 329 * non-root filesystem paths and setting VROOT may 330 * confuse the namecache. Set VPFSROOT instead. 331 */ 332 if (ip->obj_id == HAMMER_OBJID_ROOT && 333 ip->obj_asof == hmp->asof) { 334 if (ip->obj_localization == 0) 335 vsetflags(vp, VROOT); 336 else 337 vsetflags(vp, VPFSROOT); 338 } 339 340 vp->v_data = (void *)ip; 341 /* vnode locked by getnewvnode() */ 342 /* make related vnode dirty if inode dirty? */ 343 hammer_unlock(&ip->lock); 344 if (vp->v_type == VREG) { 345 vinitvmio(vp, ip->ino_data.size, 346 hammer_blocksize(ip->ino_data.size), 347 hammer_blockoff(ip->ino_data.size)); 348 } 349 break; 350 } 351 352 /* 353 * Interlock vnode clearing. This does not prevent the 354 * vnode from going into a reclaimed state but it does 355 * prevent it from being destroyed or reused so the vget() 356 * will properly fail. 
357 */ 358 hammer_lock_ex(&ip->lock); 359 if ((vp = ip->vp) == NULL) { 360 hammer_unlock(&ip->lock); 361 continue; 362 } 363 vhold(vp); 364 hammer_unlock(&ip->lock); 365 366 /* 367 * loop if the vget fails (aka races), or if the vp 368 * no longer matches ip->vp. 369 */ 370 if (vget(vp, LK_EXCLUSIVE) == 0) { 371 if (vp == ip->vp) { 372 vdrop(vp); 373 break; 374 } 375 vput(vp); 376 } 377 vdrop(vp); 378 } 379 *vpp = vp; 380 return(error); 381 } 382 383 /* 384 * Locate all copies of the inode for obj_id compatible with the specified 385 * asof, reference, and issue the related call-back. This routine is used 386 * for direct-io invalidation and does not create any new inodes. 387 */ 388 void 389 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo, 390 int (*callback)(hammer_inode_t ip, void *data), 391 void *data) 392 { 393 hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root, 394 hammer_inode_info_cmp_all_history, 395 callback, iinfo); 396 } 397 398 /* 399 * Acquire a HAMMER inode. The returned inode is not locked. These functions 400 * do not attach or detach the related vnode (use hammer_get_vnode() for 401 * that). 402 * 403 * The flags argument is only applied for newly created inodes, and only 404 * certain flags are inherited. 405 * 406 * Called from the frontend. 407 */ 408 struct hammer_inode * 409 hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip, 410 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 411 int flags, int *errorp) 412 { 413 hammer_mount_t hmp = trans->hmp; 414 struct hammer_node_cache *cachep; 415 struct hammer_inode_info iinfo; 416 struct hammer_cursor cursor; 417 struct hammer_inode *ip; 418 419 420 /* 421 * Determine if we already have an inode cached. If we do then 422 * we are golden. 423 * 424 * If we find an inode with no vnode we have to mark the 425 * transaction such that hammer_inode_waitreclaims() is 426 * called later on to avoid building up an infinite number 427 * of inodes. Otherwise we can continue to * add new inodes 428 * faster then they can be disposed of, even with the tsleep 429 * delay. 430 * 431 * If we find a dummy inode we return a failure so dounlink 432 * (which does another lookup) doesn't try to mess with the 433 * link count. hammer_vop_nresolve() uses hammer_get_dummy_inode() 434 * to ref dummy inodes. 435 */ 436 iinfo.obj_id = obj_id; 437 iinfo.obj_asof = asof; 438 iinfo.obj_localization = localization; 439 loop: 440 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 441 if (ip) { 442 if (ip->flags & HAMMER_INODE_DUMMY) { 443 *errorp = ENOENT; 444 return(NULL); 445 } 446 hammer_ref(&ip->lock); 447 *errorp = 0; 448 return(ip); 449 } 450 451 /* 452 * Allocate a new inode structure and deal with races later. 453 */ 454 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 455 ++hammer_count_inodes; 456 ++hmp->count_inodes; 457 ip->obj_id = obj_id; 458 ip->obj_asof = iinfo.obj_asof; 459 ip->obj_localization = localization; 460 ip->hmp = hmp; 461 ip->flags = flags & HAMMER_INODE_RO; 462 ip->cache[0].ip = ip; 463 ip->cache[1].ip = ip; 464 ip->cache[2].ip = ip; 465 ip->cache[3].ip = ip; 466 if (hmp->ronly) 467 ip->flags |= HAMMER_INODE_RO; 468 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 469 0x7FFFFFFFFFFFFFFFLL; 470 RB_INIT(&ip->rec_tree); 471 TAILQ_INIT(&ip->target_list); 472 hammer_ref(&ip->lock); 473 474 /* 475 * Locate the on-disk inode. 
If this is a PFS root we always 476 * access the current version of the root inode and (if it is not 477 * a master) always access information under it with a snapshot 478 * TID. 479 * 480 * We cache recent inode lookups in this directory in dip->cache[2]. 481 * If we can't find it we assume the inode we are looking for is 482 * close to the directory inode. 483 */ 484 retry: 485 cachep = NULL; 486 if (dip) { 487 if (dip->cache[2].node) 488 cachep = &dip->cache[2]; 489 else 490 cachep = &dip->cache[0]; 491 } 492 hammer_init_cursor(trans, &cursor, cachep, NULL); 493 cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE; 494 cursor.key_beg.obj_id = ip->obj_id; 495 cursor.key_beg.key = 0; 496 cursor.key_beg.create_tid = 0; 497 cursor.key_beg.delete_tid = 0; 498 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE; 499 cursor.key_beg.obj_type = 0; 500 501 cursor.asof = iinfo.obj_asof; 502 cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA | 503 HAMMER_CURSOR_ASOF; 504 505 *errorp = hammer_btree_lookup(&cursor); 506 if (*errorp == EDEADLK) { 507 hammer_done_cursor(&cursor); 508 goto retry; 509 } 510 511 /* 512 * On success the B-Tree lookup will hold the appropriate 513 * buffer cache buffers and provide a pointer to the requested 514 * information. Copy the information to the in-memory inode 515 * and cache the B-Tree node to improve future operations. 516 */ 517 if (*errorp == 0) { 518 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf; 519 ip->ino_data = cursor.data->inode; 520 521 /* 522 * cache[0] tries to cache the location of the object inode. 523 * The assumption is that it is near the directory inode. 524 * 525 * cache[1] tries to cache the location of the object data. 526 * We might have something in the governing directory from 527 * scan optimizations (see the strategy code in 528 * hammer_vnops.c). 529 * 530 * We update dip->cache[2], if possible, with the location 531 * of the object inode for future directory shortcuts. 532 */ 533 hammer_cache_node(&ip->cache[0], cursor.node); 534 if (dip) { 535 if (dip->cache[3].node) { 536 hammer_cache_node(&ip->cache[1], 537 dip->cache[3].node); 538 } 539 hammer_cache_node(&dip->cache[2], cursor.node); 540 } 541 542 /* 543 * The file should not contain any data past the file size 544 * stored in the inode. Setting save_trunc_off to the 545 * file size instead of max reduces B-Tree lookup overheads 546 * on append by allowing the flusher to avoid checking for 547 * record overwrites. 548 */ 549 ip->save_trunc_off = ip->ino_data.size; 550 551 /* 552 * Locate and assign the pseudofs management structure to 553 * the inode. 554 */ 555 if (dip && dip->obj_localization == ip->obj_localization) { 556 ip->pfsm = dip->pfsm; 557 hammer_ref(&ip->pfsm->lock); 558 } else { 559 ip->pfsm = hammer_load_pseudofs(trans, 560 ip->obj_localization, 561 errorp); 562 *errorp = 0; /* ignore ENOENT */ 563 } 564 } 565 566 /* 567 * The inode is placed on the red-black tree and will be synced to 568 * the media when flushed or by the filesystem sync. If this races 569 * another instantiation/lookup the insertion will fail. 
570 */ 571 if (*errorp == 0) { 572 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 573 hammer_free_inode(ip); 574 hammer_done_cursor(&cursor); 575 goto loop; 576 } 577 ip->flags |= HAMMER_INODE_ONDISK; 578 } else { 579 if (ip->flags & HAMMER_INODE_RSV_INODES) { 580 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 581 --hmp->rsv_inodes; 582 } 583 584 hammer_free_inode(ip); 585 ip = NULL; 586 } 587 hammer_done_cursor(&cursor); 588 589 /* 590 * NEWINODE is only set if the inode becomes dirty later, 591 * setting it here just leads to unnecessary stalls. 592 * 593 * trans->flags |= HAMMER_TRANSF_NEWINODE; 594 */ 595 return (ip); 596 } 597 598 /* 599 * Get a dummy inode to placemark a broken directory entry. 600 */ 601 struct hammer_inode * 602 hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip, 603 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 604 int flags, int *errorp) 605 { 606 hammer_mount_t hmp = trans->hmp; 607 struct hammer_inode_info iinfo; 608 struct hammer_inode *ip; 609 610 /* 611 * Determine if we already have an inode cached. If we do then 612 * we are golden. 613 * 614 * If we find an inode with no vnode we have to mark the 615 * transaction such that hammer_inode_waitreclaims() is 616 * called later on to avoid building up an infinite number 617 * of inodes. Otherwise we can continue to * add new inodes 618 * faster then they can be disposed of, even with the tsleep 619 * delay. 620 * 621 * If we find a non-fake inode we return an error. Only fake 622 * inodes can be returned by this routine. 623 */ 624 iinfo.obj_id = obj_id; 625 iinfo.obj_asof = asof; 626 iinfo.obj_localization = localization; 627 loop: 628 *errorp = 0; 629 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 630 if (ip) { 631 if ((ip->flags & HAMMER_INODE_DUMMY) == 0) { 632 *errorp = ENOENT; 633 return(NULL); 634 } 635 hammer_ref(&ip->lock); 636 return(ip); 637 } 638 639 /* 640 * Allocate a new inode structure and deal with races later. 641 */ 642 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 643 ++hammer_count_inodes; 644 ++hmp->count_inodes; 645 ip->obj_id = obj_id; 646 ip->obj_asof = iinfo.obj_asof; 647 ip->obj_localization = localization; 648 ip->hmp = hmp; 649 ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY; 650 ip->cache[0].ip = ip; 651 ip->cache[1].ip = ip; 652 ip->cache[2].ip = ip; 653 ip->cache[3].ip = ip; 654 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 655 0x7FFFFFFFFFFFFFFFLL; 656 RB_INIT(&ip->rec_tree); 657 TAILQ_INIT(&ip->target_list); 658 hammer_ref(&ip->lock); 659 660 /* 661 * Populate the dummy inode. Leave everything zero'd out. 662 * 663 * (ip->ino_leaf and ip->ino_data) 664 * 665 * Make the dummy inode a FIFO object which most copy programs 666 * will properly ignore. 667 */ 668 ip->save_trunc_off = ip->ino_data.size; 669 ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO; 670 671 /* 672 * Locate and assign the pseudofs management structure to 673 * the inode. 674 */ 675 if (dip && dip->obj_localization == ip->obj_localization) { 676 ip->pfsm = dip->pfsm; 677 hammer_ref(&ip->pfsm->lock); 678 } else { 679 ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization, 680 errorp); 681 *errorp = 0; /* ignore ENOENT */ 682 } 683 684 /* 685 * The inode is placed on the red-black tree and will be synced to 686 * the media when flushed or by the filesystem sync. If this races 687 * another instantiation/lookup the insertion will fail. 688 * 689 * NOTE: Do not set HAMMER_INODE_ONDISK. The inode is a fake. 
690 */ 691 if (*errorp == 0) { 692 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 693 hammer_free_inode(ip); 694 goto loop; 695 } 696 } else { 697 if (ip->flags & HAMMER_INODE_RSV_INODES) { 698 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 699 --hmp->rsv_inodes; 700 } 701 hammer_free_inode(ip); 702 ip = NULL; 703 } 704 trans->flags |= HAMMER_TRANSF_NEWINODE; 705 return (ip); 706 } 707 708 /* 709 * Return a referenced inode only if it is in our inode cache. 710 * 711 * Dummy inodes do not count. 712 */ 713 struct hammer_inode * 714 hammer_find_inode(hammer_transaction_t trans, int64_t obj_id, 715 hammer_tid_t asof, u_int32_t localization) 716 { 717 hammer_mount_t hmp = trans->hmp; 718 struct hammer_inode_info iinfo; 719 struct hammer_inode *ip; 720 721 iinfo.obj_id = obj_id; 722 iinfo.obj_asof = asof; 723 iinfo.obj_localization = localization; 724 725 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 726 if (ip) { 727 if (ip->flags & HAMMER_INODE_DUMMY) 728 ip = NULL; 729 else 730 hammer_ref(&ip->lock); 731 } 732 return(ip); 733 } 734 735 /* 736 * Create a new filesystem object, returning the inode in *ipp. The 737 * returned inode will be referenced. The inode is created in-memory. 738 * 739 * If pfsm is non-NULL the caller wishes to create the root inode for 740 * a master PFS. 741 */ 742 int 743 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap, 744 struct ucred *cred, 745 hammer_inode_t dip, const char *name, int namelen, 746 hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp) 747 { 748 hammer_mount_t hmp; 749 hammer_inode_t ip; 750 uid_t xuid; 751 int error; 752 int64_t namekey; 753 u_int32_t dummy; 754 755 hmp = trans->hmp; 756 757 /* 758 * Disallow the creation of new inodes in directories which 759 * have been deleted. In HAMMER, this will cause a record 760 * syncing assertion later on in the flush code. 761 */ 762 if (dip && dip->ino_data.nlinks == 0) { 763 *ipp = NULL; 764 return (EINVAL); 765 } 766 767 /* 768 * Allocate inode 769 */ 770 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 771 ++hammer_count_inodes; 772 ++hmp->count_inodes; 773 trans->flags |= HAMMER_TRANSF_NEWINODE; 774 775 if (pfsm) { 776 KKASSERT(pfsm->localization != 0); 777 ip->obj_id = HAMMER_OBJID_ROOT; 778 ip->obj_localization = pfsm->localization; 779 } else { 780 KKASSERT(dip != NULL); 781 namekey = hammer_directory_namekey(dip, name, namelen, &dummy); 782 ip->obj_id = hammer_alloc_objid(hmp, dip, namekey); 783 ip->obj_localization = dip->obj_localization; 784 } 785 786 KKASSERT(ip->obj_id != 0); 787 ip->obj_asof = hmp->asof; 788 ip->hmp = hmp; 789 ip->flush_state = HAMMER_FST_IDLE; 790 ip->flags = HAMMER_INODE_DDIRTY | 791 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME; 792 ip->cache[0].ip = ip; 793 ip->cache[1].ip = ip; 794 ip->cache[2].ip = ip; 795 ip->cache[3].ip = ip; 796 797 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 798 /* ip->save_trunc_off = 0; (already zero) */ 799 RB_INIT(&ip->rec_tree); 800 TAILQ_INIT(&ip->target_list); 801 802 ip->ino_data.atime = trans->time; 803 ip->ino_data.mtime = trans->time; 804 ip->ino_data.size = 0; 805 ip->ino_data.nlinks = 0; 806 807 /* 808 * A nohistory designator on the parent directory is inherited by 809 * the child. We will do this even for pseudo-fs creation... the 810 * sysad can turn it off. 
811 */ 812 if (dip) { 813 ip->ino_data.uflags = dip->ino_data.uflags & 814 (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP); 815 } 816 817 ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 818 ip->ino_leaf.base.localization = ip->obj_localization + 819 HAMMER_LOCALIZE_INODE; 820 ip->ino_leaf.base.obj_id = ip->obj_id; 821 ip->ino_leaf.base.key = 0; 822 ip->ino_leaf.base.create_tid = 0; 823 ip->ino_leaf.base.delete_tid = 0; 824 ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE; 825 ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type); 826 827 ip->ino_data.obj_type = ip->ino_leaf.base.obj_type; 828 ip->ino_data.version = HAMMER_INODE_DATA_VERSION; 829 ip->ino_data.mode = vap->va_mode; 830 ip->ino_data.ctime = trans->time; 831 832 /* 833 * If we are running version 2 or greater directory entries are 834 * inode-localized instead of data-localized. 835 */ 836 if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) { 837 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 838 ip->ino_data.cap_flags |= 839 HAMMER_INODE_CAP_DIR_LOCAL_INO; 840 } 841 } 842 if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) { 843 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 844 ip->ino_data.cap_flags |= 845 HAMMER_INODE_CAP_DIRHASH_ALG1; 846 } 847 } 848 849 /* 850 * Setup the ".." pointer. This only needs to be done for directories 851 * but we do it for all objects as a recovery aid. 852 */ 853 if (dip) 854 ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id; 855 #if 0 856 /* 857 * The parent_obj_localization field only applies to pseudo-fs roots. 858 * XXX this is no longer applicable, PFSs are no longer directly 859 * tied into the parent's directory structure. 860 */ 861 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY && 862 ip->obj_id == HAMMER_OBJID_ROOT) { 863 ip->ino_data.ext.obj.parent_obj_localization = 864 dip->obj_localization; 865 } 866 #endif 867 868 switch(ip->ino_leaf.base.obj_type) { 869 case HAMMER_OBJTYPE_CDEV: 870 case HAMMER_OBJTYPE_BDEV: 871 ip->ino_data.rmajor = vap->va_rmajor; 872 ip->ino_data.rminor = vap->va_rminor; 873 break; 874 default: 875 break; 876 } 877 878 /* 879 * Calculate default uid/gid and overwrite with information from 880 * the vap. 
881 */ 882 if (dip) { 883 xuid = hammer_to_unix_xid(&dip->ino_data.uid); 884 xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, 885 xuid, cred, &vap->va_mode); 886 } else { 887 xuid = 0; 888 } 889 ip->ino_data.mode = vap->va_mode; 890 891 if (vap->va_vaflags & VA_UID_UUID_VALID) 892 ip->ino_data.uid = vap->va_uid_uuid; 893 else if (vap->va_uid != (uid_t)VNOVAL) 894 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid); 895 else 896 hammer_guid_to_uuid(&ip->ino_data.uid, xuid); 897 898 if (vap->va_vaflags & VA_GID_UUID_VALID) 899 ip->ino_data.gid = vap->va_gid_uuid; 900 else if (vap->va_gid != (gid_t)VNOVAL) 901 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid); 902 else if (dip) 903 ip->ino_data.gid = dip->ino_data.gid; 904 905 hammer_ref(&ip->lock); 906 907 if (pfsm) { 908 ip->pfsm = pfsm; 909 hammer_ref(&pfsm->lock); 910 error = 0; 911 } else if (dip->obj_localization == ip->obj_localization) { 912 ip->pfsm = dip->pfsm; 913 hammer_ref(&ip->pfsm->lock); 914 error = 0; 915 } else { 916 ip->pfsm = hammer_load_pseudofs(trans, 917 ip->obj_localization, 918 &error); 919 error = 0; /* ignore ENOENT */ 920 } 921 922 if (error) { 923 hammer_free_inode(ip); 924 ip = NULL; 925 } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 926 panic("hammer_create_inode: duplicate obj_id %llx", 927 (long long)ip->obj_id); 928 /* not reached */ 929 hammer_free_inode(ip); 930 } 931 *ipp = ip; 932 return(error); 933 } 934 935 /* 936 * Final cleanup / freeing of an inode structure 937 */ 938 static void 939 hammer_free_inode(hammer_inode_t ip) 940 { 941 struct hammer_mount *hmp; 942 943 hmp = ip->hmp; 944 KKASSERT(hammer_oneref(&ip->lock)); 945 hammer_uncache_node(&ip->cache[0]); 946 hammer_uncache_node(&ip->cache[1]); 947 hammer_uncache_node(&ip->cache[2]); 948 hammer_uncache_node(&ip->cache[3]); 949 hammer_inode_wakereclaims(ip); 950 if (ip->objid_cache) 951 hammer_clear_objid(ip); 952 --hammer_count_inodes; 953 --hmp->count_inodes; 954 if (ip->pfsm) { 955 hammer_rel_pseudofs(hmp, ip->pfsm); 956 ip->pfsm = NULL; 957 } 958 kfree(ip, hmp->m_inodes); 959 ip = NULL; 960 } 961 962 /* 963 * Retrieve pseudo-fs data. NULL will never be returned. 964 * 965 * If an error occurs *errorp will be set and a default template is returned, 966 * otherwise *errorp is set to 0. Typically when an error occurs it will 967 * be ENOENT. 968 */ 969 hammer_pseudofs_inmem_t 970 hammer_load_pseudofs(hammer_transaction_t trans, 971 u_int32_t localization, int *errorp) 972 { 973 hammer_mount_t hmp = trans->hmp; 974 hammer_inode_t ip; 975 hammer_pseudofs_inmem_t pfsm; 976 struct hammer_cursor cursor; 977 int bytes; 978 979 retry: 980 pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization); 981 if (pfsm) { 982 hammer_ref(&pfsm->lock); 983 *errorp = 0; 984 return(pfsm); 985 } 986 987 /* 988 * PFS records are stored in the root inode (not the PFS root inode, 989 * but the real root). Avoid an infinite recursion if loading 990 * the PFS for the real root. 991 */ 992 if (localization) { 993 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, 994 HAMMER_MAX_TID, 995 HAMMER_DEF_LOCALIZATION, 0, errorp); 996 } else { 997 ip = NULL; 998 } 999 1000 pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO); 1001 pfsm->localization = localization; 1002 pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid; 1003 pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid; 1004 1005 hammer_init_cursor(trans, &cursor, (ip ? 
&ip->cache[1] : NULL), ip); 1006 cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION + 1007 HAMMER_LOCALIZE_MISC; 1008 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 1009 cursor.key_beg.create_tid = 0; 1010 cursor.key_beg.delete_tid = 0; 1011 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 1012 cursor.key_beg.obj_type = 0; 1013 cursor.key_beg.key = localization; 1014 cursor.asof = HAMMER_MAX_TID; 1015 cursor.flags |= HAMMER_CURSOR_ASOF; 1016 1017 if (ip) 1018 *errorp = hammer_ip_lookup(&cursor); 1019 else 1020 *errorp = hammer_btree_lookup(&cursor); 1021 if (*errorp == 0) { 1022 *errorp = hammer_ip_resolve_data(&cursor); 1023 if (*errorp == 0) { 1024 if (cursor.data->pfsd.mirror_flags & 1025 HAMMER_PFSD_DELETED) { 1026 *errorp = ENOENT; 1027 } else { 1028 bytes = cursor.leaf->data_len; 1029 if (bytes > sizeof(pfsm->pfsd)) 1030 bytes = sizeof(pfsm->pfsd); 1031 bcopy(cursor.data, &pfsm->pfsd, bytes); 1032 } 1033 } 1034 } 1035 hammer_done_cursor(&cursor); 1036 1037 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 1038 hammer_ref(&pfsm->lock); 1039 if (ip) 1040 hammer_rel_inode(ip, 0); 1041 if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) { 1042 kfree(pfsm, hmp->m_misc); 1043 goto retry; 1044 } 1045 return(pfsm); 1046 } 1047 1048 /* 1049 * Store pseudo-fs data. The backend will automatically delete any prior 1050 * on-disk pseudo-fs data but we have to delete in-memory versions. 1051 */ 1052 int 1053 hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm) 1054 { 1055 struct hammer_cursor cursor; 1056 hammer_record_t record; 1057 hammer_inode_t ip; 1058 int error; 1059 1060 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1061 HAMMER_DEF_LOCALIZATION, 0, &error); 1062 retry: 1063 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 1064 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 1065 cursor.key_beg.localization = ip->obj_localization + 1066 HAMMER_LOCALIZE_MISC; 1067 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 1068 cursor.key_beg.create_tid = 0; 1069 cursor.key_beg.delete_tid = 0; 1070 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 1071 cursor.key_beg.obj_type = 0; 1072 cursor.key_beg.key = pfsm->localization; 1073 cursor.asof = HAMMER_MAX_TID; 1074 cursor.flags |= HAMMER_CURSOR_ASOF; 1075 1076 /* 1077 * Replace any in-memory version of the record. 1078 */ 1079 error = hammer_ip_lookup(&cursor); 1080 if (error == 0 && hammer_cursor_inmem(&cursor)) { 1081 record = cursor.iprec; 1082 if (record->flags & HAMMER_RECF_INTERLOCK_BE) { 1083 KKASSERT(cursor.deadlk_rec == NULL); 1084 hammer_ref(&record->lock); 1085 cursor.deadlk_rec = record; 1086 error = EDEADLK; 1087 } else { 1088 record->flags |= HAMMER_RECF_DELETED_FE; 1089 error = 0; 1090 } 1091 } 1092 1093 /* 1094 * Allocate replacement general record. The backend flush will 1095 * delete any on-disk version of the record. 
1096 */ 1097 if (error == 0 || error == ENOENT) { 1098 record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd)); 1099 record->type = HAMMER_MEM_RECORD_GENERAL; 1100 1101 record->leaf.base.localization = ip->obj_localization + 1102 HAMMER_LOCALIZE_MISC; 1103 record->leaf.base.rec_type = HAMMER_RECTYPE_PFS; 1104 record->leaf.base.key = pfsm->localization; 1105 record->leaf.data_len = sizeof(pfsm->pfsd); 1106 bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd)); 1107 error = hammer_ip_add_record(trans, record); 1108 } 1109 hammer_done_cursor(&cursor); 1110 if (error == EDEADLK) 1111 goto retry; 1112 hammer_rel_inode(ip, 0); 1113 return(error); 1114 } 1115 1116 /* 1117 * Create a root directory for a PFS if one does not alredy exist. 1118 * 1119 * The PFS root stands alone so we must also bump the nlinks count 1120 * to prevent it from being destroyed on release. 1121 */ 1122 int 1123 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, 1124 hammer_pseudofs_inmem_t pfsm) 1125 { 1126 hammer_inode_t ip; 1127 struct vattr vap; 1128 int error; 1129 1130 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1131 pfsm->localization, 0, &error); 1132 if (ip == NULL) { 1133 vattr_null(&vap); 1134 vap.va_mode = 0755; 1135 vap.va_type = VDIR; 1136 error = hammer_create_inode(trans, &vap, cred, 1137 NULL, NULL, 0, 1138 pfsm, &ip); 1139 if (error == 0) { 1140 ++ip->ino_data.nlinks; 1141 hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY); 1142 } 1143 } 1144 if (ip) 1145 hammer_rel_inode(ip, 0); 1146 return(error); 1147 } 1148 1149 /* 1150 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY 1151 * if we are unable to disassociate all the inodes. 1152 */ 1153 static 1154 int 1155 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data) 1156 { 1157 int res; 1158 1159 hammer_ref(&ip->lock); 1160 if (hammer_isactive(&ip->lock) == 2 && ip->vp) 1161 vclean_unlocked(ip->vp); 1162 if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL) 1163 res = 0; 1164 else 1165 res = -1; /* stop, someone is using the inode */ 1166 hammer_rel_inode(ip, 0); 1167 return(res); 1168 } 1169 1170 int 1171 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization) 1172 { 1173 int res; 1174 int try; 1175 1176 for (try = res = 0; try < 4; ++try) { 1177 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root, 1178 hammer_inode_pfs_cmp, 1179 hammer_unload_pseudofs_callback, 1180 &localization); 1181 if (res == 0 && try > 1) 1182 break; 1183 hammer_flusher_sync(trans->hmp); 1184 } 1185 if (res != 0) 1186 res = ENOTEMPTY; 1187 return(res); 1188 } 1189 1190 1191 /* 1192 * Release a reference on a PFS 1193 */ 1194 void 1195 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm) 1196 { 1197 hammer_rel(&pfsm->lock); 1198 if (hammer_norefs(&pfsm->lock)) { 1199 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm); 1200 kfree(pfsm, hmp->m_misc); 1201 } 1202 } 1203 1204 /* 1205 * Called by hammer_sync_inode(). 1206 */ 1207 static int 1208 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip) 1209 { 1210 hammer_transaction_t trans = cursor->trans; 1211 hammer_record_t record; 1212 int error; 1213 int redirty; 1214 1215 retry: 1216 error = 0; 1217 1218 /* 1219 * If the inode has a presence on-disk then locate it and mark 1220 * it deleted, setting DELONDISK. 1221 * 1222 * The record may or may not be physically deleted, depending on 1223 * the retention policy. 
1224 */ 1225 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) == 1226 HAMMER_INODE_ONDISK) { 1227 hammer_normalize_cursor(cursor); 1228 cursor->key_beg.localization = ip->obj_localization + 1229 HAMMER_LOCALIZE_INODE; 1230 cursor->key_beg.obj_id = ip->obj_id; 1231 cursor->key_beg.key = 0; 1232 cursor->key_beg.create_tid = 0; 1233 cursor->key_beg.delete_tid = 0; 1234 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1235 cursor->key_beg.obj_type = 0; 1236 cursor->asof = ip->obj_asof; 1237 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1238 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF; 1239 cursor->flags |= HAMMER_CURSOR_BACKEND; 1240 1241 error = hammer_btree_lookup(cursor); 1242 if (hammer_debug_inode) 1243 kprintf("IPDEL %p %08x %d", ip, ip->flags, error); 1244 1245 if (error == 0) { 1246 error = hammer_ip_delete_record(cursor, ip, trans->tid); 1247 if (hammer_debug_inode) 1248 kprintf(" error %d\n", error); 1249 if (error == 0) { 1250 ip->flags |= HAMMER_INODE_DELONDISK; 1251 } 1252 if (cursor->node) 1253 hammer_cache_node(&ip->cache[0], cursor->node); 1254 } 1255 if (error == EDEADLK) { 1256 hammer_done_cursor(cursor); 1257 error = hammer_init_cursor(trans, cursor, 1258 &ip->cache[0], ip); 1259 if (hammer_debug_inode) 1260 kprintf("IPDED %p %d\n", ip, error); 1261 if (error == 0) 1262 goto retry; 1263 } 1264 } 1265 1266 /* 1267 * Ok, write out the initial record or a new record (after deleting 1268 * the old one), unless the DELETED flag is set. This routine will 1269 * clear DELONDISK if it writes out a record. 1270 * 1271 * Update our inode statistics if this is the first application of 1272 * the inode on-disk. 1273 */ 1274 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) { 1275 /* 1276 * Generate a record and write it to the media. We clean-up 1277 * the state before releasing so we do not have to set-up 1278 * a flush_group. 1279 */ 1280 record = hammer_alloc_mem_record(ip, 0); 1281 record->type = HAMMER_MEM_RECORD_INODE; 1282 record->flush_state = HAMMER_FST_FLUSH; 1283 record->leaf = ip->sync_ino_leaf; 1284 record->leaf.base.create_tid = trans->tid; 1285 record->leaf.data_len = sizeof(ip->sync_ino_data); 1286 record->leaf.create_ts = trans->time32; 1287 record->data = (void *)&ip->sync_ino_data; 1288 record->flags |= HAMMER_RECF_INTERLOCK_BE; 1289 1290 /* 1291 * If this flag is set we cannot sync the new file size 1292 * because we haven't finished related truncations. The 1293 * inode will be flushed in another flush group to finish 1294 * the job. 1295 */ 1296 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) && 1297 ip->sync_ino_data.size != ip->ino_data.size) { 1298 redirty = 1; 1299 ip->sync_ino_data.size = ip->ino_data.size; 1300 } else { 1301 redirty = 0; 1302 } 1303 1304 for (;;) { 1305 error = hammer_ip_sync_record_cursor(cursor, record); 1306 if (hammer_debug_inode) 1307 kprintf("GENREC %p rec %08x %d\n", 1308 ip, record->flags, error); 1309 if (error != EDEADLK) 1310 break; 1311 hammer_done_cursor(cursor); 1312 error = hammer_init_cursor(trans, cursor, 1313 &ip->cache[0], ip); 1314 if (hammer_debug_inode) 1315 kprintf("GENREC reinit %d\n", error); 1316 if (error) 1317 break; 1318 } 1319 1320 /* 1321 * Note: The record was never on the inode's record tree 1322 * so just wave our hands importantly and destroy it. 
1323 */ 1324 record->flags |= HAMMER_RECF_COMMITTED; 1325 record->flags &= ~HAMMER_RECF_INTERLOCK_BE; 1326 record->flush_state = HAMMER_FST_IDLE; 1327 ++ip->rec_generation; 1328 hammer_rel_mem_record(record); 1329 1330 /* 1331 * Finish up. 1332 */ 1333 if (error == 0) { 1334 if (hammer_debug_inode) 1335 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags); 1336 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1337 HAMMER_INODE_SDIRTY | 1338 HAMMER_INODE_ATIME | 1339 HAMMER_INODE_MTIME); 1340 ip->flags &= ~HAMMER_INODE_DELONDISK; 1341 if (redirty) 1342 ip->sync_flags |= HAMMER_INODE_DDIRTY; 1343 1344 /* 1345 * Root volume count of inodes 1346 */ 1347 hammer_sync_lock_sh(trans); 1348 if ((ip->flags & HAMMER_INODE_ONDISK) == 0) { 1349 hammer_modify_volume_field(trans, 1350 trans->rootvol, 1351 vol0_stat_inodes); 1352 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes; 1353 hammer_modify_volume_done(trans->rootvol); 1354 ip->flags |= HAMMER_INODE_ONDISK; 1355 if (hammer_debug_inode) 1356 kprintf("NOWONDISK %p\n", ip); 1357 } 1358 hammer_sync_unlock(trans); 1359 } 1360 } 1361 1362 /* 1363 * If the inode has been destroyed, clean out any left-over flags 1364 * that may have been set by the frontend. 1365 */ 1366 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 1367 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1368 HAMMER_INODE_SDIRTY | 1369 HAMMER_INODE_ATIME | 1370 HAMMER_INODE_MTIME); 1371 } 1372 return(error); 1373 } 1374 1375 /* 1376 * Update only the itimes fields. 1377 * 1378 * ATIME can be updated without generating any UNDO. MTIME is updated 1379 * with UNDO so it is guaranteed to be synchronized properly in case of 1380 * a crash. 1381 * 1382 * Neither field is included in the B-Tree leaf element's CRC, which is how 1383 * we can get away with updating ATIME the way we do. 1384 */ 1385 static int 1386 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip) 1387 { 1388 hammer_transaction_t trans = cursor->trans; 1389 int error; 1390 1391 retry: 1392 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) != 1393 HAMMER_INODE_ONDISK) { 1394 return(0); 1395 } 1396 1397 hammer_normalize_cursor(cursor); 1398 cursor->key_beg.localization = ip->obj_localization + 1399 HAMMER_LOCALIZE_INODE; 1400 cursor->key_beg.obj_id = ip->obj_id; 1401 cursor->key_beg.key = 0; 1402 cursor->key_beg.create_tid = 0; 1403 cursor->key_beg.delete_tid = 0; 1404 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1405 cursor->key_beg.obj_type = 0; 1406 cursor->asof = ip->obj_asof; 1407 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1408 cursor->flags |= HAMMER_CURSOR_ASOF; 1409 cursor->flags |= HAMMER_CURSOR_GET_LEAF; 1410 cursor->flags |= HAMMER_CURSOR_GET_DATA; 1411 cursor->flags |= HAMMER_CURSOR_BACKEND; 1412 1413 error = hammer_btree_lookup(cursor); 1414 if (error == 0) { 1415 hammer_cache_node(&ip->cache[0], cursor->node); 1416 if (ip->sync_flags & HAMMER_INODE_MTIME) { 1417 /* 1418 * Updating MTIME requires an UNDO. Just cover 1419 * both atime and mtime. 1420 */ 1421 hammer_sync_lock_sh(trans); 1422 hammer_modify_buffer(trans, cursor->data_buffer, 1423 HAMMER_ITIMES_BASE(&cursor->data->inode), 1424 HAMMER_ITIMES_BYTES); 1425 cursor->data->inode.atime = ip->sync_ino_data.atime; 1426 cursor->data->inode.mtime = ip->sync_ino_data.mtime; 1427 hammer_modify_buffer_done(cursor->data_buffer); 1428 hammer_sync_unlock(trans); 1429 } else if (ip->sync_flags & HAMMER_INODE_ATIME) { 1430 /* 1431 * Updating atime only can be done in-place with 1432 * no UNDO. 
1433 */ 1434 hammer_sync_lock_sh(trans); 1435 hammer_modify_buffer(trans, cursor->data_buffer, 1436 NULL, 0); 1437 cursor->data->inode.atime = ip->sync_ino_data.atime; 1438 hammer_modify_buffer_done(cursor->data_buffer); 1439 hammer_sync_unlock(trans); 1440 } 1441 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME); 1442 } 1443 if (error == EDEADLK) { 1444 hammer_done_cursor(cursor); 1445 error = hammer_init_cursor(trans, cursor, 1446 &ip->cache[0], ip); 1447 if (error == 0) 1448 goto retry; 1449 } 1450 return(error); 1451 } 1452 1453 /* 1454 * Release a reference on an inode, flush as requested. 1455 * 1456 * On the last reference we queue the inode to the flusher for its final 1457 * disposition. 1458 */ 1459 void 1460 hammer_rel_inode(struct hammer_inode *ip, int flush) 1461 { 1462 /*hammer_mount_t hmp = ip->hmp;*/ 1463 1464 /* 1465 * Handle disposition when dropping the last ref. 1466 */ 1467 for (;;) { 1468 if (hammer_oneref(&ip->lock)) { 1469 /* 1470 * Determine whether on-disk action is needed for 1471 * the inode's final disposition. 1472 */ 1473 KKASSERT(ip->vp == NULL); 1474 hammer_inode_unloadable_check(ip, 0); 1475 if (ip->flags & HAMMER_INODE_MODMASK) { 1476 hammer_flush_inode(ip, 0); 1477 } else if (hammer_oneref(&ip->lock)) { 1478 hammer_unload_inode(ip); 1479 break; 1480 } 1481 } else { 1482 if (flush) 1483 hammer_flush_inode(ip, 0); 1484 1485 /* 1486 * The inode still has multiple refs, try to drop 1487 * one ref. 1488 */ 1489 KKASSERT(hammer_isactive(&ip->lock) >= 1); 1490 if (hammer_isactive(&ip->lock) > 1) { 1491 hammer_rel(&ip->lock); 1492 break; 1493 } 1494 } 1495 } 1496 } 1497 1498 /* 1499 * Unload and destroy the specified inode. Must be called with one remaining 1500 * reference. The reference is disposed of. 1501 * 1502 * The inode must be completely clean. 1503 */ 1504 static int 1505 hammer_unload_inode(struct hammer_inode *ip) 1506 { 1507 hammer_mount_t hmp = ip->hmp; 1508 1509 KASSERT(hammer_oneref(&ip->lock), 1510 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock))); 1511 KKASSERT(ip->vp == NULL); 1512 KKASSERT(ip->flush_state == HAMMER_FST_IDLE); 1513 KKASSERT(ip->cursor_ip_refs == 0); 1514 KKASSERT(hammer_notlocked(&ip->lock)); 1515 KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0); 1516 1517 KKASSERT(RB_EMPTY(&ip->rec_tree)); 1518 KKASSERT(TAILQ_EMPTY(&ip->target_list)); 1519 1520 if (ip->flags & HAMMER_INODE_RDIRTY) { 1521 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip); 1522 ip->flags &= ~HAMMER_INODE_RDIRTY; 1523 } 1524 RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip); 1525 1526 hammer_free_inode(ip); 1527 return(0); 1528 } 1529 1530 /* 1531 * Called during unmounting if a critical error occured. The in-memory 1532 * inode and all related structures are destroyed. 1533 * 1534 * If a critical error did not occur the unmount code calls the standard 1535 * release and asserts that the inode is gone. 1536 */ 1537 int 1538 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused) 1539 { 1540 hammer_record_t rec; 1541 1542 /* 1543 * Get rid of the inodes in-memory records, regardless of their 1544 * state, and clear the mod-mask. 
1545 */ 1546 while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) { 1547 TAILQ_REMOVE(&ip->target_list, rec, target_entry); 1548 rec->target_ip = NULL; 1549 if (rec->flush_state == HAMMER_FST_SETUP) 1550 rec->flush_state = HAMMER_FST_IDLE; 1551 } 1552 while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) { 1553 if (rec->flush_state == HAMMER_FST_FLUSH) 1554 --rec->flush_group->refs; 1555 else 1556 hammer_ref(&rec->lock); 1557 KKASSERT(hammer_oneref(&rec->lock)); 1558 rec->flush_state = HAMMER_FST_IDLE; 1559 rec->flush_group = NULL; 1560 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */ 1561 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */ 1562 ++ip->rec_generation; 1563 hammer_rel_mem_record(rec); 1564 } 1565 ip->flags &= ~HAMMER_INODE_MODMASK; 1566 ip->sync_flags &= ~HAMMER_INODE_MODMASK; 1567 KKASSERT(ip->vp == NULL); 1568 1569 /* 1570 * Remove the inode from any flush group, force it idle. FLUSH 1571 * and SETUP states have an inode ref. 1572 */ 1573 switch(ip->flush_state) { 1574 case HAMMER_FST_FLUSH: 1575 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 1576 --ip->flush_group->refs; 1577 ip->flush_group = NULL; 1578 /* fall through */ 1579 case HAMMER_FST_SETUP: 1580 hammer_rel(&ip->lock); 1581 ip->flush_state = HAMMER_FST_IDLE; 1582 /* fall through */ 1583 case HAMMER_FST_IDLE: 1584 break; 1585 } 1586 1587 /* 1588 * There shouldn't be any associated vnode. The unload needs at 1589 * least one ref, if we do have a vp steal its ip ref. 1590 */ 1591 if (ip->vp) { 1592 kprintf("hammer_destroy_inode_callback: Unexpected " 1593 "vnode association ip %p vp %p\n", ip, ip->vp); 1594 ip->vp->v_data = NULL; 1595 ip->vp = NULL; 1596 } else { 1597 hammer_ref(&ip->lock); 1598 } 1599 hammer_unload_inode(ip); 1600 return(0); 1601 } 1602 1603 /* 1604 * Called on mount -u when switching from RW to RO or vise-versa. Adjust 1605 * the read-only flag for cached inodes. 1606 * 1607 * This routine is called from a RB_SCAN(). 1608 */ 1609 int 1610 hammer_reload_inode(hammer_inode_t ip, void *arg __unused) 1611 { 1612 hammer_mount_t hmp = ip->hmp; 1613 1614 if (hmp->ronly || hmp->asof != HAMMER_MAX_TID) 1615 ip->flags |= HAMMER_INODE_RO; 1616 else 1617 ip->flags &= ~HAMMER_INODE_RO; 1618 return(0); 1619 } 1620 1621 /* 1622 * A transaction has modified an inode, requiring updates as specified by 1623 * the passed flags. 1624 * 1625 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime, 1626 * and not including size changes due to write-append 1627 * (but other size changes are included). 1628 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to 1629 * write-append. 1630 * HAMMER_INODE_XDIRTY: Dirty in-memory records 1631 * HAMMER_INODE_BUFS: Dirty buffer cache buffers 1632 * HAMMER_INODE_DELETED: Inode record/data must be deleted 1633 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated 1634 */ 1635 void 1636 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags) 1637 { 1638 /* 1639 * ronly of 0 or 2 does not trigger assertion. 
1640 * 2 is a special error state 1641 */ 1642 KKASSERT(ip->hmp->ronly != 1 || 1643 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 1644 HAMMER_INODE_SDIRTY | 1645 HAMMER_INODE_BUFS | HAMMER_INODE_DELETED | 1646 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0); 1647 if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) { 1648 ip->flags |= HAMMER_INODE_RSV_INODES; 1649 ++ip->hmp->rsv_inodes; 1650 } 1651 1652 /* 1653 * Set the NEWINODE flag in the transaction if the inode 1654 * transitions to a dirty state. This is used to track 1655 * the load on the inode cache. 1656 */ 1657 if (trans && 1658 (ip->flags & HAMMER_INODE_MODMASK) == 0 && 1659 (flags & HAMMER_INODE_MODMASK)) { 1660 trans->flags |= HAMMER_TRANSF_NEWINODE; 1661 } 1662 if (flags & HAMMER_INODE_MODMASK) 1663 hammer_inode_dirty(ip); 1664 ip->flags |= flags; 1665 } 1666 1667 /* 1668 * Attempt to quickly update the atime for a hammer inode. Return 0 on 1669 * success, -1 on failure. 1670 * 1671 * We attempt to update the atime with only the ip lock and not the 1672 * whole filesystem lock in order to improve concurrency. We can only 1673 * do this safely if the ATIME flag is already pending on the inode. 1674 * 1675 * This function is called via a vnops path (ip pointer is stable) without 1676 * fs_token held. 1677 */ 1678 int 1679 hammer_update_atime_quick(hammer_inode_t ip) 1680 { 1681 struct timeval tv; 1682 int res = -1; 1683 1684 if ((ip->flags & HAMMER_INODE_RO) || 1685 (ip->hmp->mp->mnt_flag & MNT_NOATIME)) { 1686 /* 1687 * Silently indicate success on read-only mount/snap 1688 */ 1689 res = 0; 1690 } else if (ip->flags & HAMMER_INODE_ATIME) { 1691 /* 1692 * Double check with inode lock held against backend. This 1693 * is only safe if all we need to do is update 1694 * ino_data.atime. 1695 */ 1696 getmicrotime(&tv); 1697 hammer_lock_ex(&ip->lock); 1698 if (ip->flags & HAMMER_INODE_ATIME) { 1699 ip->ino_data.atime = 1700 (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec; 1701 res = 0; 1702 } 1703 hammer_unlock(&ip->lock); 1704 } 1705 return res; 1706 } 1707 1708 /* 1709 * Request that an inode be flushed. This whole mess cannot block and may 1710 * recurse (if not synchronous). Once requested HAMMER will attempt to 1711 * actively flush the inode until the flush can be done. 1712 * 1713 * The inode may already be flushing, or may be in a setup state. We can 1714 * place the inode in a flushing state if it is currently idle and flag it 1715 * to reflush if it is currently flushing. 1716 * 1717 * Upon return if the inode could not be flushed due to a setup 1718 * dependancy, then it will be automatically flushed when the dependancy 1719 * is satisfied. 1720 */ 1721 void 1722 hammer_flush_inode(hammer_inode_t ip, int flags) 1723 { 1724 hammer_mount_t hmp; 1725 hammer_flush_group_t flg; 1726 int good; 1727 1728 /* 1729 * fill_flush_group is the first flush group we may be able to 1730 * continue filling, it may be open or closed but it will always 1731 * be past the currently flushing (running) flg. 1732 * 1733 * next_flush_group is the next open flush group. 
1734 */ 1735 hmp = ip->hmp; 1736 while ((flg = hmp->fill_flush_group) != NULL) { 1737 KKASSERT(flg->running == 0); 1738 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit && 1739 flg->total_count <= hammer_autoflush) { 1740 break; 1741 } 1742 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 1743 hammer_flusher_async(ip->hmp, flg); 1744 } 1745 if (flg == NULL) { 1746 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO); 1747 flg->seq = hmp->flusher.next++; 1748 if (hmp->next_flush_group == NULL) 1749 hmp->next_flush_group = flg; 1750 if (hmp->fill_flush_group == NULL) 1751 hmp->fill_flush_group = flg; 1752 RB_INIT(&flg->flush_tree); 1753 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry); 1754 } 1755 1756 /* 1757 * Trivial 'nothing to flush' case. If the inode is in a SETUP 1758 * state we have to put it back into an IDLE state so we can 1759 * drop the extra ref. 1760 * 1761 * If we have a parent dependancy we must still fall through 1762 * so we can run it. 1763 */ 1764 if ((ip->flags & HAMMER_INODE_MODMASK) == 0) { 1765 if (ip->flush_state == HAMMER_FST_SETUP && 1766 TAILQ_EMPTY(&ip->target_list)) { 1767 ip->flush_state = HAMMER_FST_IDLE; 1768 hammer_rel_inode(ip, 0); 1769 } 1770 if (ip->flush_state == HAMMER_FST_IDLE) 1771 return; 1772 } 1773 1774 /* 1775 * Our flush action will depend on the current state. 1776 */ 1777 switch(ip->flush_state) { 1778 case HAMMER_FST_IDLE: 1779 /* 1780 * We have no dependancies and can flush immediately. Some 1781 * our children may not be flushable so we have to re-test 1782 * with that additional knowledge. 1783 */ 1784 hammer_flush_inode_core(ip, flg, flags); 1785 break; 1786 case HAMMER_FST_SETUP: 1787 /* 1788 * Recurse upwards through dependancies via target_list 1789 * and start their flusher actions going if possible. 1790 * 1791 * 'good' is our connectivity. -1 means we have none and 1792 * can't flush, 0 means there weren't any dependancies, and 1793 * 1 means we have good connectivity. 1794 */ 1795 good = hammer_setup_parent_inodes(ip, 0, flg); 1796 1797 if (good >= 0) { 1798 /* 1799 * We can continue if good >= 0. Determine how 1800 * many records under our inode can be flushed (and 1801 * mark them). 1802 */ 1803 hammer_flush_inode_core(ip, flg, flags); 1804 } else { 1805 /* 1806 * Parent has no connectivity, tell it to flush 1807 * us as soon as it does. 1808 * 1809 * The REFLUSH flag is also needed to trigger 1810 * dependancy wakeups. 1811 */ 1812 ip->flags |= HAMMER_INODE_CONN_DOWN | 1813 HAMMER_INODE_REFLUSH; 1814 if (flags & HAMMER_FLUSH_SIGNAL) { 1815 ip->flags |= HAMMER_INODE_RESIGNAL; 1816 hammer_flusher_async(ip->hmp, flg); 1817 } 1818 } 1819 break; 1820 case HAMMER_FST_FLUSH: 1821 /* 1822 * We are already flushing, flag the inode to reflush 1823 * if needed after it completes its current flush. 1824 * 1825 * The REFLUSH flag is also needed to trigger 1826 * dependancy wakeups. 1827 */ 1828 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0) 1829 ip->flags |= HAMMER_INODE_REFLUSH; 1830 if (flags & HAMMER_FLUSH_SIGNAL) { 1831 ip->flags |= HAMMER_INODE_RESIGNAL; 1832 hammer_flusher_async(ip->hmp, flg); 1833 } 1834 break; 1835 } 1836 } 1837 1838 /* 1839 * Scan ip->target_list, which is a list of records owned by PARENTS to our 1840 * ip which reference our ip. 1841 * 1842 * XXX This is a huge mess of recursive code, but not one bit of it blocks 1843 * so for now do not ref/deref the structures. Note that if we use the 1844 * ref/rel code later, the rel CAN block. 
1845 */ 1846 static int 1847 hammer_setup_parent_inodes(hammer_inode_t ip, int depth, 1848 hammer_flush_group_t flg) 1849 { 1850 hammer_record_t depend; 1851 int good; 1852 int r; 1853 1854 /* 1855 * If we hit our recursion limit and we have parent dependencies 1856 * We cannot continue. Returning < 0 will cause us to be flagged 1857 * for reflush. Returning -2 cuts off additional dependency checks 1858 * because they are likely to also hit the depth limit. 1859 * 1860 * We cannot return < 0 if there are no dependencies or there might 1861 * not be anything to wakeup (ip). 1862 */ 1863 if (depth == 20 && TAILQ_FIRST(&ip->target_list)) { 1864 if (hammer_debug_general & 0x10000) 1865 krateprintf(&hammer_gen_krate, 1866 "HAMMER Warning: depth limit reached on " 1867 "setup recursion, inode %p %016llx\n", 1868 ip, (long long)ip->obj_id); 1869 return(-2); 1870 } 1871 1872 /* 1873 * Scan dependencies 1874 */ 1875 good = 0; 1876 TAILQ_FOREACH(depend, &ip->target_list, target_entry) { 1877 r = hammer_setup_parent_inodes_helper(depend, depth, flg); 1878 KKASSERT(depend->target_ip == ip); 1879 if (r < 0 && good == 0) 1880 good = -1; 1881 if (r > 0) 1882 good = 1; 1883 1884 /* 1885 * If we failed due to the recursion depth limit then stop 1886 * now. 1887 */ 1888 if (r == -2) 1889 break; 1890 } 1891 return(good); 1892 } 1893 1894 /* 1895 * This helper function takes a record representing the dependancy between 1896 * the parent inode and child inode. 1897 * 1898 * record->ip = parent inode 1899 * record->target_ip = child inode 1900 * 1901 * We are asked to recurse upwards and convert the record from SETUP 1902 * to FLUSH if possible. 1903 * 1904 * Return 1 if the record gives us connectivity 1905 * 1906 * Return 0 if the record is not relevant 1907 * 1908 * Return -1 if we can't resolve the dependancy and there is no connectivity. 1909 */ 1910 static int 1911 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth, 1912 hammer_flush_group_t flg) 1913 { 1914 hammer_inode_t pip; 1915 int good; 1916 1917 KKASSERT(record->flush_state != HAMMER_FST_IDLE); 1918 pip = record->ip; 1919 1920 /* 1921 * If the record is already flushing, is it in our flush group? 1922 * 1923 * If it is in our flush group but it is a general record or a 1924 * delete-on-disk, it does not improve our connectivity (return 0), 1925 * and if the target inode is not trying to destroy itself we can't 1926 * allow the operation yet anyway (the second return -1). 1927 */ 1928 if (record->flush_state == HAMMER_FST_FLUSH) { 1929 /* 1930 * If not in our flush group ask the parent to reflush 1931 * us as soon as possible. 1932 */ 1933 if (record->flush_group != flg) { 1934 pip->flags |= HAMMER_INODE_REFLUSH; 1935 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1936 return(-1); 1937 } 1938 1939 /* 1940 * If in our flush group everything is already set up, 1941 * just return whether the record will improve our 1942 * visibility or not. 1943 */ 1944 if (record->type == HAMMER_MEM_RECORD_ADD) 1945 return(1); 1946 return(0); 1947 } 1948 1949 /* 1950 * It must be a setup record. Try to resolve the setup dependancies 1951 * by recursing upwards so we can place ip on the flush list. 1952 * 1953 * Limit ourselves to 20 levels of recursion to avoid blowing out 1954 * the kernel stack. If we hit the recursion limit we can't flush 1955 * until the parent flushes. The parent will flush independantly 1956 * on its own and ultimately a deep recursion will be resolved. 
1957 */ 1958 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1959 1960 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1961 1962 /* 1963 * If good < 0 the parent has no connectivity and we cannot safely 1964 * flush the directory entry, which also means we can't flush our 1965 * ip. Flag us for downward recursion once the parent's 1966 * connectivity is resolved. Flag the parent for [re]flush or it 1967 * may not check for downward recursions. 1968 */ 1969 if (good < 0) { 1970 pip->flags |= HAMMER_INODE_REFLUSH; 1971 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1972 return(good); 1973 } 1974 1975 /* 1976 * We are go, place the parent inode in a flushing state so we can 1977 * place its record in a flushing state. Note that the parent 1978 * may already be flushing. The record must be in the same flush 1979 * group as the parent. 1980 */ 1981 if (pip->flush_state != HAMMER_FST_FLUSH) 1982 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 1983 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 1984 1985 /* 1986 * It is possible for a rename to create a loop in the recursion 1987 * and revisit a record. This will result in the record being 1988 * placed in a flush state unexpectedly. This check deals with 1989 * the case. 1990 */ 1991 if (record->flush_state == HAMMER_FST_FLUSH) { 1992 if (record->type == HAMMER_MEM_RECORD_ADD) 1993 return(1); 1994 return(0); 1995 } 1996 1997 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1998 1999 #if 0 2000 if (record->type == HAMMER_MEM_RECORD_DEL && 2001 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 2002 /* 2003 * Regardless of flushing state we cannot sync this path if the 2004 * record represents a delete-on-disk but the target inode 2005 * is not ready to sync its own deletion. 2006 * 2007 * XXX need to count effective nlinks to determine whether 2008 * the flush is ok, otherwise removing a hardlink will 2009 * just leave the DEL record to rot. 2010 */ 2011 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 2012 return(-1); 2013 } else 2014 #endif 2015 if (pip->flush_group == flg) { 2016 /* 2017 * Because we have not calculated nlinks yet we can just 2018 * set records to the flush state if the parent is in 2019 * the same flush group as we are. 2020 */ 2021 record->flush_state = HAMMER_FST_FLUSH; 2022 record->flush_group = flg; 2023 ++record->flush_group->refs; 2024 hammer_ref(&record->lock); 2025 2026 /* 2027 * A general directory-add contributes to our visibility. 2028 * 2029 * Otherwise it is probably a directory-delete or 2030 * delete-on-disk record and does not contribute to our 2031 * visbility (but we can still flush it). 2032 */ 2033 if (record->type == HAMMER_MEM_RECORD_ADD) 2034 return(1); 2035 return(0); 2036 } else { 2037 /* 2038 * If the parent is not in our flush group we cannot 2039 * flush this record yet, there is no visibility. 2040 * We tell the parent to reflush and mark ourselves 2041 * so the parent knows it should flush us too. 2042 */ 2043 pip->flags |= HAMMER_INODE_REFLUSH; 2044 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2045 return(-1); 2046 } 2047 } 2048 2049 /* 2050 * This is the core routine placing an inode into the FST_FLUSH state. 2051 */ 2052 static void 2053 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags) 2054 { 2055 hammer_mount_t hmp = ip->hmp; 2056 int go_count; 2057 2058 /* 2059 * Set flush state and prevent the flusher from cycling into 2060 * the next flush group. Do not place the ip on the list yet. 
2061 * Inodes not in the idle state get an extra reference. 2062 */ 2063 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH); 2064 if (ip->flush_state == HAMMER_FST_IDLE) 2065 hammer_ref(&ip->lock); 2066 ip->flush_state = HAMMER_FST_FLUSH; 2067 ip->flush_group = flg; 2068 ++hmp->flusher.group_lock; 2069 ++hmp->count_iqueued; 2070 ++hammer_count_iqueued; 2071 ++flg->total_count; 2072 hammer_redo_fifo_start_flush(ip); 2073 2074 #if 0 2075 /* 2076 * We need to be able to vfsync/truncate from the backend. 2077 * 2078 * XXX Any truncation from the backend will acquire the vnode 2079 * independently. 2080 */ 2081 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0); 2082 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) { 2083 ip->flags |= HAMMER_INODE_VHELD; 2084 vref(ip->vp); 2085 } 2086 #endif 2087 2088 /* 2089 * Figure out how many in-memory records we can actually flush 2090 * (not including inode meta-data, buffers, etc). 2091 */ 2092 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0); 2093 if (flags & HAMMER_FLUSH_RECURSION) { 2094 /* 2095 * If this is an upwards recursion we do not want to 2096 * recurse down again! 2097 */ 2098 go_count = 1; 2099 #if 0 2100 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2101 /* 2102 * No new records are added if we must complete a flush 2103 * from a previous cycle, but we do have to move the records 2104 * from the previous cycle to the current one. 2105 */ 2106 #if 0 2107 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2108 hammer_syncgrp_child_callback, NULL); 2109 #endif 2110 go_count = 1; 2111 #endif 2112 } else { 2113 /* 2114 * Normal flush, scan records and bring them into the flush. 2115 * Directory adds and deletes are usually skipped (they are 2116 * grouped with the related inode rather than with the 2117 * directory). 2118 * 2119 * go_count can be negative, which means the scan aborted 2120 * due to the flush group being over-full and we should 2121 * flush what we have. 2122 */ 2123 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2124 hammer_setup_child_callback, NULL); 2125 } 2126 2127 /* 2128 * This is a more involved test that includes go_count. If we 2129 * can't flush, flag the inode and return. If go_count is 0 we 2130 * are unable to flush any records in our rec_tree and 2131 * must ignore the XDIRTY flag. 2132 */ 2133 if (go_count == 0) { 2134 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) { 2135 --hmp->count_iqueued; 2136 --hammer_count_iqueued; 2137 2138 --flg->total_count; 2139 ip->flush_state = HAMMER_FST_SETUP; 2140 ip->flush_group = NULL; 2141 if (flags & HAMMER_FLUSH_SIGNAL) { 2142 ip->flags |= HAMMER_INODE_REFLUSH | 2143 HAMMER_INODE_RESIGNAL; 2144 } else { 2145 ip->flags |= HAMMER_INODE_REFLUSH; 2146 } 2147 #if 0 2148 if (ip->flags & HAMMER_INODE_VHELD) { 2149 ip->flags &= ~HAMMER_INODE_VHELD; 2150 vrele(ip->vp); 2151 } 2152 #endif 2153 2154 /* 2155 * REFLUSH is needed to trigger dependency wakeups 2156 * when an inode is in SETUP. 2157 */ 2158 ip->flags |= HAMMER_INODE_REFLUSH; 2159 if (--hmp->flusher.group_lock == 0) 2160 wakeup(&hmp->flusher.group_lock); 2161 return; 2162 } 2163 } 2164 2165 /* 2166 * Snapshot the state of the inode for the backend flusher. 2167 * 2168 * We continue to retain save_trunc_off even when all truncations 2169 * have been resolved as an optimization to determine if we can 2170 * skip the B-Tree lookup for overwrite deletions. 2171 * 2172 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 2173 * and stays in ip->flags.
Once set, it stays set until the 2174 * inode is destroyed. 2175 */ 2176 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2177 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 2178 ip->sync_trunc_off = ip->trunc_off; 2179 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 2180 ip->flags &= ~HAMMER_INODE_TRUNCATED; 2181 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 2182 2183 /* 2184 * The save_trunc_off used to cache whether the B-Tree 2185 * holds any records past that point is not used until 2186 * after the truncation has succeeded, so we can safely 2187 * set it now. 2188 */ 2189 if (ip->save_trunc_off > ip->sync_trunc_off) 2190 ip->save_trunc_off = ip->sync_trunc_off; 2191 } 2192 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 2193 ~HAMMER_INODE_TRUNCATED); 2194 ip->sync_ino_leaf = ip->ino_leaf; 2195 ip->sync_ino_data = ip->ino_data; 2196 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 2197 #ifdef DEBUG_TRUNCATE 2198 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp) 2199 kprintf("truncateS %016llx\n", ip->sync_trunc_off); 2200 #endif 2201 2202 /* 2203 * The flusher list inherits our inode and reference. 2204 */ 2205 KKASSERT(flg->running == 0); 2206 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip); 2207 if (--hmp->flusher.group_lock == 0) 2208 wakeup(&hmp->flusher.group_lock); 2209 2210 /* 2211 * Auto-flush the group if it grows too large. Make sure the 2212 * inode reclaim wait pipeline continues to work. 2213 */ 2214 if (flg->total_count >= hammer_autoflush || 2215 flg->total_count >= hammer_limit_reclaims / 4) { 2216 if (hmp->fill_flush_group == flg) 2217 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 2218 hammer_flusher_async(hmp, flg); 2219 } 2220 } 2221 2222 /* 2223 * Callback for scan of ip->rec_tree. Try to include each record in our 2224 * flush. ip->flush_group has been set but the inode has not yet been 2225 * moved into a flushing state. 2226 * 2227 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 2228 * both inodes. 2229 * 2230 * We return 1 for any record placed or found in FST_FLUSH, which prevents 2231 * the caller from shortcutting the flush. 2232 */ 2233 static int 2234 hammer_setup_child_callback(hammer_record_t rec, void *data) 2235 { 2236 hammer_flush_group_t flg; 2237 hammer_inode_t target_ip; 2238 hammer_inode_t ip; 2239 int r; 2240 2241 /* 2242 * Records deleted or committed by the backend are ignored. 2243 * Note that the flush detects deleted frontend records at 2244 * multiple points to deal with races. This is just the first 2245 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot 2246 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it 2247 * messes up link-count calculations. 2248 * 2249 * NOTE: Don't get confused between record deletion and, say, 2250 * directory entry deletion. The deletion of a directory entry 2251 * which is on-media has nothing to do with the record deletion 2252 * flags. 2253 */ 2254 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE | 2255 HAMMER_RECF_COMMITTED)) { 2256 if (rec->flush_state == HAMMER_FST_FLUSH) { 2257 KKASSERT(rec->flush_group == rec->ip->flush_group); 2258 r = 1; 2259 } else { 2260 r = 0; 2261 } 2262 return(r); 2263 } 2264 2265 /* 2266 * If the record is in an idle state it has no dependancies and 2267 * can be flushed. 2268 */ 2269 ip = rec->ip; 2270 flg = ip->flush_group; 2271 r = 0; 2272 2273 switch(rec->flush_state) { 2274 case HAMMER_FST_IDLE: 2275 /* 2276 * The record has no setup dependancy, we can flush it. 
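 *
 * (These are typically plain data records or other records that do not
 * reference a second inode, hence the target_ip == NULL assertion
 * below.)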
2277 */ 2278 KKASSERT(rec->target_ip == NULL); 2279 rec->flush_state = HAMMER_FST_FLUSH; 2280 rec->flush_group = flg; 2281 ++flg->refs; 2282 hammer_ref(&rec->lock); 2283 r = 1; 2284 break; 2285 case HAMMER_FST_SETUP: 2286 /* 2287 * The record has a setup dependency. These are typically 2288 * directory entry adds and deletes. Such entries will be 2289 * flushed when their inodes are flushed so we do not 2290 * usually have to add them to the flush here. However, 2291 * if the target_ip has set HAMMER_INODE_CONN_DOWN then 2292 * it is asking us to flush this record (and it). 2293 */ 2294 target_ip = rec->target_ip; 2295 KKASSERT(target_ip != NULL); 2296 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE); 2297 2298 /* 2299 * If the target IP is already flushing in our group 2300 * we could associate the record, but target_ip has 2301 * already synced ino_data to sync_ino_data and we 2302 * would also have to adjust nlinks. Plus there are 2303 * ordering issues for adds and deletes. 2304 * 2305 * Reflush downward if this is an ADD, and upward if 2306 * this is a DEL. 2307 */ 2308 if (target_ip->flush_state == HAMMER_FST_FLUSH) { 2309 if (rec->type == HAMMER_MEM_RECORD_ADD) 2310 ip->flags |= HAMMER_INODE_REFLUSH; 2311 else 2312 target_ip->flags |= HAMMER_INODE_REFLUSH; 2313 break; 2314 } 2315 2316 /* 2317 * Target IP is not yet flushing. This can get complex 2318 * because we have to be careful about the recursion. 2319 * 2320 * Directories create an issue for us in that if a flush 2321 * of a directory is requested the expectation is to flush 2322 * any pending directory entries, but this will cause the 2323 * related inodes to recursively flush as well. We can't 2324 * really defer the operation so just get as many as we 2325 * can and flush them. 2326 */ 2327 #if 0 2328 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 && 2329 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) { 2330 /* 2331 * We aren't reclaiming and the target ip was not 2332 * previously prevented from flushing due to this 2333 * record dependency. Do not flush this record. 2334 */ 2335 /*r = 0;*/ 2336 } else 2337 #endif 2338 if (flg->total_count + flg->refs > 2339 ip->hmp->undo_rec_limit) { 2340 /* 2341 * Our flush group is over-full and we risk blowing 2342 * out the UNDO FIFO. Stop the scan, flush what we 2343 * have, then reflush the directory. 2344 * 2345 * The directory may be forced through multiple 2346 * flush groups before it can be completely 2347 * flushed. 2348 */ 2349 ip->flags |= HAMMER_INODE_RESIGNAL | 2350 HAMMER_INODE_REFLUSH; 2351 r = -1; 2352 } else if (rec->type == HAMMER_MEM_RECORD_ADD) { 2353 /* 2354 * If the target IP is not flushing we can force 2355 * it to flush, even if it is unable to write out 2356 * any of its own records, we have at least one in 2357 * hand that we CAN deal with. 2358 */ 2359 rec->flush_state = HAMMER_FST_FLUSH; 2360 rec->flush_group = flg; 2361 ++flg->refs; 2362 hammer_ref(&rec->lock); 2363 hammer_flush_inode_core(target_ip, flg, 2364 HAMMER_FLUSH_RECURSION); 2365 r = 1; 2366 } else { 2367 /* 2368 * General or delete-on-disk record. 2369 * 2370 * XXX this needs help. For a delete-on-disk record we could 2371 * disconnect the target. If the target has its own 2372 * dependencies they really need to be flushed.
2373 * 2374 * XXX 2375 */ 2376 rec->flush_state = HAMMER_FST_FLUSH; 2377 rec->flush_group = flg; 2378 ++flg->refs; 2379 hammer_ref(&rec->lock); 2380 hammer_flush_inode_core(target_ip, flg, 2381 HAMMER_FLUSH_RECURSION); 2382 r = 1; 2383 } 2384 break; 2385 case HAMMER_FST_FLUSH: 2386 /* 2387 * The record could be part of a previous flush group if the 2388 * inode is a directory (the record being a directory entry). 2389 * Once the flush group was closed a hammer_test_inode() 2390 * function can cause a new flush group to be setup, placing 2391 * the directory inode itself in a new flush group. 2392 * 2393 * When associated with a previous flush group we count it 2394 * as if it were in our current flush group, since it will 2395 * effectively be flushed by the time we flush our current 2396 * flush group. 2397 */ 2398 KKASSERT( 2399 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY || 2400 rec->flush_group == flg); 2401 r = 1; 2402 break; 2403 } 2404 return(r); 2405 } 2406 2407 #if 0 2408 /* 2409 * This version just moves records already in a flush state to the new 2410 * flush group and that is it. 2411 */ 2412 static int 2413 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2414 { 2415 hammer_inode_t ip = rec->ip; 2416 2417 switch(rec->flush_state) { 2418 case HAMMER_FST_FLUSH: 2419 KKASSERT(rec->flush_group == ip->flush_group); 2420 break; 2421 default: 2422 break; 2423 } 2424 return(0); 2425 } 2426 #endif 2427 2428 /* 2429 * Wait for a previously queued flush to complete. 2430 * 2431 * If a critical error occured we don't try to wait. 2432 */ 2433 void 2434 hammer_wait_inode(hammer_inode_t ip) 2435 { 2436 /* 2437 * The inode can be in a SETUP state in which case RESIGNAL 2438 * should be set. If RESIGNAL is not set then the previous 2439 * flush completed and a later operation placed the inode 2440 * in a passive setup state again, so we're done. 2441 * 2442 * The inode can be in a FLUSH state in which case we 2443 * can just wait for completion. 2444 */ 2445 while (ip->flush_state == HAMMER_FST_FLUSH || 2446 (ip->flush_state == HAMMER_FST_SETUP && 2447 (ip->flags & HAMMER_INODE_RESIGNAL))) { 2448 /* 2449 * Don't try to flush on a critical error 2450 */ 2451 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) 2452 break; 2453 2454 /* 2455 * If the inode was already being flushed its flg 2456 * may not have been queued to the backend. We 2457 * have to make sure it gets queued or we can wind 2458 * up blocked or deadlocked (particularly if we are 2459 * the vnlru thread). 2460 */ 2461 if (ip->flush_state == HAMMER_FST_FLUSH) { 2462 KKASSERT(ip->flush_group); 2463 if (ip->flush_group->closed == 0) { 2464 if (hammer_debug_inode) { 2465 kprintf("hammer: debug: forcing " 2466 "async flush ip %016jx\n", 2467 (intmax_t)ip->obj_id); 2468 } 2469 hammer_flusher_async(ip->hmp, 2470 ip->flush_group); 2471 continue; /* retest */ 2472 } 2473 } 2474 2475 /* 2476 * In a flush state with the flg queued to the backend 2477 * or in a setup state with RESIGNAL set, we can safely 2478 * wait. 2479 */ 2480 ip->flags |= HAMMER_INODE_FLUSHW; 2481 tsleep(&ip->flags, 0, "hmrwin", 0); 2482 } 2483 2484 #if 0 2485 /* 2486 * The inode may have been in a passive setup state, 2487 * call flush to make sure we get signaled. 2488 */ 2489 if (ip->flush_state == HAMMER_FST_SETUP) 2490 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2491 #endif 2492 2493 } 2494 2495 /* 2496 * Called by the backend code when a flush has been completed. 2497 * The inode has already been removed from the flush list. 
2498 * 2499 * A pipelined flush can occur, in which case we must re-enter the 2500 * inode on the list and re-copy its fields. 2501 */ 2502 void 2503 hammer_flush_inode_done(hammer_inode_t ip, int error) 2504 { 2505 hammer_mount_t hmp; 2506 int dorel; 2507 2508 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH); 2509 2510 hmp = ip->hmp; 2511 2512 /* 2513 * Auto-reflush if the backend could not completely flush 2514 * the inode. This fixes a case where a deferred buffer flush 2515 * could cause fsync to return early. 2516 */ 2517 if (ip->sync_flags & HAMMER_INODE_MODMASK) 2518 ip->flags |= HAMMER_INODE_REFLUSH; 2519 2520 /* 2521 * Merge left-over flags back into the frontend and fix the state. 2522 * Incomplete truncations are retained by the backend. 2523 */ 2524 ip->error = error; 2525 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED; 2526 ip->sync_flags &= HAMMER_INODE_TRUNCATED; 2527 2528 /* 2529 * The backend may have adjusted nlinks, so if the adjusted nlinks 2530 * does not match the fronttend set the frontend's DDIRTY flag again. 2531 */ 2532 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks) 2533 ip->flags |= HAMMER_INODE_DDIRTY; 2534 2535 /* 2536 * Fix up the dirty buffer status. 2537 */ 2538 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) { 2539 ip->flags |= HAMMER_INODE_BUFS; 2540 } 2541 hammer_redo_fifo_end_flush(ip); 2542 2543 /* 2544 * Re-set the XDIRTY flag if some of the inode's in-memory records 2545 * could not be flushed. 2546 */ 2547 KKASSERT((RB_EMPTY(&ip->rec_tree) && 2548 (ip->flags & HAMMER_INODE_XDIRTY) == 0) || 2549 (!RB_EMPTY(&ip->rec_tree) && 2550 (ip->flags & HAMMER_INODE_XDIRTY) != 0)); 2551 2552 /* 2553 * Do not lose track of inodes which no longer have vnode 2554 * assocations, otherwise they may never get flushed again. 2555 * 2556 * The reflush flag can be set superfluously, causing extra pain 2557 * for no reason. If the inode is no longer modified it no longer 2558 * needs to be flushed. 2559 */ 2560 if (ip->flags & HAMMER_INODE_MODMASK) { 2561 if (ip->vp == NULL) 2562 ip->flags |= HAMMER_INODE_REFLUSH; 2563 } else { 2564 ip->flags &= ~HAMMER_INODE_REFLUSH; 2565 } 2566 if (ip->flags & HAMMER_INODE_MODMASK) 2567 hammer_inode_dirty(ip); 2568 2569 /* 2570 * Adjust the flush state. 2571 */ 2572 if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2573 /* 2574 * We were unable to flush out all our records, leave the 2575 * inode in a flush state and in the current flush group. 2576 * The flush group will be re-run. 2577 * 2578 * This occurs if the UNDO block gets too full or there is 2579 * too much dirty meta-data and allows the flusher to 2580 * finalize the UNDO block and then re-flush. 2581 */ 2582 ip->flags &= ~HAMMER_INODE_WOULDBLOCK; 2583 dorel = 0; 2584 } else { 2585 /* 2586 * Remove from the flush_group 2587 */ 2588 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 2589 ip->flush_group = NULL; 2590 2591 #if 0 2592 /* 2593 * Clean up the vnode ref and tracking counts. 2594 */ 2595 if (ip->flags & HAMMER_INODE_VHELD) { 2596 ip->flags &= ~HAMMER_INODE_VHELD; 2597 vrele(ip->vp); 2598 } 2599 #endif 2600 --hmp->count_iqueued; 2601 --hammer_count_iqueued; 2602 2603 /* 2604 * And adjust the state. 2605 */ 2606 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) { 2607 ip->flush_state = HAMMER_FST_IDLE; 2608 dorel = 1; 2609 } else { 2610 ip->flush_state = HAMMER_FST_SETUP; 2611 dorel = 0; 2612 } 2613 2614 /* 2615 * If the frontend is waiting for a flush to complete, 2616 * wake it up. 
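 *
 * This pairs with hammer_wait_inode(), which sets HAMMER_INODE_FLUSHW
 * and tsleep()s on &ip->flags until the wakeup() below is issued.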
2617 */ 2618 if (ip->flags & HAMMER_INODE_FLUSHW) { 2619 ip->flags &= ~HAMMER_INODE_FLUSHW; 2620 wakeup(&ip->flags); 2621 } 2622 2623 /* 2624 * If the frontend made more changes and requested another 2625 * flush, then try to get it running. 2626 * 2627 * Reflushes are aborted when the inode is errored out. 2628 */ 2629 if (ip->flags & HAMMER_INODE_REFLUSH) { 2630 ip->flags &= ~HAMMER_INODE_REFLUSH; 2631 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2632 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2633 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2634 } else { 2635 hammer_flush_inode(ip, 0); 2636 } 2637 } 2638 } 2639 2640 /* 2641 * If we have no parent dependancies we can clear CONN_DOWN 2642 */ 2643 if (TAILQ_EMPTY(&ip->target_list)) 2644 ip->flags &= ~HAMMER_INODE_CONN_DOWN; 2645 2646 /* 2647 * If the inode is now clean drop the space reservation. 2648 */ 2649 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 && 2650 (ip->flags & HAMMER_INODE_RSV_INODES)) { 2651 ip->flags &= ~HAMMER_INODE_RSV_INODES; 2652 --hmp->rsv_inodes; 2653 } 2654 2655 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH; 2656 2657 if (dorel) 2658 hammer_rel_inode(ip, 0); 2659 } 2660 2661 /* 2662 * Called from hammer_sync_inode() to synchronize in-memory records 2663 * to the media. 2664 */ 2665 static int 2666 hammer_sync_record_callback(hammer_record_t record, void *data) 2667 { 2668 hammer_cursor_t cursor = data; 2669 hammer_transaction_t trans = cursor->trans; 2670 hammer_mount_t hmp = trans->hmp; 2671 int error; 2672 2673 /* 2674 * Skip records that do not belong to the current flush. 2675 */ 2676 ++hammer_stats_record_iterations; 2677 if (record->flush_state != HAMMER_FST_FLUSH) 2678 return(0); 2679 2680 #if 1 2681 if (record->flush_group != record->ip->flush_group) { 2682 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group); 2683 if (hammer_debug_critical) 2684 Debugger("blah2"); 2685 return(0); 2686 } 2687 #endif 2688 KKASSERT(record->flush_group == record->ip->flush_group); 2689 2690 /* 2691 * Interlock the record using the BE flag. Once BE is set the 2692 * frontend cannot change the state of FE. 2693 * 2694 * NOTE: If FE is set prior to us setting BE we still sync the 2695 * record out, but the flush completion code converts it to 2696 * a delete-on-disk record instead of destroying it. 2697 */ 2698 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2699 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2700 2701 /* 2702 * The backend has already disposed of the record. 2703 */ 2704 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) { 2705 error = 0; 2706 goto done; 2707 } 2708 2709 /* 2710 * If the whole inode is being deleted and all on-disk records will 2711 * be deleted very soon, we can't sync any new records to disk 2712 * because they will be deleted in the same transaction they were 2713 * created in (delete_tid == create_tid), which will assert. 2714 * 2715 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2716 * that we currently panic on. 2717 */ 2718 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2719 switch(record->type) { 2720 case HAMMER_MEM_RECORD_DATA: 2721 /* 2722 * We don't have to do anything, if the record was 2723 * committed the space will have been accounted for 2724 * in the blockmap. 2725 */ 2726 /* fall through */ 2727 case HAMMER_MEM_RECORD_GENERAL: 2728 /* 2729 * Set deleted-by-backend flag. Do not set the 2730 * backend committed flag, because we are throwing 2731 * the record away. 
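 *
 * (DELETED_BE tells the frontend that the backend disposed of the
 * record without it ever reaching the media; COMMITTED would instead
 * indicate that the record made it to disk.)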
2732 */ 2733 record->flags |= HAMMER_RECF_DELETED_BE; 2734 ++record->ip->rec_generation; 2735 error = 0; 2736 goto done; 2737 case HAMMER_MEM_RECORD_ADD: 2738 panic("hammer_sync_record_callback: illegal add " 2739 "during inode deletion record %p", record); 2740 break; /* NOT REACHED */ 2741 case HAMMER_MEM_RECORD_INODE: 2742 panic("hammer_sync_record_callback: attempt to " 2743 "sync inode record %p?", record); 2744 break; /* NOT REACHED */ 2745 case HAMMER_MEM_RECORD_DEL: 2746 /* 2747 * Follow through and issue the on-disk deletion 2748 */ 2749 break; 2750 } 2751 } 2752 2753 /* 2754 * If DELETED_FE is set special handling is needed for directory 2755 * entries. Dependant pieces related to the directory entry may 2756 * have already been synced to disk. If this occurs we have to 2757 * sync the directory entry and then change the in-memory record 2758 * from an ADD to a DELETE to cover the fact that it's been 2759 * deleted by the frontend. 2760 * 2761 * A directory delete covering record (MEM_RECORD_DEL) can never 2762 * be deleted by the frontend. 2763 * 2764 * Any other record type (aka DATA) can be deleted by the frontend. 2765 * XXX At the moment the flusher must skip it because there may 2766 * be another data record in the flush group for the same block, 2767 * meaning that some frontend data changes can leak into the backend's 2768 * synchronization point. 2769 */ 2770 if (record->flags & HAMMER_RECF_DELETED_FE) { 2771 if (record->type == HAMMER_MEM_RECORD_ADD) { 2772 /* 2773 * Convert a front-end deleted directory-add to 2774 * a directory-delete entry later. 2775 */ 2776 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2777 } else { 2778 /* 2779 * Dispose of the record (race case). Mark as 2780 * deleted by backend (and not committed). 2781 */ 2782 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2783 record->flags |= HAMMER_RECF_DELETED_BE; 2784 ++record->ip->rec_generation; 2785 error = 0; 2786 goto done; 2787 } 2788 } 2789 2790 /* 2791 * Assign the create_tid for new records. Deletions already 2792 * have the record's entire key properly set up. 2793 */ 2794 if (record->type != HAMMER_MEM_RECORD_DEL) { 2795 record->leaf.base.create_tid = trans->tid; 2796 record->leaf.create_ts = trans->time32; 2797 } 2798 2799 /* 2800 * This actually moves the record to the on-media B-Tree. We 2801 * must also generate REDO_TERM entries in the UNDO/REDO FIFO 2802 * indicating that the related REDO_WRITE(s) have been committed. 2803 * 2804 * During recovery any REDO_TERM's within the nominal recovery span 2805 * are ignored since the related meta-data is being undone, causing 2806 * any matching REDO_WRITEs to execute. The REDO_TERMs outside 2807 * the nominal recovery span will match against REDO_WRITEs and 2808 * prevent them from being executed (because the meta-data has 2809 * already been synchronized). 
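 *
 * In the REDO_TERM_WRITE generated below, leaf.base.key is the offset
 * just past the end of the data record, so key - data_len recovers the
 * starting file offset the original REDO_WRITE was issued against.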
2810 */ 2811 if (record->flags & HAMMER_RECF_REDO) { 2812 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA); 2813 hammer_generate_redo(trans, record->ip, 2814 record->leaf.base.key - 2815 record->leaf.data_len, 2816 HAMMER_REDO_TERM_WRITE, 2817 NULL, 2818 record->leaf.data_len); 2819 } 2820 2821 for (;;) { 2822 error = hammer_ip_sync_record_cursor(cursor, record); 2823 if (error != EDEADLK) 2824 break; 2825 hammer_done_cursor(cursor); 2826 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2827 record->ip); 2828 if (error) 2829 break; 2830 } 2831 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2832 2833 if (error) 2834 error = -error; 2835 done: 2836 hammer_flush_record_done(record, error); 2837 2838 /* 2839 * Do partial finalization if we have built up too many dirty 2840 * buffers. Otherwise a buffer cache deadlock can occur when 2841 * doing things like creating tens of thousands of tiny files. 2842 * 2843 * We must release our cursor lock to avoid a 3-way deadlock 2844 * due to the exclusive sync lock the finalizer must get. 2845 * 2846 * WARNING: See warnings in hammer_unlock_cursor() function. 2847 */ 2848 if (hammer_flusher_meta_limit(hmp) || 2849 vm_page_count_severe()) { 2850 hammer_unlock_cursor(cursor); 2851 hammer_flusher_finalize(trans, 0); 2852 hammer_lock_cursor(cursor); 2853 } 2854 return(error); 2855 } 2856 2857 /* 2858 * Backend function called by the flusher to sync an inode to media. 2859 */ 2860 int 2861 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip) 2862 { 2863 struct hammer_cursor cursor; 2864 hammer_node_t tmp_node; 2865 hammer_record_t depend; 2866 hammer_record_t next; 2867 int error, tmp_error; 2868 u_int64_t nlinks; 2869 2870 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0) 2871 return(0); 2872 2873 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 2874 if (error) 2875 goto done; 2876 2877 /* 2878 * Any directory records referencing this inode which are not in 2879 * our current flush group must adjust our nlink count for the 2880 * purposes of synchronizating to disk. 2881 * 2882 * Records which are in our flush group can be unlinked from our 2883 * inode now, potentially allowing the inode to be physically 2884 * deleted. 2885 * 2886 * This cannot block. 2887 */ 2888 nlinks = ip->ino_data.nlinks; 2889 next = TAILQ_FIRST(&ip->target_list); 2890 while ((depend = next) != NULL) { 2891 next = TAILQ_NEXT(depend, target_entry); 2892 if (depend->flush_state == HAMMER_FST_FLUSH && 2893 depend->flush_group == ip->flush_group) { 2894 /* 2895 * If this is an ADD that was deleted by the frontend 2896 * the frontend nlinks count will have already been 2897 * decremented, but the backend is going to sync its 2898 * directory entry and must account for it. The 2899 * record will be converted to a delete-on-disk when 2900 * it gets synced. 2901 * 2902 * If the ADD was not deleted by the frontend we 2903 * can remove the dependancy from our target_list. 2904 */ 2905 if (depend->flags & HAMMER_RECF_DELETED_FE) { 2906 ++nlinks; 2907 } else { 2908 TAILQ_REMOVE(&ip->target_list, depend, 2909 target_entry); 2910 depend->target_ip = NULL; 2911 } 2912 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) { 2913 /* 2914 * Not part of our flush group and not deleted by 2915 * the front-end, adjust the link count synced to 2916 * the media (undo what the frontend did when it 2917 * queued the record). 
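 *
 * For example, a hard link whose directory-entry ADD is still sitting
 * in a later flush group has already bumped the frontend's nlinks;
 * subtracting one here writes out the link count as it will actually
 * exist on media once this flush group commits.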
2918 */ 2919 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0); 2920 switch(depend->type) { 2921 case HAMMER_MEM_RECORD_ADD: 2922 --nlinks; 2923 break; 2924 case HAMMER_MEM_RECORD_DEL: 2925 ++nlinks; 2926 break; 2927 default: 2928 break; 2929 } 2930 } 2931 } 2932 2933 /* 2934 * Set dirty if we had to modify the link count. 2935 */ 2936 if (ip->sync_ino_data.nlinks != nlinks) { 2937 KKASSERT((int64_t)nlinks >= 0); 2938 ip->sync_ino_data.nlinks = nlinks; 2939 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2940 } 2941 2942 /* 2943 * If there is a trunction queued destroy any data past the (aligned) 2944 * truncation point. Userland will have dealt with the buffer 2945 * containing the truncation point for us. 2946 * 2947 * We don't flush pending frontend data buffers until after we've 2948 * dealt with the truncation. 2949 */ 2950 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2951 /* 2952 * Interlock trunc_off. The VOP front-end may continue to 2953 * make adjustments to it while we are blocked. 2954 */ 2955 off_t trunc_off; 2956 off_t aligned_trunc_off; 2957 int blkmask; 2958 2959 trunc_off = ip->sync_trunc_off; 2960 blkmask = hammer_blocksize(trunc_off) - 1; 2961 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask; 2962 2963 /* 2964 * Delete any whole blocks on-media. The front-end has 2965 * already cleaned out any partial block and made it 2966 * pending. The front-end may have updated trunc_off 2967 * while we were blocked so we only use sync_trunc_off. 2968 * 2969 * This operation can blow out the buffer cache, EWOULDBLOCK 2970 * means we were unable to complete the deletion. The 2971 * deletion will update sync_trunc_off in that case. 2972 */ 2973 error = hammer_ip_delete_range(&cursor, ip, 2974 aligned_trunc_off, 2975 0x7FFFFFFFFFFFFFFFLL, 2); 2976 if (error == EWOULDBLOCK) { 2977 ip->flags |= HAMMER_INODE_WOULDBLOCK; 2978 error = 0; 2979 goto defer_buffer_flush; 2980 } 2981 2982 if (error) 2983 goto done; 2984 2985 /* 2986 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO. 2987 * 2988 * XXX we do this even if we did not previously generate 2989 * a REDO_TRUNC record. This operation may enclosed the 2990 * range for multiple prior truncation entries in the REDO 2991 * log. 2992 */ 2993 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR && 2994 (ip->flags & HAMMER_INODE_RDIRTY)) { 2995 hammer_generate_redo(trans, ip, aligned_trunc_off, 2996 HAMMER_REDO_TERM_TRUNC, 2997 NULL, 0); 2998 } 2999 3000 /* 3001 * Clear the truncation flag on the backend after we have 3002 * completed the deletions. Backend data is now good again 3003 * (including new records we are about to sync, below). 3004 * 3005 * Leave sync_trunc_off intact. As we write additional 3006 * records the backend will update sync_trunc_off. This 3007 * tells the backend whether it can skip the overwrite 3008 * test. This should work properly even when the backend 3009 * writes full blocks where the truncation point straddles 3010 * the block because the comparison is against the base 3011 * offset of the record. 3012 */ 3013 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3014 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */ 3015 } else { 3016 error = 0; 3017 } 3018 3019 /* 3020 * Now sync related records. These will typically be directory 3021 * entries, records tracking direct-writes, or delete-on-disk records. 
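 *
 * hammer_sync_record_callback() is run against every in-memory record
 * in ip->rec_tree; records which are not part of the current flush
 * group are simply skipped by the callback.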
3022 */ 3023 if (error == 0) { 3024 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 3025 hammer_sync_record_callback, &cursor); 3026 if (tmp_error < 0) 3027 tmp_error = -error; 3028 if (tmp_error) 3029 error = tmp_error; 3030 } 3031 hammer_cache_node(&ip->cache[1], cursor.node); 3032 3033 /* 3034 * Re-seek for inode update, assuming our cache hasn't been ripped 3035 * out from under us. 3036 */ 3037 if (error == 0) { 3038 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error); 3039 if (tmp_node) { 3040 hammer_cursor_downgrade(&cursor); 3041 hammer_lock_sh(&tmp_node->lock); 3042 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0) 3043 hammer_cursor_seek(&cursor, tmp_node, 0); 3044 hammer_unlock(&tmp_node->lock); 3045 hammer_rel_node(tmp_node); 3046 } 3047 error = 0; 3048 } 3049 3050 /* 3051 * If we are deleting the inode the frontend had better not have 3052 * any active references on elements making up the inode. 3053 * 3054 * The call to hammer_ip_delete_clean() cleans up auxillary records 3055 * but not DB or DATA records. Those must have already been deleted 3056 * by the normal truncation mechanic. 3057 */ 3058 if (error == 0 && ip->sync_ino_data.nlinks == 0 && 3059 RB_EMPTY(&ip->rec_tree) && 3060 (ip->sync_flags & HAMMER_INODE_DELETING) && 3061 (ip->flags & HAMMER_INODE_DELETED) == 0) { 3062 int count1 = 0; 3063 3064 error = hammer_ip_delete_clean(&cursor, ip, &count1); 3065 if (error == 0) { 3066 ip->flags |= HAMMER_INODE_DELETED; 3067 ip->sync_flags &= ~HAMMER_INODE_DELETING; 3068 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3069 KKASSERT(RB_EMPTY(&ip->rec_tree)); 3070 3071 /* 3072 * Set delete_tid in both the frontend and backend 3073 * copy of the inode record. The DELETED flag handles 3074 * this, do not set DDIRTY. 3075 */ 3076 ip->ino_leaf.base.delete_tid = trans->tid; 3077 ip->sync_ino_leaf.base.delete_tid = trans->tid; 3078 ip->ino_leaf.delete_ts = trans->time32; 3079 ip->sync_ino_leaf.delete_ts = trans->time32; 3080 3081 3082 /* 3083 * Adjust the inode count in the volume header 3084 */ 3085 hammer_sync_lock_sh(trans); 3086 if (ip->flags & HAMMER_INODE_ONDISK) { 3087 hammer_modify_volume_field(trans, 3088 trans->rootvol, 3089 vol0_stat_inodes); 3090 --ip->hmp->rootvol->ondisk->vol0_stat_inodes; 3091 hammer_modify_volume_done(trans->rootvol); 3092 } 3093 hammer_sync_unlock(trans); 3094 } 3095 } 3096 3097 if (error) 3098 goto done; 3099 ip->sync_flags &= ~HAMMER_INODE_BUFS; 3100 3101 defer_buffer_flush: 3102 /* 3103 * Now update the inode's on-disk inode-data and/or on-disk record. 3104 * DELETED and ONDISK are managed only in ip->flags. 3105 * 3106 * In the case of a defered buffer flush we still update the on-disk 3107 * inode to satisfy visibility requirements if there happen to be 3108 * directory dependancies. 3109 */ 3110 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) { 3111 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK: 3112 /* 3113 * If deleted and on-disk, don't set any additional flags. 3114 * the delete flag takes care of things. 3115 * 3116 * Clear flags which may have been set by the frontend. 3117 */ 3118 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3119 HAMMER_INODE_SDIRTY | 3120 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3121 HAMMER_INODE_DELETING); 3122 break; 3123 case HAMMER_INODE_DELETED: 3124 /* 3125 * Take care of the case where a deleted inode was never 3126 * flushed to the disk in the first place. 3127 * 3128 * Clear flags which may have been set by the frontend. 
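 *
 * The loop below simply throws away any remaining in-memory records
 * (marking them deleted-by-backend), since there is no on-disk inode
 * for them to be synced against.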
3129 */ 3130 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3131 HAMMER_INODE_SDIRTY | 3132 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3133 HAMMER_INODE_DELETING); 3134 while (RB_ROOT(&ip->rec_tree)) { 3135 hammer_record_t record = RB_ROOT(&ip->rec_tree); 3136 hammer_ref(&record->lock); 3137 KKASSERT(hammer_oneref(&record->lock)); 3138 record->flags |= HAMMER_RECF_DELETED_BE; 3139 ++record->ip->rec_generation; 3140 hammer_rel_mem_record(record); 3141 } 3142 break; 3143 case HAMMER_INODE_ONDISK: 3144 /* 3145 * If already on-disk, do not set any additional flags. 3146 */ 3147 break; 3148 default: 3149 /* 3150 * If not on-disk and not deleted, set DDIRTY to force 3151 * an initial record to be written. 3152 * 3153 * Also set the create_tid in both the frontend and backend 3154 * copy of the inode record. 3155 */ 3156 ip->ino_leaf.base.create_tid = trans->tid; 3157 ip->ino_leaf.create_ts = trans->time32; 3158 ip->sync_ino_leaf.base.create_tid = trans->tid; 3159 ip->sync_ino_leaf.create_ts = trans->time32; 3160 ip->sync_flags |= HAMMER_INODE_DDIRTY; 3161 break; 3162 } 3163 3164 /* 3165 * If DDIRTY or SDIRTY is set, write out a new record. 3166 * If the inode is already on-disk the old record is marked as 3167 * deleted. 3168 * 3169 * If DELETED is set hammer_update_inode() will delete the existing 3170 * record without writing out a new one. 3171 * 3172 * If *ONLY* the ITIMES flag is set we can update the record in-place. 3173 */ 3174 if (ip->flags & HAMMER_INODE_DELETED) { 3175 error = hammer_update_inode(&cursor, ip); 3176 } else 3177 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) && 3178 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 3179 error = hammer_update_itimes(&cursor, ip); 3180 } else 3181 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY | 3182 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 3183 error = hammer_update_inode(&cursor, ip); 3184 } 3185 done: 3186 if (ip->flags & HAMMER_INODE_MODMASK) 3187 hammer_inode_dirty(ip); 3188 if (error) { 3189 hammer_critical_error(ip->hmp, ip, error, 3190 "while syncing inode"); 3191 } 3192 hammer_done_cursor(&cursor); 3193 return(error); 3194 } 3195 3196 /* 3197 * This routine is called when the OS is no longer actively referencing 3198 * the inode (but might still be keeping it cached), or when releasing 3199 * the last reference to an inode. 3200 * 3201 * At this point if the inode's nlinks count is zero we want to destroy 3202 * it, which may mean destroying it on-media too. 3203 */ 3204 void 3205 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 3206 { 3207 struct vnode *vp; 3208 3209 /* 3210 * Set the DELETING flag when the link count drops to 0 and the 3211 * OS no longer has any opens on the inode. 3212 * 3213 * The backend will clear DELETING (a mod flag) and set DELETED 3214 * (a state flag) when it is actually able to perform the 3215 * operation. 3216 * 3217 * Don't reflag the deletion if the flusher is currently syncing 3218 * one that was already flagged. A previously set DELETING flag 3219 * may bounce around flags and sync_flags until the operation is 3220 * completely done. 3221 * 3222 * Do not attempt to modify a snapshot inode (one set to read-only). 
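 *
 * Destruction is implemented as a full truncation (trunc_off = 0)
 * plus the DELETING flag; the backend performs the actual media
 * deletion later, in hammer_sync_inode().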
3223 */ 3224 if (ip->ino_data.nlinks == 0 && 3225 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 3226 ip->flags |= HAMMER_INODE_DELETING; 3227 ip->flags |= HAMMER_INODE_TRUNCATED; 3228 ip->trunc_off = 0; 3229 vp = NULL; 3230 if (getvp) { 3231 if (hammer_get_vnode(ip, &vp) != 0) 3232 return; 3233 } 3234 3235 /* 3236 * Final cleanup 3237 */ 3238 if (ip->vp) 3239 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0); 3240 if (ip->flags & HAMMER_INODE_MODMASK) 3241 hammer_inode_dirty(ip); 3242 if (getvp) 3243 vput(vp); 3244 } 3245 } 3246 3247 /* 3248 * After potentially resolving a dependancy the inode is tested 3249 * to determine whether it needs to be reflushed. 3250 */ 3251 void 3252 hammer_test_inode(hammer_inode_t ip) 3253 { 3254 if (ip->flags & HAMMER_INODE_REFLUSH) { 3255 ip->flags &= ~HAMMER_INODE_REFLUSH; 3256 hammer_ref(&ip->lock); 3257 if (ip->flags & HAMMER_INODE_RESIGNAL) { 3258 ip->flags &= ~HAMMER_INODE_RESIGNAL; 3259 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 3260 } else { 3261 hammer_flush_inode(ip, 0); 3262 } 3263 hammer_rel_inode(ip, 0); 3264 } 3265 } 3266 3267 /* 3268 * Clear the RECLAIM flag on an inode. This occurs when the inode is 3269 * reassociated with a vp or just before it gets freed. 3270 * 3271 * Pipeline wakeups to threads blocked due to an excessive number of 3272 * detached inodes. This typically occurs when atime updates accumulate 3273 * while scanning a directory tree. 3274 */ 3275 static void 3276 hammer_inode_wakereclaims(hammer_inode_t ip) 3277 { 3278 struct hammer_reclaim *reclaim; 3279 hammer_mount_t hmp = ip->hmp; 3280 3281 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 3282 return; 3283 3284 --hammer_count_reclaims; 3285 --hmp->count_reclaims; 3286 ip->flags &= ~HAMMER_INODE_RECLAIM; 3287 3288 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) { 3289 KKASSERT(reclaim->count > 0); 3290 if (--reclaim->count == 0) { 3291 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 3292 wakeup(reclaim); 3293 } 3294 } 3295 } 3296 3297 /* 3298 * Setup our reclaim pipeline. We only let so many detached (and dirty) 3299 * inodes build up before we start blocking. This routine is called 3300 * if a new inode is created or an inode is loaded from media. 3301 * 3302 * When we block we don't care *which* inode has finished reclaiming, 3303 * as long as one does. 3304 * 3305 * The reclaim pipeline is primarily governed by the auto-flush which is 3306 * 1/4 hammer_limit_reclaims. We don't want to block if the count is 3307 * less than 1/2 hammer_limit_reclaims. From 1/2 to full count is 3308 * dynamically governed. 3309 */ 3310 void 3311 hammer_inode_waitreclaims(hammer_transaction_t trans) 3312 { 3313 hammer_mount_t hmp = trans->hmp; 3314 struct hammer_reclaim reclaim; 3315 int lower_limit; 3316 3317 /* 3318 * Track inode load, delay if the number of reclaiming inodes is 3319 * between 2/4 and 4/4 hammer_limit_reclaims, depending. 
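 *
 * For example, if hammer_limit_reclaims were 4000: a process whose
 * per-pid count has saturated at the 2000 cap applied below would get
 * lower_limit = 2000 and start sleeping once half the limit is in
 * reclaim, while a kernel thread with no process context would not
 * sleep until the count reached 3000 (3/4 of the limit).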
3320 */ 3321 if (curthread->td_proc) { 3322 struct hammer_inostats *stats; 3323 3324 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid); 3325 ++stats->count; 3326 3327 if (stats->count > hammer_limit_reclaims / 2) 3328 stats->count = hammer_limit_reclaims / 2; 3329 lower_limit = hammer_limit_reclaims - stats->count; 3330 if (hammer_debug_general & 0x10000) { 3331 kprintf("pid %5d limit %d\n", 3332 (int)curthread->td_proc->p_pid, lower_limit); 3333 } 3334 } else { 3335 lower_limit = hammer_limit_reclaims * 3 / 4; 3336 } 3337 if (hmp->count_reclaims >= lower_limit) { 3338 reclaim.count = 1; 3339 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 3340 tsleep(&reclaim, 0, "hmrrcm", hz); 3341 if (reclaim.count > 0) 3342 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 3343 } 3344 } 3345 3346 /* 3347 * Keep track of reclaim statistics on a per-pid basis using a loose 3348 * 4-way set associative hash table. Collisions inherit the count of 3349 * the previous entry. 3350 * 3351 * NOTE: We want to be careful here to limit the chain size. If the chain 3352 * size is too large a pid will spread its stats out over too many 3353 * entries under certain types of heavy filesystem activity and 3354 * wind up not delaying long enough. 3355 */ 3356 static 3357 struct hammer_inostats * 3358 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid) 3359 { 3360 struct hammer_inostats *stats; 3361 int delta; 3362 int chain; 3363 static volatile int iterator; /* we don't care about MP races */ 3364 3365 /* 3366 * Chain up to 4 times to find our entry. 3367 */ 3368 for (chain = 0; chain < 4; ++chain) { 3369 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK]; 3370 if (stats->pid == pid) 3371 break; 3372 } 3373 3374 /* 3375 * Replace one of the four chaining entries with our new entry. 3376 */ 3377 if (chain == 4) { 3378 stats = &hmp->inostats[(pid + (iterator++ & 3)) & 3379 HAMMER_INOSTATS_HMASK]; 3380 stats->pid = pid; 3381 } 3382 3383 /* 3384 * Decay the entry 3385 */ 3386 if (stats->count && stats->ltick != ticks) { 3387 delta = ticks - stats->ltick; 3388 stats->ltick = ticks; 3389 if (delta <= 0 || delta > hz * 60) 3390 stats->count = 0; 3391 else 3392 stats->count = stats->count * hz / (hz + delta); 3393 } 3394 if (hammer_debug_general & 0x10000) 3395 kprintf("pid %5d stats %d\n", (int)pid, stats->count); 3396 return (stats); 3397 } 3398 3399 #if 0 3400 3401 /* 3402 * XXX not used, doesn't work very well due to the large batching nature 3403 * of flushes. 3404 * 3405 * A larger then normal backlog of inodes is sitting in the flusher, 3406 * enforce a general slowdown to let it catch up. This routine is only 3407 * called on completion of a non-flusher-related transaction which 3408 * performed B-Tree node I/O. 3409 * 3410 * It is possible for the flusher to stall in a continuous load. 3411 * blogbench -i1000 -o seems to do a good job generating this sort of load. 3412 * If the flusher is unable to catch up the inode count can bloat until 3413 * we run out of kvm. 3414 * 3415 * This is a bit of a hack. 3416 */ 3417 void 3418 hammer_inode_waithard(hammer_mount_t hmp) 3419 { 3420 /* 3421 * Hysteresis. 
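 *
 * Slowdown mode is only entered once the reclaim count reaches
 * hammer_limit_reclaims and the queued-inode count reaches 1/10 of
 * count_inodes, and is only left once both fall below half those
 * thresholds, so the flag does not flap on a borderline load.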
3422 */ 3423 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 3424 if (hmp->count_reclaims < hammer_limit_reclaims / 2 && 3425 hmp->count_iqueued < hmp->count_inodes / 20) { 3426 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 3427 return; 3428 } 3429 } else { 3430 if (hmp->count_reclaims < hammer_limit_reclaims || 3431 hmp->count_iqueued < hmp->count_inodes / 10) { 3432 return; 3433 } 3434 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 3435 } 3436 3437 /* 3438 * Block for one flush cycle. 3439 */ 3440 hammer_flusher_wait_next(hmp); 3441 } 3442 3443 #endif 3444