/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
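/*
 * Key-only comparator used by hammer_ino_rb_tree_RB_LOOKUP_INFO() (wired
 * up via RB_GENERATE_XLOOKUP() below).  It mirrors hammer_ino_rb_compare()
 * but takes a struct hammer_inode_info key, allowing the inode RB tree to
 * be probed by (localization, obj_id, asof) without constructing an inode.
 */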
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	hammer_mount_t hmp;

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		lwkt_reltoken(&hmp->fs_token);
		vrecycle(ap->a_vp);
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaims;
			++hmp->count_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		vclrisdirty(vp);
		hammer_rel_inode(ip, 1);
		lwkt_reltoken(&hmp->fs_token);
	}
	return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(struct hammer_inode *ip)
{
	struct vnode *vp;

	if ((ip->flags & HAMMER_INODE_MODMASK) &&
	    (vp = ip->vp) != NULL &&
	    (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
		vsetisdirty(vp);
	}
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT) {
				if (ip->obj_asof == hmp->asof) {
					if (ip->obj_localization == 0)
						vsetflags(vp, VROOT);
					else
						vsetflags(vp, VPFSROOT);
				} else {
					vsetflags(vp, VPFSROOT);
				}
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;


	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}
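	/*
	 * Not in the in-memory cache.  The strategy below is: allocate a
	 * new structure, look the inode up on-media, then RB_INSERT it.
	 * If the insert collides with a racing instantiation the new
	 * structure is freed and we retry from "loop:".
	 */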
	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);

	/*
	 * NEWINODE is only set if the inode becomes dirty later,
	 * setting it here just leads to unnecessary stalls.
	 *
	 * trans->flags |= HAMMER_TRANSF_NEWINODE;
	 */
	return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	/*
	 * Disallow the creation of new inodes in directories which
	 * have been deleted.  In HAMMER, this will cause a record
	 * syncing assertion later on in the flush code.
	 */
	if (dip && dip->ino_data.nlinks == 0) {
		*ipp = NULL;
		return (EINVAL);
	}

	/*
	 * Allocate inode
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}
	if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIRHASH_ALG1;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid if dip exists.
	 * The inode is probably a PFS root if dip is NULL.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}
#endif

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(hammer_oneref(&ip->lock));
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
	ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}
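	/*
	 * Not cached.  Build a new pfsm from the media below and
	 * RB_INSERT it; if the insert collides with a racing load the
	 * local copy is freed and we retry the lookup from "retry:".
	 */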
	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).
	 */
	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}
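	/*
	 * At this point any in-memory copy has either been marked
	 * deleted (DELETED_FE) or we hit a backend-interlocked record
	 * and will retry via EDEADLK.  A fresh general record carrying
	 * the new pfsd data is queued below; the backend deletes the
	 * on-disk version when it flushes.
	 */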
	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->vp && (ip->vp->v_flag & VPFSROOT)) {
		/*
		 * The hammer pfs-upgrade directive itself might have the
		 * root of the pfs open.  Just allow it.
		 */
		res = 0;
	} else {
		/*
		 * Don't allow any subdirectories or files to be open.
		 */
		if (hammer_isactive(&ip->lock) == 2 && ip->vp)
			vclean_unlocked(ip->vp);
		if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
			res = 0;
		else
			res = -1;	/* stop, someone is using the inode */
	}
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_rel(&pfsm->lock);
	if (hammer_norefs(&pfsm->lock)) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (hammer_oneref(&ip->lock)) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (hammer_oneref(&ip->lock)) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(hammer_isactive(&ip->lock) >= 1);
			if (hammer_isactive(&ip->lock) > 1) {
				hammer_rel(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(hammer_oneref(&ip->lock),
		("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(hammer_oneref(&rec->lock));
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_rel(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	/*
	 * Set the NEWINODE flag in the transaction if the inode
	 * transitions to a dirty state.  This is used to track
	 * the load on the inode cache.
	 */
	if (trans &&
	    (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (flags & HAMMER_INODE_MODMASK)) {
		trans->flags |= HAMMER_TRANSF_NEWINODE;
	}
	if (flags & HAMMER_INODE_MODMASK)
		hammer_inode_dirty(ip);
	ip->flags |= flags;
}

/*
 * Attempt to quickly update the atime for a hammer inode.  Return 0 on
 * success, -1 on failure.
 *
 * We attempt to update the atime with only the ip lock and not the
 * whole filesystem lock in order to improve concurrency.  We can only
 * do this safely if the ATIME flag is already pending on the inode.
 *
 * This function is called via a vnops path (ip pointer is stable) without
 * fs_token held.
 */
int
hammer_update_atime_quick(hammer_inode_t ip)
{
	struct timeval tv;
	int res = -1;

	if ((ip->flags & HAMMER_INODE_RO) ||
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
		/*
		 * Silently indicate success on read-only mount/snap
		 */
		res = 0;
	} else if (ip->flags & HAMMER_INODE_ATIME) {
		/*
		 * Double check with inode lock held against backend.  This
		 * is only safe if all we need to do is update
		 * ino_data.atime.
		 */
		getmicrotime(&tv);
		hammer_lock_ex(&ip->lock);
		if (ip->flags & HAMMER_INODE_ATIME) {
			ip->ino_data.atime =
			    (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
			res = 0;
		}
		hammer_unlock(&ip->lock);
	}
	return res;
}

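/*
 * Inode flush states referenced below: HAMMER_FST_IDLE (not part of any
 * flush), HAMMER_FST_SETUP (has records/dependencies set up but is not
 * yet in a flush group), and HAMMER_FST_FLUSH (queued in a flush group
 * and being synced by the flusher).
 */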
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * fill_flush_group is the first flush group we may be able to
	 * continue filling, it may be open or closed but it will always
	 * be past the currently flushing (running) flg.
	 *
	 * next_flush_group is the next open flush group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->fill_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
		    flg->total_count <= hammer_autoflush) {
			break;
		}
		hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		flg->seq = hmp->flusher.next++;
		if (hmp->next_flush_group == NULL)
			hmp->next_flush_group = flg;
		if (hmp->fill_flush_group == NULL)
			hmp->fill_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}

/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		if (hammer_debug_general & 0x10000)
			krateprintf(&hammer_gen_krate,
			    "HAMMER Warning: depth limit reached on "
			    "setup recursion, inode %p %016llx\n",
			    ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}

/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record		= record in question (*rec in below)
 * record->ip		= parent inode (*pip in below)
 * record->target_ip	= child inode (*ip in below)
 *
 * *pip--------------\
 *    ^               \rec_tree
 *     \               \
 *      \ip            /\\\\\ rbtree of recs from parent inode's view
 *       \            //\\\\\\
 *        \          / ........
 *         \        /
 *          \------*rec------target_ip------>*ip
 *               ...target_entry<----...----->target_list<---...
 *                                            list of recs from inode's view
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
1962 */ 1963 if (record->flush_state == HAMMER_FST_FLUSH) { 1964 /* 1965 * If not in our flush group ask the parent to reflush 1966 * us as soon as possible. 1967 */ 1968 if (record->flush_group != flg) { 1969 pip->flags |= HAMMER_INODE_REFLUSH; 1970 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1971 return(-1); 1972 } 1973 1974 /* 1975 * If in our flush group everything is already set up, 1976 * just return whether the record will improve our 1977 * visibility or not. 1978 */ 1979 if (record->type == HAMMER_MEM_RECORD_ADD) 1980 return(1); 1981 return(0); 1982 } 1983 1984 /* 1985 * It must be a setup record. Try to resolve the setup dependancies 1986 * by recursing upwards so we can place ip on the flush list. 1987 * 1988 * Limit ourselves to 20 levels of recursion to avoid blowing out 1989 * the kernel stack. If we hit the recursion limit we can't flush 1990 * until the parent flushes. The parent will flush independantly 1991 * on its own and ultimately a deep recursion will be resolved. 1992 */ 1993 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1994 1995 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1996 1997 /* 1998 * If good < 0 the parent has no connectivity and we cannot safely 1999 * flush the directory entry, which also means we can't flush our 2000 * ip. Flag us for downward recursion once the parent's 2001 * connectivity is resolved. Flag the parent for [re]flush or it 2002 * may not check for downward recursions. 2003 */ 2004 if (good < 0) { 2005 pip->flags |= HAMMER_INODE_REFLUSH; 2006 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2007 return(good); 2008 } 2009 2010 /* 2011 * We are go, place the parent inode in a flushing state so we can 2012 * place its record in a flushing state. Note that the parent 2013 * may already be flushing. The record must be in the same flush 2014 * group as the parent. 2015 */ 2016 if (pip->flush_state != HAMMER_FST_FLUSH) 2017 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 2018 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 2019 2020 /* 2021 * It is possible for a rename to create a loop in the recursion 2022 * and revisit a record. This will result in the record being 2023 * placed in a flush state unexpectedly. This check deals with 2024 * the case. 2025 */ 2026 if (record->flush_state == HAMMER_FST_FLUSH) { 2027 if (record->type == HAMMER_MEM_RECORD_ADD) 2028 return(1); 2029 return(0); 2030 } 2031 2032 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 2033 2034 #if 0 2035 if (record->type == HAMMER_MEM_RECORD_DEL && 2036 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 2037 /* 2038 * Regardless of flushing state we cannot sync this path if the 2039 * record represents a delete-on-disk but the target inode 2040 * is not ready to sync its own deletion. 2041 * 2042 * XXX need to count effective nlinks to determine whether 2043 * the flush is ok, otherwise removing a hardlink will 2044 * just leave the DEL record to rot. 2045 */ 2046 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 2047 return(-1); 2048 } else 2049 #endif 2050 if (pip->flush_group == flg) { 2051 /* 2052 * Because we have not calculated nlinks yet we can just 2053 * set records to the flush state if the parent is in 2054 * the same flush group as we are. 2055 */ 2056 record->flush_state = HAMMER_FST_FLUSH; 2057 record->flush_group = flg; 2058 ++record->flush_group->refs; 2059 hammer_ref(&record->lock); 2060 2061 /* 2062 * A general directory-add contributes to our visibility. 
2063 * 2064 * Otherwise it is probably a directory-delete or 2065 * delete-on-disk record and does not contribute to our 2066 * visbility (but we can still flush it). 2067 */ 2068 if (record->type == HAMMER_MEM_RECORD_ADD) 2069 return(1); 2070 return(0); 2071 } else { 2072 /* 2073 * If the parent is not in our flush group we cannot 2074 * flush this record yet, there is no visibility. 2075 * We tell the parent to reflush and mark ourselves 2076 * so the parent knows it should flush us too. 2077 */ 2078 pip->flags |= HAMMER_INODE_REFLUSH; 2079 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2080 return(-1); 2081 } 2082 } 2083 2084 /* 2085 * This is the core routine placing an inode into the FST_FLUSH state. 2086 */ 2087 static void 2088 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags) 2089 { 2090 hammer_mount_t hmp = ip->hmp; 2091 int go_count; 2092 2093 /* 2094 * Set flush state and prevent the flusher from cycling into 2095 * the next flush group. Do not place the ip on the list yet. 2096 * Inodes not in the idle state get an extra reference. 2097 */ 2098 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH); 2099 if (ip->flush_state == HAMMER_FST_IDLE) 2100 hammer_ref(&ip->lock); 2101 ip->flush_state = HAMMER_FST_FLUSH; 2102 ip->flush_group = flg; 2103 ++hmp->flusher.group_lock; 2104 ++hmp->count_iqueued; 2105 ++hammer_count_iqueued; 2106 ++flg->total_count; 2107 hammer_redo_fifo_start_flush(ip); 2108 2109 #if 0 2110 /* 2111 * We need to be able to vfsync/truncate from the backend. 2112 * 2113 * XXX Any truncation from the backend will acquire the vnode 2114 * independently. 2115 */ 2116 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0); 2117 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) { 2118 ip->flags |= HAMMER_INODE_VHELD; 2119 vref(ip->vp); 2120 } 2121 #endif 2122 2123 /* 2124 * Figure out how many in-memory records we can actually flush 2125 * (not including inode meta-data, buffers, etc). 2126 */ 2127 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0); 2128 if (flags & HAMMER_FLUSH_RECURSION) { 2129 /* 2130 * If this is a upwards recursion we do not want to 2131 * recurse down again! 2132 */ 2133 go_count = 1; 2134 #if 0 2135 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2136 /* 2137 * No new records are added if we must complete a flush 2138 * from a previous cycle, but we do have to move the records 2139 * from the previous cycle to the current one. 2140 */ 2141 #if 0 2142 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2143 hammer_syncgrp_child_callback, NULL); 2144 #endif 2145 go_count = 1; 2146 #endif 2147 } else { 2148 /* 2149 * Normal flush, scan records and bring them into the flush. 2150 * Directory adds and deletes are usually skipped (they are 2151 * grouped with the related inode rather then with the 2152 * directory). 2153 * 2154 * go_count can be negative, which means the scan aborted 2155 * due to the flush group being over-full and we should 2156 * flush what we have. 2157 */ 2158 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2159 hammer_setup_child_callback, NULL); 2160 } 2161 2162 /* 2163 * This is a more involved test that includes go_count. If we 2164 * can't flush, flag the inode and return. If go_count is 0 we 2165 * were are unable to flush any records in our rec_tree and 2166 * must ignore the XDIRTY flag. 
2167 */ 2168 if (go_count == 0) { 2169 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) { 2170 --hmp->count_iqueued; 2171 --hammer_count_iqueued; 2172 2173 --flg->total_count; 2174 ip->flush_state = HAMMER_FST_SETUP; 2175 ip->flush_group = NULL; 2176 if (flags & HAMMER_FLUSH_SIGNAL) { 2177 ip->flags |= HAMMER_INODE_REFLUSH | 2178 HAMMER_INODE_RESIGNAL; 2179 } else { 2180 ip->flags |= HAMMER_INODE_REFLUSH; 2181 } 2182 #if 0 2183 if (ip->flags & HAMMER_INODE_VHELD) { 2184 ip->flags &= ~HAMMER_INODE_VHELD; 2185 vrele(ip->vp); 2186 } 2187 #endif 2188 2189 /* 2190 * REFLUSH is needed to trigger dependancy wakeups 2191 * when an inode is in SETUP. 2192 */ 2193 ip->flags |= HAMMER_INODE_REFLUSH; 2194 if (--hmp->flusher.group_lock == 0) 2195 wakeup(&hmp->flusher.group_lock); 2196 return; 2197 } 2198 } 2199 2200 /* 2201 * Snapshot the state of the inode for the backend flusher. 2202 * 2203 * We continue to retain save_trunc_off even when all truncations 2204 * have been resolved as an optimization to determine if we can 2205 * skip the B-Tree lookup for overwrite deletions. 2206 * 2207 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 2208 * and stays in ip->flags. Once set, it stays set until the 2209 * inode is destroyed. 2210 */ 2211 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2212 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 2213 ip->sync_trunc_off = ip->trunc_off; 2214 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 2215 ip->flags &= ~HAMMER_INODE_TRUNCATED; 2216 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 2217 2218 /* 2219 * The save_trunc_off used to cache whether the B-Tree 2220 * holds any records past that point is not used until 2221 * after the truncation has succeeded, so we can safely 2222 * set it now. 2223 */ 2224 if (ip->save_trunc_off > ip->sync_trunc_off) 2225 ip->save_trunc_off = ip->sync_trunc_off; 2226 } 2227 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 2228 ~HAMMER_INODE_TRUNCATED); 2229 ip->sync_ino_leaf = ip->ino_leaf; 2230 ip->sync_ino_data = ip->ino_data; 2231 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 2232 #ifdef DEBUG_TRUNCATE 2233 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp) 2234 kprintf("truncateS %016llx\n", ip->sync_trunc_off); 2235 #endif 2236 2237 /* 2238 * The flusher list inherits our inode and reference. 2239 */ 2240 KKASSERT(flg->running == 0); 2241 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip); 2242 if (--hmp->flusher.group_lock == 0) 2243 wakeup(&hmp->flusher.group_lock); 2244 2245 /* 2246 * Auto-flush the group if it grows too large. Make sure the 2247 * inode reclaim wait pipeline continues to work. 2248 */ 2249 if (flg->total_count >= hammer_autoflush || 2250 flg->total_count >= hammer_limit_reclaims / 4) { 2251 if (hmp->fill_flush_group == flg) 2252 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 2253 hammer_flusher_async(hmp, flg); 2254 } 2255 } 2256 2257 /* 2258 * Callback for scan of ip->rec_tree. Try to include each record in our 2259 * flush. ip->flush_group has been set but the inode has not yet been 2260 * moved into a flushing state. 2261 * 2262 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 2263 * both inodes. 2264 * 2265 * We return 1 for any record placed or found in FST_FLUSH, which prevents 2266 * the caller from shortcutting the flush. 
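 * (A -1 return aborts the scan; hammer_flush_inode_core() treats the
 * resulting negative go_count as a request to flush what it already has
 * and reflush the directory later.)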
2267 */
2268 static int
2269 hammer_setup_child_callback(hammer_record_t rec, void *data)
2270 {
2271 hammer_flush_group_t flg;
2272 hammer_inode_t target_ip;
2273 hammer_inode_t ip;
2274 int r;
2275
2276 /*
2277 * Records deleted or committed by the backend are ignored.
2278 * Note that the flush detects deleted frontend records at
2279 * multiple points to deal with races. This is just the first
2280 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot
2281 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2282 * messes up link-count calculations.
2283 *
2284 * NOTE: Don't get confused between record deletion and, say,
2285 * directory entry deletion. The deletion of a directory entry
2286 * which is on-media has nothing to do with the record deletion
2287 * flags.
2288 */
2289 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2290 HAMMER_RECF_COMMITTED)) {
2291 if (rec->flush_state == HAMMER_FST_FLUSH) {
2292 KKASSERT(rec->flush_group == rec->ip->flush_group);
2293 r = 1;
2294 } else {
2295 r = 0;
2296 }
2297 return(r);
2298 }
2299
2300 /*
2301 * If the record is in an idle state it has no dependancies and
2302 * can be flushed.
2303 */
2304 ip = rec->ip;
2305 flg = ip->flush_group;
2306 r = 0;
2307
2308 switch(rec->flush_state) {
2309 case HAMMER_FST_IDLE:
2310 /*
2311 * The record has no setup dependancy, we can flush it.
2312 */
2313 KKASSERT(rec->target_ip == NULL);
2314 rec->flush_state = HAMMER_FST_FLUSH;
2315 rec->flush_group = flg;
2316 ++flg->refs;
2317 hammer_ref(&rec->lock);
2318 r = 1;
2319 break;
2320 case HAMMER_FST_SETUP:
2321 /*
2322 * The record has a setup dependancy. These are typically
2323 * directory entry adds and deletes. Such entries will be
2324 * flushed when their inodes are flushed so we do not
2325 * usually have to add them to the flush here. However,
2326 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2327 * it is asking us to flush this record (and it).
2328 */
2329 target_ip = rec->target_ip;
2330 KKASSERT(target_ip != NULL);
2331 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2332
2333 /*
2334 * If the target IP is already flushing in our group
2335 * we could associate the record, but target_ip has
2336 * already synced ino_data to sync_ino_data and we
2337 * would also have to adjust nlinks. Plus there are
2338 * ordering issues for adds and deletes.
2339 *
2340 * Reflush downward if this is an ADD, and upward if
2341 * this is a DEL.
2342 */
2343 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2344 if (rec->type == HAMMER_MEM_RECORD_ADD)
2345 ip->flags |= HAMMER_INODE_REFLUSH;
2346 else
2347 target_ip->flags |= HAMMER_INODE_REFLUSH;
2348 break;
2349 }
2350
2351 /*
2352 * Target IP is not yet flushing. This can get complex
2353 * because we have to be careful about the recursion.
2354 *
2355 * Directories create an issue for us in that if a flush
2356 * of a directory is requested the expectation is to flush
2357 * any pending directory entries, but this will cause the
2358 * related inodes to recursively flush as well. We can't
2359 * really defer the operation so just get as many as we
2360 * can and flush what we have.
2361 */
2362 #if 0
2363 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2364 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2365 /*
2366 * We aren't reclaiming and the target ip was not
2367 * previously prevented from flushing due to this
2368 * record dependancy. Do not flush this record.
2369 */ 2370 /*r = 0;*/ 2371 } else 2372 #endif 2373 if (flg->total_count + flg->refs > 2374 ip->hmp->undo_rec_limit) { 2375 /* 2376 * Our flush group is over-full and we risk blowing 2377 * out the UNDO FIFO. Stop the scan, flush what we 2378 * have, then reflush the directory. 2379 * 2380 * The directory may be forced through multiple 2381 * flush groups before it can be completely 2382 * flushed. 2383 */ 2384 ip->flags |= HAMMER_INODE_RESIGNAL | 2385 HAMMER_INODE_REFLUSH; 2386 r = -1; 2387 } else if (rec->type == HAMMER_MEM_RECORD_ADD) { 2388 /* 2389 * If the target IP is not flushing we can force 2390 * it to flush, even if it is unable to write out 2391 * any of its own records we have at least one in 2392 * hand that we CAN deal with. 2393 */ 2394 rec->flush_state = HAMMER_FST_FLUSH; 2395 rec->flush_group = flg; 2396 ++flg->refs; 2397 hammer_ref(&rec->lock); 2398 hammer_flush_inode_core(target_ip, flg, 2399 HAMMER_FLUSH_RECURSION); 2400 r = 1; 2401 } else { 2402 /* 2403 * General or delete-on-disk record. 2404 * 2405 * XXX this needs help. If a delete-on-disk we could 2406 * disconnect the target. If the target has its own 2407 * dependancies they really need to be flushed. 2408 * 2409 * XXX 2410 */ 2411 rec->flush_state = HAMMER_FST_FLUSH; 2412 rec->flush_group = flg; 2413 ++flg->refs; 2414 hammer_ref(&rec->lock); 2415 hammer_flush_inode_core(target_ip, flg, 2416 HAMMER_FLUSH_RECURSION); 2417 r = 1; 2418 } 2419 break; 2420 case HAMMER_FST_FLUSH: 2421 /* 2422 * The record could be part of a previous flush group if the 2423 * inode is a directory (the record being a directory entry). 2424 * Once the flush group was closed a hammer_test_inode() 2425 * function can cause a new flush group to be setup, placing 2426 * the directory inode itself in a new flush group. 2427 * 2428 * When associated with a previous flush group we count it 2429 * as if it were in our current flush group, since it will 2430 * effectively be flushed by the time we flush our current 2431 * flush group. 2432 */ 2433 KKASSERT( 2434 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY || 2435 rec->flush_group == flg); 2436 r = 1; 2437 break; 2438 } 2439 return(r); 2440 } 2441 2442 #if 0 2443 /* 2444 * This version just moves records already in a flush state to the new 2445 * flush group and that is it. 2446 */ 2447 static int 2448 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2449 { 2450 hammer_inode_t ip = rec->ip; 2451 2452 switch(rec->flush_state) { 2453 case HAMMER_FST_FLUSH: 2454 KKASSERT(rec->flush_group == ip->flush_group); 2455 break; 2456 default: 2457 break; 2458 } 2459 return(0); 2460 } 2461 #endif 2462 2463 /* 2464 * Wait for a previously queued flush to complete. 2465 * 2466 * If a critical error occured we don't try to wait. 2467 */ 2468 void 2469 hammer_wait_inode(hammer_inode_t ip) 2470 { 2471 /* 2472 * The inode can be in a SETUP state in which case RESIGNAL 2473 * should be set. If RESIGNAL is not set then the previous 2474 * flush completed and a later operation placed the inode 2475 * in a passive setup state again, so we're done. 2476 * 2477 * The inode can be in a FLUSH state in which case we 2478 * can just wait for completion. 
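 * In either case the loop below re-tests the state after every wakeup,
 * since a reflush may have requeued the inode in the meantime.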
2479 */
2480 while (ip->flush_state == HAMMER_FST_FLUSH ||
2481 (ip->flush_state == HAMMER_FST_SETUP &&
2482 (ip->flags & HAMMER_INODE_RESIGNAL))) {
2483 /*
2484 * Don't try to flush on a critical error
2485 */
2486 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2487 break;
2488
2489 /*
2490 * If the inode was already being flushed its flg
2491 * may not have been queued to the backend. We
2492 * have to make sure it gets queued or we can wind
2493 * up blocked or deadlocked (particularly if we are
2494 * the vnlru thread).
2495 */
2496 if (ip->flush_state == HAMMER_FST_FLUSH) {
2497 KKASSERT(ip->flush_group);
2498 if (ip->flush_group->closed == 0) {
2499 if (hammer_debug_inode) {
2500 kprintf("hammer: debug: forcing "
2501 "async flush ip %016jx\n",
2502 (intmax_t)ip->obj_id);
2503 }
2504 hammer_flusher_async(ip->hmp,
2505 ip->flush_group);
2506 continue; /* retest */
2507 }
2508 }
2509
2510 /*
2511 * In a flush state with the flg queued to the backend
2512 * or in a setup state with RESIGNAL set, we can safely
2513 * wait.
2514 */
2515 ip->flags |= HAMMER_INODE_FLUSHW;
2516 tsleep(&ip->flags, 0, "hmrwin", 0);
2517 }
2518
2519 #if 0
2520 /*
2521 * The inode may have been in a passive setup state,
2522 * call flush to make sure we get signaled.
2523 */
2524 if (ip->flush_state == HAMMER_FST_SETUP)
2525 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2526 #endif
2527
2528 }
2529
2530 /*
2531 * Called by the backend code when a flush has been completed.
2532 * The inode has already been removed from the flush list.
2533 *
2534 * A pipelined flush can occur, in which case we must re-enter the
2535 * inode on the list and re-copy its fields.
2536 */
2537 void
2538 hammer_flush_inode_done(hammer_inode_t ip, int error)
2539 {
2540 hammer_mount_t hmp;
2541 int dorel;
2542
2543 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2544
2545 hmp = ip->hmp;
2546
2547 /*
2548 * Auto-reflush if the backend could not completely flush
2549 * the inode. This fixes a case where a deferred buffer flush
2550 * could cause fsync to return early.
2551 */
2552 if (ip->sync_flags & HAMMER_INODE_MODMASK)
2553 ip->flags |= HAMMER_INODE_REFLUSH;
2554
2555 /*
2556 * Merge left-over flags back into the frontend and fix the state.
2557 * Incomplete truncations are retained by the backend.
2558 */
2559 ip->error = error;
2560 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2561 ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2562
2563 /*
2564 * The backend may have adjusted nlinks, so if the adjusted nlinks
2565 * does not match the frontend, set the frontend's DDIRTY flag again.
2566 */
2567 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2568 ip->flags |= HAMMER_INODE_DDIRTY;
2569
2570 /*
2571 * Fix up the dirty buffer status.
2572 */
2573 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2574 ip->flags |= HAMMER_INODE_BUFS;
2575 }
2576 hammer_redo_fifo_end_flush(ip);
2577
2578 /*
2579 * Re-set the XDIRTY flag if some of the inode's in-memory records
2580 * could not be flushed.
2581 */
2582 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2583 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2584 (!RB_EMPTY(&ip->rec_tree) &&
2585 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2586
2587 /*
2588 * Do not lose track of inodes which no longer have vnode
2589 * associations, otherwise they may never get flushed again.
2590 *
2591 * The reflush flag can be set superfluously, causing extra pain
2592 * for no reason. If the inode is no longer modified it no longer
2593 * needs to be flushed.
2594 */
2595 if (ip->flags & HAMMER_INODE_MODMASK) {
2596 if (ip->vp == NULL)
2597 ip->flags |= HAMMER_INODE_REFLUSH;
2598 } else {
2599 ip->flags &= ~HAMMER_INODE_REFLUSH;
2600 }
2601
2602 /*
2603 * The fs token is held but the inode lock is not held. Because this
2604 * is a backend flush it is possible that the vnode has no references,
2605 * which can cause a reclaim race inside vsetisdirty() if/when it blocks.
2606 *
2607 * Therefore, we must lock the inode around this particular dirtying
2608 * operation. We don't have to do so around other dirtying operations
2609 * where the vnode is implicitly or explicitly held.
2610 */
2611 if (ip->flags & HAMMER_INODE_MODMASK) {
2612 hammer_lock_ex(&ip->lock);
2613 hammer_inode_dirty(ip);
2614 hammer_unlock(&ip->lock);
2615 }
2616
2617 /*
2618 * Adjust the flush state.
2619 */
2620 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2621 /*
2622 * We were unable to flush out all our records, leave the
2623 * inode in a flush state and in the current flush group.
2624 * The flush group will be re-run.
2625 *
2626 * This occurs if the UNDO block gets too full or there is
2627 * too much dirty meta-data and allows the flusher to
2628 * finalize the UNDO block and then re-flush.
2629 */
2630 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2631 dorel = 0;
2632 } else {
2633 /*
2634 * Remove from the flush_group
2635 */
2636 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2637 ip->flush_group = NULL;
2638
2639 #if 0
2640 /*
2641 * Clean up the vnode ref and tracking counts.
2642 */
2643 if (ip->flags & HAMMER_INODE_VHELD) {
2644 ip->flags &= ~HAMMER_INODE_VHELD;
2645 vrele(ip->vp);
2646 }
2647 #endif
2648 --hmp->count_iqueued;
2649 --hammer_count_iqueued;
2650
2651 /*
2652 * And adjust the state.
2653 */
2654 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2655 ip->flush_state = HAMMER_FST_IDLE;
2656 dorel = 1;
2657 } else {
2658 ip->flush_state = HAMMER_FST_SETUP;
2659 dorel = 0;
2660 }
2661
2662 /*
2663 * If the frontend is waiting for a flush to complete,
2664 * wake it up.
2665 */
2666 if (ip->flags & HAMMER_INODE_FLUSHW) {
2667 ip->flags &= ~HAMMER_INODE_FLUSHW;
2668 wakeup(&ip->flags);
2669 }
2670
2671 /*
2672 * If the frontend made more changes and requested another
2673 * flush, then try to get it running.
2674 *
2675 * Reflushes are aborted when the inode is errored out.
2676 */
2677 if (ip->flags & HAMMER_INODE_REFLUSH) {
2678 ip->flags &= ~HAMMER_INODE_REFLUSH;
2679 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2680 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2681 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2682 } else {
2683 hammer_flush_inode(ip, 0);
2684 }
2685 }
2686 }
2687
2688 /*
2689 * If we have no parent dependancies we can clear CONN_DOWN
2690 */
2691 if (TAILQ_EMPTY(&ip->target_list))
2692 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2693
2694 /*
2695 * If the inode is now clean drop the space reservation.
2696 */
2697 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2698 (ip->flags & HAMMER_INODE_RSV_INODES)) {
2699 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2700 --hmp->rsv_inodes;
2701 }
2702
2703 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2704
2705 if (dorel)
2706 hammer_rel_inode(ip, 0);
2707 }
2708
2709 /*
2710 * Called from hammer_sync_inode() to synchronize in-memory records
2711 * to the media.
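 * Returns 0 if the record was skipped or synced successfully, or a
 * negated errno on failure.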
2712 */ 2713 static int 2714 hammer_sync_record_callback(hammer_record_t record, void *data) 2715 { 2716 hammer_cursor_t cursor = data; 2717 hammer_transaction_t trans = cursor->trans; 2718 hammer_mount_t hmp = trans->hmp; 2719 int error; 2720 2721 /* 2722 * Skip records that do not belong to the current flush. 2723 */ 2724 ++hammer_stats_record_iterations; 2725 if (record->flush_state != HAMMER_FST_FLUSH) 2726 return(0); 2727 2728 #if 1 2729 if (record->flush_group != record->ip->flush_group) { 2730 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group); 2731 if (hammer_debug_critical) 2732 Debugger("blah2"); 2733 return(0); 2734 } 2735 #endif 2736 KKASSERT(record->flush_group == record->ip->flush_group); 2737 2738 /* 2739 * Interlock the record using the BE flag. Once BE is set the 2740 * frontend cannot change the state of FE. 2741 * 2742 * NOTE: If FE is set prior to us setting BE we still sync the 2743 * record out, but the flush completion code converts it to 2744 * a delete-on-disk record instead of destroying it. 2745 */ 2746 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2747 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2748 2749 /* 2750 * The backend has already disposed of the record. 2751 */ 2752 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) { 2753 error = 0; 2754 goto done; 2755 } 2756 2757 /* 2758 * If the whole inode is being deleted and all on-disk records will 2759 * be deleted very soon, we can't sync any new records to disk 2760 * because they will be deleted in the same transaction they were 2761 * created in (delete_tid == create_tid), which will assert. 2762 * 2763 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2764 * that we currently panic on. 2765 */ 2766 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2767 switch(record->type) { 2768 case HAMMER_MEM_RECORD_DATA: 2769 /* 2770 * We don't have to do anything, if the record was 2771 * committed the space will have been accounted for 2772 * in the blockmap. 2773 */ 2774 /* fall through */ 2775 case HAMMER_MEM_RECORD_GENERAL: 2776 /* 2777 * Set deleted-by-backend flag. Do not set the 2778 * backend committed flag, because we are throwing 2779 * the record away. 2780 */ 2781 record->flags |= HAMMER_RECF_DELETED_BE; 2782 ++record->ip->rec_generation; 2783 error = 0; 2784 goto done; 2785 case HAMMER_MEM_RECORD_ADD: 2786 panic("hammer_sync_record_callback: illegal add " 2787 "during inode deletion record %p", record); 2788 break; /* NOT REACHED */ 2789 case HAMMER_MEM_RECORD_INODE: 2790 panic("hammer_sync_record_callback: attempt to " 2791 "sync inode record %p?", record); 2792 break; /* NOT REACHED */ 2793 case HAMMER_MEM_RECORD_DEL: 2794 /* 2795 * Follow through and issue the on-disk deletion 2796 */ 2797 break; 2798 } 2799 } 2800 2801 /* 2802 * If DELETED_FE is set special handling is needed for directory 2803 * entries. Dependant pieces related to the directory entry may 2804 * have already been synced to disk. If this occurs we have to 2805 * sync the directory entry and then change the in-memory record 2806 * from an ADD to a DELETE to cover the fact that it's been 2807 * deleted by the frontend. 2808 * 2809 * A directory delete covering record (MEM_RECORD_DEL) can never 2810 * be deleted by the frontend. 2811 * 2812 * Any other record type (aka DATA) can be deleted by the frontend. 
2813 * XXX At the moment the flusher must skip it because there may 2814 * be another data record in the flush group for the same block, 2815 * meaning that some frontend data changes can leak into the backend's 2816 * synchronization point. 2817 */ 2818 if (record->flags & HAMMER_RECF_DELETED_FE) { 2819 if (record->type == HAMMER_MEM_RECORD_ADD) { 2820 /* 2821 * Convert a front-end deleted directory-add to 2822 * a directory-delete entry later. 2823 */ 2824 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2825 } else { 2826 /* 2827 * Dispose of the record (race case). Mark as 2828 * deleted by backend (and not committed). 2829 */ 2830 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2831 record->flags |= HAMMER_RECF_DELETED_BE; 2832 ++record->ip->rec_generation; 2833 error = 0; 2834 goto done; 2835 } 2836 } 2837 2838 /* 2839 * Assign the create_tid for new records. Deletions already 2840 * have the record's entire key properly set up. 2841 */ 2842 if (record->type != HAMMER_MEM_RECORD_DEL) { 2843 record->leaf.base.create_tid = trans->tid; 2844 record->leaf.create_ts = trans->time32; 2845 } 2846 2847 /* 2848 * This actually moves the record to the on-media B-Tree. We 2849 * must also generate REDO_TERM entries in the UNDO/REDO FIFO 2850 * indicating that the related REDO_WRITE(s) have been committed. 2851 * 2852 * During recovery any REDO_TERM's within the nominal recovery span 2853 * are ignored since the related meta-data is being undone, causing 2854 * any matching REDO_WRITEs to execute. The REDO_TERMs outside 2855 * the nominal recovery span will match against REDO_WRITEs and 2856 * prevent them from being executed (because the meta-data has 2857 * already been synchronized). 2858 */ 2859 if (record->flags & HAMMER_RECF_REDO) { 2860 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA); 2861 hammer_generate_redo(trans, record->ip, 2862 record->leaf.base.key - 2863 record->leaf.data_len, 2864 HAMMER_REDO_TERM_WRITE, 2865 NULL, 2866 record->leaf.data_len); 2867 } 2868 2869 for (;;) { 2870 error = hammer_ip_sync_record_cursor(cursor, record); 2871 if (error != EDEADLK) 2872 break; 2873 hammer_done_cursor(cursor); 2874 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2875 record->ip); 2876 if (error) 2877 break; 2878 } 2879 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2880 2881 if (error) 2882 error = -error; 2883 done: 2884 hammer_flush_record_done(record, error); 2885 2886 /* 2887 * Do partial finalization if we have built up too many dirty 2888 * buffers. Otherwise a buffer cache deadlock can occur when 2889 * doing things like creating tens of thousands of tiny files. 2890 * 2891 * We must release our cursor lock to avoid a 3-way deadlock 2892 * due to the exclusive sync lock the finalizer must get. 2893 * 2894 * WARNING: See warnings in hammer_unlock_cursor() function. 2895 */ 2896 if (hammer_flusher_meta_limit(hmp) || 2897 vm_page_count_severe()) { 2898 hammer_unlock_cursor(cursor); 2899 hammer_flusher_finalize(trans, 0); 2900 hammer_lock_cursor(cursor); 2901 } 2902 return(error); 2903 } 2904 2905 /* 2906 * Backend function called by the flusher to sync an inode to media. 
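 * Returns 0 on success or an errno on failure; failures are also
 * reported via hammer_critical_error() before the cursor is torn down.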
2907 */
2908 int
2909 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2910 {
2911 struct hammer_cursor cursor;
2912 hammer_node_t tmp_node;
2913 hammer_record_t depend;
2914 hammer_record_t next;
2915 int error, tmp_error;
2916 u_int64_t nlinks;
2917
2918 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2919 return(0);
2920
2921 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2922 if (error)
2923 goto done;
2924
2925 /*
2926 * Any directory records referencing this inode which are not in
2927 * our current flush group must adjust our nlink count for the
2928 * purposes of synchronizing to disk.
2929 *
2930 * Records which are in our flush group can be unlinked from our
2931 * inode now, potentially allowing the inode to be physically
2932 * deleted.
2933 *
2934 * This cannot block.
2935 */
2936 nlinks = ip->ino_data.nlinks;
2937 next = TAILQ_FIRST(&ip->target_list);
2938 while ((depend = next) != NULL) {
2939 next = TAILQ_NEXT(depend, target_entry);
2940 if (depend->flush_state == HAMMER_FST_FLUSH &&
2941 depend->flush_group == ip->flush_group) {
2942 /*
2943 * If this is an ADD that was deleted by the frontend
2944 * the frontend nlinks count will have already been
2945 * decremented, but the backend is going to sync its
2946 * directory entry and must account for it. The
2947 * record will be converted to a delete-on-disk when
2948 * it gets synced.
2949 *
2950 * If the ADD was not deleted by the frontend we
2951 * can remove the dependancy from our target_list.
2952 */
2953 if (depend->flags & HAMMER_RECF_DELETED_FE) {
2954 ++nlinks;
2955 } else {
2956 TAILQ_REMOVE(&ip->target_list, depend,
2957 target_entry);
2958 depend->target_ip = NULL;
2959 }
2960 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2961 /*
2962 * Not part of our flush group and not deleted by
2963 * the front-end, adjust the link count synced to
2964 * the media (undo what the frontend did when it
2965 * queued the record).
2966 */
2967 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2968 switch(depend->type) {
2969 case HAMMER_MEM_RECORD_ADD:
2970 --nlinks;
2971 break;
2972 case HAMMER_MEM_RECORD_DEL:
2973 ++nlinks;
2974 break;
2975 default:
2976 break;
2977 }
2978 }
2979 }
2980
2981 /*
2982 * Set dirty if we had to modify the link count.
2983 */
2984 if (ip->sync_ino_data.nlinks != nlinks) {
2985 KKASSERT((int64_t)nlinks >= 0);
2986 ip->sync_ino_data.nlinks = nlinks;
2987 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2988 }
2989
2990 /*
2991 * If there is a truncation queued, destroy any data past the (aligned)
2992 * truncation point. Userland will have dealt with the buffer
2993 * containing the truncation point for us.
2994 *
2995 * We don't flush pending frontend data buffers until after we've
2996 * dealt with the truncation.
2997 */
2998 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2999 /*
3000 * Interlock trunc_off. The VOP front-end may continue to
3001 * make adjustments to it while we are blocked.
3002 */
3003 off_t trunc_off;
3004 off_t aligned_trunc_off;
3005 int blkmask;
3006
3007 trunc_off = ip->sync_trunc_off;
3008 blkmask = hammer_blocksize(trunc_off) - 1;
3009 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
3010
3011 /*
3012 * Delete any whole blocks on-media. The front-end has
3013 * already cleaned out any partial block and made it
3014 * pending. The front-end may have updated trunc_off
3015 * while we were blocked so we only use sync_trunc_off.
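 * (For illustration only: assuming a hypothetical 16KB block size, a
 * truncation to offset 100 rounds aligned_trunc_off up to 16384, so
 * only whole blocks at or beyond that offset are deleted here; the
 * partial block below it was already handled by the front-end.)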
3016 *
3017 * This operation can blow out the buffer cache, EWOULDBLOCK
3018 * means we were unable to complete the deletion. The
3019 * deletion will update sync_trunc_off in that case.
3020 */
3021 error = hammer_ip_delete_range(&cursor, ip,
3022 aligned_trunc_off,
3023 0x7FFFFFFFFFFFFFFFLL, 2);
3024 if (error == EWOULDBLOCK) {
3025 ip->flags |= HAMMER_INODE_WOULDBLOCK;
3026 error = 0;
3027 goto defer_buffer_flush;
3028 }
3029
3030 if (error)
3031 goto done;
3032
3033 /*
3034 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
3035 *
3036 * XXX we do this even if we did not previously generate
3037 * a REDO_TRUNC record. This operation may enclose the
3038 * range for multiple prior truncation entries in the REDO
3039 * log.
3040 */
3041 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3042 (ip->flags & HAMMER_INODE_RDIRTY)) {
3043 hammer_generate_redo(trans, ip, aligned_trunc_off,
3044 HAMMER_REDO_TERM_TRUNC,
3045 NULL, 0);
3046 }
3047
3048 /*
3049 * Clear the truncation flag on the backend after we have
3050 * completed the deletions. Backend data is now good again
3051 * (including new records we are about to sync, below).
3052 *
3053 * Leave sync_trunc_off intact. As we write additional
3054 * records the backend will update sync_trunc_off. This
3055 * tells the backend whether it can skip the overwrite
3056 * test. This should work properly even when the backend
3057 * writes full blocks where the truncation point straddles
3058 * the block because the comparison is against the base
3059 * offset of the record.
3060 */
3061 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3062 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
3063 } else {
3064 error = 0;
3065 }
3066
3067 /*
3068 * Now sync related records. These will typically be directory
3069 * entries, records tracking direct-writes, or delete-on-disk records.
3070 */
3071 if (error == 0) {
3072 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3073 hammer_sync_record_callback, &cursor);
3074 if (tmp_error < 0)
3075 tmp_error = -tmp_error;
3076 if (tmp_error)
3077 error = tmp_error;
3078 }
3079 hammer_cache_node(&ip->cache[1], cursor.node);
3080
3081 /*
3082 * Re-seek for inode update, assuming our cache hasn't been ripped
3083 * out from under us.
3084 */
3085 if (error == 0) {
3086 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3087 if (tmp_node) {
3088 hammer_cursor_downgrade(&cursor);
3089 hammer_lock_sh(&tmp_node->lock);
3090 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3091 hammer_cursor_seek(&cursor, tmp_node, 0);
3092 hammer_unlock(&tmp_node->lock);
3093 hammer_rel_node(tmp_node);
3094 }
3095 error = 0;
3096 }
3097
3098 /*
3099 * If we are deleting the inode the frontend had better not have
3100 * any active references on elements making up the inode.
3101 *
3102 * The call to hammer_ip_delete_clean() cleans up auxiliary records
3103 * but not DB or DATA records. Those must have already been deleted
3104 * by the normal truncation mechanic.
3105 */ 3106 if (error == 0 && ip->sync_ino_data.nlinks == 0 && 3107 RB_EMPTY(&ip->rec_tree) && 3108 (ip->sync_flags & HAMMER_INODE_DELETING) && 3109 (ip->flags & HAMMER_INODE_DELETED) == 0) { 3110 int count1 = 0; 3111 3112 error = hammer_ip_delete_clean(&cursor, ip, &count1); 3113 if (error == 0) { 3114 ip->flags |= HAMMER_INODE_DELETED; 3115 ip->sync_flags &= ~HAMMER_INODE_DELETING; 3116 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3117 KKASSERT(RB_EMPTY(&ip->rec_tree)); 3118 3119 /* 3120 * Set delete_tid in both the frontend and backend 3121 * copy of the inode record. The DELETED flag handles 3122 * this, do not set DDIRTY. 3123 */ 3124 ip->ino_leaf.base.delete_tid = trans->tid; 3125 ip->sync_ino_leaf.base.delete_tid = trans->tid; 3126 ip->ino_leaf.delete_ts = trans->time32; 3127 ip->sync_ino_leaf.delete_ts = trans->time32; 3128 3129 3130 /* 3131 * Adjust the inode count in the volume header 3132 */ 3133 hammer_sync_lock_sh(trans); 3134 if (ip->flags & HAMMER_INODE_ONDISK) { 3135 hammer_modify_volume_field(trans, 3136 trans->rootvol, 3137 vol0_stat_inodes); 3138 --ip->hmp->rootvol->ondisk->vol0_stat_inodes; 3139 hammer_modify_volume_done(trans->rootvol); 3140 } 3141 hammer_sync_unlock(trans); 3142 } 3143 } 3144 3145 if (error) 3146 goto done; 3147 ip->sync_flags &= ~HAMMER_INODE_BUFS; 3148 3149 defer_buffer_flush: 3150 /* 3151 * Now update the inode's on-disk inode-data and/or on-disk record. 3152 * DELETED and ONDISK are managed only in ip->flags. 3153 * 3154 * In the case of a defered buffer flush we still update the on-disk 3155 * inode to satisfy visibility requirements if there happen to be 3156 * directory dependancies. 3157 */ 3158 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) { 3159 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK: 3160 /* 3161 * If deleted and on-disk, don't set any additional flags. 3162 * the delete flag takes care of things. 3163 * 3164 * Clear flags which may have been set by the frontend. 3165 */ 3166 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3167 HAMMER_INODE_SDIRTY | 3168 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3169 HAMMER_INODE_DELETING); 3170 break; 3171 case HAMMER_INODE_DELETED: 3172 /* 3173 * Take care of the case where a deleted inode was never 3174 * flushed to the disk in the first place. 3175 * 3176 * Clear flags which may have been set by the frontend. 3177 */ 3178 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3179 HAMMER_INODE_SDIRTY | 3180 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3181 HAMMER_INODE_DELETING); 3182 while (RB_ROOT(&ip->rec_tree)) { 3183 hammer_record_t record = RB_ROOT(&ip->rec_tree); 3184 hammer_ref(&record->lock); 3185 KKASSERT(hammer_oneref(&record->lock)); 3186 record->flags |= HAMMER_RECF_DELETED_BE; 3187 ++record->ip->rec_generation; 3188 hammer_rel_mem_record(record); 3189 } 3190 break; 3191 case HAMMER_INODE_ONDISK: 3192 /* 3193 * If already on-disk, do not set any additional flags. 3194 */ 3195 break; 3196 default: 3197 /* 3198 * If not on-disk and not deleted, set DDIRTY to force 3199 * an initial record to be written. 3200 * 3201 * Also set the create_tid in both the frontend and backend 3202 * copy of the inode record. 3203 */ 3204 ip->ino_leaf.base.create_tid = trans->tid; 3205 ip->ino_leaf.create_ts = trans->time32; 3206 ip->sync_ino_leaf.base.create_tid = trans->tid; 3207 ip->sync_ino_leaf.create_ts = trans->time32; 3208 ip->sync_flags |= HAMMER_INODE_DDIRTY; 3209 break; 3210 } 3211 3212 /* 3213 * If DDIRTY or SDIRTY is set, write out a new record. 
3214 * If the inode is already on-disk the old record is marked as 3215 * deleted. 3216 * 3217 * If DELETED is set hammer_update_inode() will delete the existing 3218 * record without writing out a new one. 3219 * 3220 * If *ONLY* the ITIMES flag is set we can update the record in-place. 3221 */ 3222 if (ip->flags & HAMMER_INODE_DELETED) { 3223 error = hammer_update_inode(&cursor, ip); 3224 } else 3225 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) && 3226 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 3227 error = hammer_update_itimes(&cursor, ip); 3228 } else 3229 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY | 3230 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 3231 error = hammer_update_inode(&cursor, ip); 3232 } 3233 done: 3234 if (ip->flags & HAMMER_INODE_MODMASK) 3235 hammer_inode_dirty(ip); 3236 if (error) { 3237 hammer_critical_error(ip->hmp, ip, error, 3238 "while syncing inode"); 3239 } 3240 hammer_done_cursor(&cursor); 3241 return(error); 3242 } 3243 3244 /* 3245 * This routine is called when the OS is no longer actively referencing 3246 * the inode (but might still be keeping it cached), or when releasing 3247 * the last reference to an inode. 3248 * 3249 * At this point if the inode's nlinks count is zero we want to destroy 3250 * it, which may mean destroying it on-media too. 3251 */ 3252 void 3253 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 3254 { 3255 struct vnode *vp; 3256 3257 /* 3258 * Set the DELETING flag when the link count drops to 0 and the 3259 * OS no longer has any opens on the inode. 3260 * 3261 * The backend will clear DELETING (a mod flag) and set DELETED 3262 * (a state flag) when it is actually able to perform the 3263 * operation. 3264 * 3265 * Don't reflag the deletion if the flusher is currently syncing 3266 * one that was already flagged. A previously set DELETING flag 3267 * may bounce around flags and sync_flags until the operation is 3268 * completely done. 3269 * 3270 * Do not attempt to modify a snapshot inode (one set to read-only). 3271 */ 3272 if (ip->ino_data.nlinks == 0 && 3273 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 3274 ip->flags |= HAMMER_INODE_DELETING; 3275 ip->flags |= HAMMER_INODE_TRUNCATED; 3276 ip->trunc_off = 0; 3277 vp = NULL; 3278 if (getvp) { 3279 if (hammer_get_vnode(ip, &vp) != 0) 3280 return; 3281 } 3282 3283 /* 3284 * Final cleanup 3285 */ 3286 if (ip->vp) 3287 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0); 3288 if (ip->flags & HAMMER_INODE_MODMASK) 3289 hammer_inode_dirty(ip); 3290 if (getvp) 3291 vput(vp); 3292 } 3293 } 3294 3295 /* 3296 * After potentially resolving a dependancy the inode is tested 3297 * to determine whether it needs to be reflushed. 3298 */ 3299 void 3300 hammer_test_inode(hammer_inode_t ip) 3301 { 3302 if (ip->flags & HAMMER_INODE_REFLUSH) { 3303 ip->flags &= ~HAMMER_INODE_REFLUSH; 3304 hammer_ref(&ip->lock); 3305 if (ip->flags & HAMMER_INODE_RESIGNAL) { 3306 ip->flags &= ~HAMMER_INODE_RESIGNAL; 3307 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 3308 } else { 3309 hammer_flush_inode(ip, 0); 3310 } 3311 hammer_rel_inode(ip, 0); 3312 } 3313 } 3314 3315 /* 3316 * Clear the RECLAIM flag on an inode. This occurs when the inode is 3317 * reassociated with a vp or just before it gets freed. 3318 * 3319 * Pipeline wakeups to threads blocked due to an excessive number of 3320 * detached inodes. This typically occurs when atime updates accumulate 3321 * while scanning a directory tree. 
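 * The waiters woken up here were queued on hmp->reclaim_list by
 * hammer_inode_waitreclaims() below.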
3322 */ 3323 static void 3324 hammer_inode_wakereclaims(hammer_inode_t ip) 3325 { 3326 struct hammer_reclaim *reclaim; 3327 hammer_mount_t hmp = ip->hmp; 3328 3329 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 3330 return; 3331 3332 --hammer_count_reclaims; 3333 --hmp->count_reclaims; 3334 ip->flags &= ~HAMMER_INODE_RECLAIM; 3335 3336 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) { 3337 KKASSERT(reclaim->count > 0); 3338 if (--reclaim->count == 0) { 3339 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 3340 wakeup(reclaim); 3341 } 3342 } 3343 } 3344 3345 /* 3346 * Setup our reclaim pipeline. We only let so many detached (and dirty) 3347 * inodes build up before we start blocking. This routine is called 3348 * if a new inode is created or an inode is loaded from media. 3349 * 3350 * When we block we don't care *which* inode has finished reclaiming, 3351 * as long as one does. 3352 * 3353 * The reclaim pipeline is primarily governed by the auto-flush which is 3354 * 1/4 hammer_limit_reclaims. We don't want to block if the count is 3355 * less than 1/2 hammer_limit_reclaims. From 1/2 to full count is 3356 * dynamically governed. 3357 */ 3358 void 3359 hammer_inode_waitreclaims(hammer_transaction_t trans) 3360 { 3361 hammer_mount_t hmp = trans->hmp; 3362 struct hammer_reclaim reclaim; 3363 int lower_limit; 3364 3365 /* 3366 * Track inode load, delay if the number of reclaiming inodes is 3367 * between 2/4 and 4/4 hammer_limit_reclaims, depending. 3368 */ 3369 if (curthread->td_proc) { 3370 struct hammer_inostats *stats; 3371 3372 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid); 3373 ++stats->count; 3374 3375 if (stats->count > hammer_limit_reclaims / 2) 3376 stats->count = hammer_limit_reclaims / 2; 3377 lower_limit = hammer_limit_reclaims - stats->count; 3378 if (hammer_debug_general & 0x10000) { 3379 kprintf("pid %5d limit %d\n", 3380 (int)curthread->td_proc->p_pid, lower_limit); 3381 } 3382 } else { 3383 lower_limit = hammer_limit_reclaims * 3 / 4; 3384 } 3385 if (hmp->count_reclaims >= lower_limit) { 3386 reclaim.count = 1; 3387 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 3388 tsleep(&reclaim, 0, "hmrrcm", hz); 3389 if (reclaim.count > 0) 3390 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 3391 } 3392 } 3393 3394 /* 3395 * Keep track of reclaim statistics on a per-pid basis using a loose 3396 * 4-way set associative hash table. Collisions inherit the count of 3397 * the previous entry. 3398 * 3399 * NOTE: We want to be careful here to limit the chain size. If the chain 3400 * size is too large a pid will spread its stats out over too many 3401 * entries under certain types of heavy filesystem activity and 3402 * wind up not delaying long enough. 3403 */ 3404 static 3405 struct hammer_inostats * 3406 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid) 3407 { 3408 struct hammer_inostats *stats; 3409 int delta; 3410 int chain; 3411 static volatile int iterator; /* we don't care about MP races */ 3412 3413 /* 3414 * Chain up to 4 times to find our entry. 3415 */ 3416 for (chain = 0; chain < 4; ++chain) { 3417 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK]; 3418 if (stats->pid == pid) 3419 break; 3420 } 3421 3422 /* 3423 * Replace one of the four chaining entries with our new entry. 
3424 */ 3425 if (chain == 4) { 3426 stats = &hmp->inostats[(pid + (iterator++ & 3)) & 3427 HAMMER_INOSTATS_HMASK]; 3428 stats->pid = pid; 3429 } 3430 3431 /* 3432 * Decay the entry 3433 */ 3434 if (stats->count && stats->ltick != ticks) { 3435 delta = ticks - stats->ltick; 3436 stats->ltick = ticks; 3437 if (delta <= 0 || delta > hz * 60) 3438 stats->count = 0; 3439 else 3440 stats->count = stats->count * hz / (hz + delta); 3441 } 3442 if (hammer_debug_general & 0x10000) 3443 kprintf("pid %5d stats %d\n", (int)pid, stats->count); 3444 return (stats); 3445 } 3446 3447 #if 0 3448 3449 /* 3450 * XXX not used, doesn't work very well due to the large batching nature 3451 * of flushes. 3452 * 3453 * A larger then normal backlog of inodes is sitting in the flusher, 3454 * enforce a general slowdown to let it catch up. This routine is only 3455 * called on completion of a non-flusher-related transaction which 3456 * performed B-Tree node I/O. 3457 * 3458 * It is possible for the flusher to stall in a continuous load. 3459 * blogbench -i1000 -o seems to do a good job generating this sort of load. 3460 * If the flusher is unable to catch up the inode count can bloat until 3461 * we run out of kvm. 3462 * 3463 * This is a bit of a hack. 3464 */ 3465 void 3466 hammer_inode_waithard(hammer_mount_t hmp) 3467 { 3468 /* 3469 * Hysteresis. 3470 */ 3471 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 3472 if (hmp->count_reclaims < hammer_limit_reclaims / 2 && 3473 hmp->count_iqueued < hmp->count_inodes / 20) { 3474 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 3475 return; 3476 } 3477 } else { 3478 if (hmp->count_reclaims < hammer_limit_reclaims || 3479 hmp->count_iqueued < hmp->count_inodes / 10) { 3480 return; 3481 } 3482 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 3483 } 3484 3485 /* 3486 * Block for one flush cycle. 3487 */ 3488 hammer_flusher_wait_next(hmp); 3489 } 3490 3491 #endif 3492