/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
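	 *
	 * Callers typically feed the returned depend back into successive
	 * calls to build up a dependency group; a minimal sketch, following
	 * the pattern used by hammer2_inode_depend() below:
	 *
	 *	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	 *	depend = hammer2_inode_setdepend_locked(ip2, depend);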
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}

/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
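 *
 * A hypothetical caller pattern (e.g. a rename-style operation touching two
 * directories and two inodes; the fdip/tdip/fip/tip names are illustrative
 * and not lifted verbatim from the VNOPS code), with the matching unlocks:
 *
 *	hammer2_inode_lock4(fdip, tdip, fip, tip);
 *	... modify the four inodes ...
 *	hammer2_inode_unlock(tip);
 *	hammer2_inode_unlock(fip);
 *	hammer2_inode_unlock(tdip);
 *	hammer2_inode_unlock(fdip);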
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation; this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 has been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
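 *
 * A minimal usage sketch (cluster index 0; the caller's context is
 * illustrative only):
 *
 *	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		... inspect chain->data->ipdata ...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}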
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
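 *
 * The expected pairing with hammer2_inode_lock_upgrade() is roughly the
 * following sketch (this is the pattern hammer2_igetv() uses below):
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... operate with the lock held exclusively ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);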
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						&pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}

/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.
 * Otherwise the whole cluster is synchronized.  inum will be extracted
 * from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const uint8_t *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode and
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
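 *
 * A typical caller pairs this with hammer2_dirent_create(); a rough sketch
 * only, with error handling omitted and hammer2_trans_newinum() assumed to
 * be the inum allocator used by the VNOPS code:
 *
 *	inum = hammer2_trans_newinum(dip->pmp);
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	if (error == 0)
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum, nip->meta.type);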
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.
	 * We are not in a flush transaction so we can't mess with media
	 * topology above normal inodes (i.e. the index of the inodes
	 * themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked.  The cluster, if not NULL,
 * must also be exclusively locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * has been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero; delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code; doing it now would desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
#if 0
		hammer2_xop_destroy_t *xop;
#endif

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
#if 0
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * is set we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
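 *
 * In the explicit flush paths this is normally followed by
 * hammer2_inode_chain_flush(); a rough sketch of the pairing (the exact
 * flags passed to the flush vary by caller):
 *
 *	hammer2_inode_lock(ip, 0);
 *	error = hammer2_inode_chain_sync(ip);
 *	if (error == 0)
 *		error = hammer2_inode_chain_flush(ip, flags);
 *	hammer2_inode_unlock(ip);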
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n",
				ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n",
				ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}