/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

static
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_inode_sideq_t *ipul;
	hammer2_pfs_t *pmp = ip->pmp;

	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
		ipul = kmalloc(sizeof(*ipul), pmp->minode,
			       M_WAITOK | M_ZERO);
		ipul->ip = ip;
		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			hammer2_inode_ref(ip);
			atomic_set_int(&ip->flags,
				       HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			++pmp->sideq_count;
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
		}
	}
}
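
/*
 * Descriptive note: hammer2_inode_modify() queues an inode here when it is
 * dirtied without a vnode to hang the dirty state off of, and
 * hammer2_inode_run_sideq() later drains the queue from within a flush
 * transaction.  The ONSIDEQ flag is tested again under list_spin above
 * because the kmalloc() is performed without the spinlock held.
 */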

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 * - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *   inode locking function will automatically set the RDONLY flag.
 *
 * - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *   Most front-end inode locks do.
 *
 * - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *   the inode data be resolved.  This is used by the syncthr because
 *   it can run on an unresolved/out-of-sync cluster, and also by the
 *   vnode reclamation code to avoid unnecessary I/O (particularly when
 *   disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_inode_ref(ip);

	/*
	 * Inode structure mutex
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
	} else {
		hammer2_mtx_ex(&ip->lock);
	}
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	hammer2_mtx_unlock(&ip->lock);
	hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}
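
/*
 * Usage sketch (illustrative only): hammer2_inode_lookup() above returns a
 * referenced inode, which the caller eventually releases with
 * hammer2_inode_drop():
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		...
 *		hammer2_inode_drop(ip);
 *	}
 */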

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
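
/*
 * Usage sketch (illustrative, error handling abbreviated): hammer2_igetv()
 * is called with the inode locked shared or exclusive and hands back an
 * exclusively locked vnode, or NULL with *errorp set to a UNIX error:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 */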

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
		  hammer2_cluster_t *cluster, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(cluster == NULL ||
		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
again:
	while (cluster) {
		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
		if (nip == NULL)
			break;

		hammer2_mtx_ex(&nip->lock);

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			continue;
		}
		if (idx >= 0)
			hammer2_inode_repoint_one(nip, cluster, idx);
		else
			hammer2_inode_repoint(nip, NULL, cluster);

		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (cluster) {
		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
		nip->meta = nipdata->meta;
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, cluster);
	} else {
		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);	/*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_ex(&nip->lock);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}

	return (nip);
}

/*
 * MESSY! CLEANUP!
 *
 * Create a new inode using the vattr to figure out the type.  A non-zero
 * type field overrides vattr.  We need the directory to set iparent or to
 * use when the inode is directly embedded in a directory (typically super-root
 * entries), but note that this really only applies to OBJTYPE_DIRECTORY as
 * non-directory inodes can be hardlinked.
 *
 * If no error occurs the new inode is returned, otherwise NULL is returned.
 * It is possible for an error to create a junk inode and then fail later.
 * It will attempt to delete the junk inode and return NULL in this situation.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manners of inodes, including
 *	 super-root entries for snapshots and PFSs.  When used to create a
 *	 snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the name/name_len/lhc
 *	 is optional, but is typically specified to make debugging and
 *	 recovery easier.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
		     struct vattr *vap, struct ucred *cred,
		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
		     hammer2_key_t inum,
		     uint8_t type, uint8_t target_type,
		     int flags, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	if (name)
		lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;
	nip = NULL;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	if (name) {
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = flags;
	bzero(&xop->meta, sizeof(xop->meta));

	if (vap) {
		xop->meta.type = hammer2_get_obj_type(vap->va_type);

		switch (xop->meta.type) {
		case HAMMER2_OBJTYPE_CDEV:
		case HAMMER2_OBJTYPE_BDEV:
			xop->meta.rmajor = vap->va_rmajor;
			xop->meta.rminor = vap->va_rminor;
			break;
		default:
			break;
		}
		type = xop->meta.type;
	} else {
		xop->meta.type = type;
		xop->meta.target_type = target_type;
	}
	xop->meta.inum = inum;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	if (vap)
		xop->meta.mode = vap->va_mode;
	xop->meta.nlinks = 1;
	if (vap) {
		if (dip->pmp) {
			xuid = hammer2_to_unix_xid(&pip_uid);
			xuid = vop_helper_create_uid(dip->pmp->mp,
						     pip_mode,
						     xuid,
						     cred,
						     &vap->va_mode);
		} else {
			/* super-root has no dip and/or pmp */
			xuid = 0;
		}
		if (vap->va_vaflags & VA_UID_UUID_VALID)
			xop->meta.uid = vap->va_uid_uuid;
		else if (vap->va_uid != (uid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
		else
			hammer2_guid_to_uuid(&xop->meta.uid, xuid);

		if (vap->va_vaflags & VA_GID_UUID_VALID)
			xop->meta.gid = vap->va_gid_uuid;
		else if (vap->va_gid != (gid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
		else
			xop->meta.gid = pip_gid;
	}

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	if (name) {
		hammer2_xop_setname(&xop->head, name, name_len);
	} else {
		name_len = hammer2_xop_setname_inum(&xop->head, inum);
		KKASSERT(lhc == inum);
	}
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(dip);

	return (nip);
}
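
/*
 * Usage sketch (illustrative; error handling and inode-number allocation
 * omitted, and the exact front-end sequence lives in the VNOPS code): a
 * create path allocates the inode and then adds a directory entry which
 * refers to it by inode number:
 *
 *	nip = hammer2_inode_create(dip, dip, vap, cred,
 *				   name, name_len, 0,
 *				   inum, 0, 0, 0, &error);
 *	if (error == 0)
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum, nip->meta.type);
 */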

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_mkdirent);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * The caller must hold the inode exclusively locked; the cluster, if not
 * NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		ip->cluster.nchains = idx + 1;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now zero, delete the inode if not open.
	 */
	if (isopen == 0) {
		hammer2_xop_destroy_t *xop;

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;

	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp) {
		vsetisdirty(ip->vp);
	} else if ((pmp = ip->pmp) != NULL &&
		   (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0) {
		hammer2_inode_delayed_sideq(ip);
	}
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;
		int error;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
}
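
/*
 * Usage sketch (illustrative; the transaction calls are shown schematically,
 * see hammer2.h for the exact interfaces): callers hold the inode lock and
 * run the sync inside a transaction, roughly:
 *
 *	hammer2_trans_init(ip->pmp, 0);
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_unlock(ip);
 *	hammer2_trans_done(ip->pmp);
 */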

/*
 * The normal filesystem sync no longer has visibility to an inode structure
 * after its vnode has been reclaimed.  In this situation an unlinked-but-open
 * inode or a dirty inode may require additional processing to synchronize
 * ip->meta to its underlying cluster nodes.
 *
 * In particular, reclaims can occur in almost any state (for example, when
 * doing operations on unrelated vnodes) and flushing the reclaimed inode
 * in the reclaim path itself is a non-starter.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_sideq(hammer2_pfs_t *pmp, int doall)
{
	hammer2_xop_destroy_t *xop;
	hammer2_inode_sideq_t *ipul;
	hammer2_inode_t *ip;
	int error;

	/*
	 * Nothing to do if sideq is empty or (if doall == 0) there just
	 * aren't very many sideq entries.
	 */
	if (TAILQ_EMPTY(&pmp->sideq))
		return;
	if (doall == 0) {
		if (pmp->sideq_count > (pmp->inum_count >> 3)) {
			kprintf("hammer2: flush sideq %ld/%ld\n",
				pmp->sideq_count, pmp->inum_count);
		}
	}

	if (doall == 0 && pmp->sideq_count <= (pmp->inum_count >> 3))
		return;

	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		--pmp->sideq_count;
		ip = ipul->ip;
		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
		hammer2_spin_unex(&pmp->list_spin);
		kfree(ipul, pmp->minode);

		hammer2_inode_lock(ip, 0);
		if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
			/*
			 * The inode was unlinked while open.  The inode must
			 * be deleted and destroyed.
			 */
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_start(&xop->head,
					  hammer2_inode_xop_destroy);
			error = hammer2_xop_collect(&xop->head, 0);
			/* XXX error handling */
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		} else {
			/*
			 * The inode was dirty as-of the reclaim, requiring
			 * synchronization of ip->meta with its underlying
			 * chains.
			 */
			hammer2_inode_chain_sync(ip);
		}

		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);			/* ipul ref */

		hammer2_spin_ex(&pmp->list_spin);

		/*
		 * If doall is 0 the original sideq_count was greater than
		 * 1/8 the inode count.  Add some hysteresis in the loop,
		 * don't stop flushing until sideq_count drops below 1/16.
		 */
		if (doall == 0 && pmp->sideq_count <= (pmp->inum_count >> 4)) {
			kprintf("hammer2: flush sideq %ld/%ld (end)\n",
				pmp->sideq_count, pmp->inum_count);
			break;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);
}

/*
 * Helper to create a directory entry.
 */
void
hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_mkdirent_t *xop = &arg->xop_mkdirent;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	size_t data_len;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("dirent_create lhc %016jx clindex %d\n",
			xop->lhc, thr->clindex);

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}

	/*
	 * We may be able to embed the directory entry directly in the
	 * blockref.
	 */
	if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
		data_len = 0;
	else
		data_len = HAMMER2_ALLOC_MIN;

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_DIRENT,
				     data_len,
				     xop->head.mtid, 0, 0);
	if (error == 0) {
		/*
		 * WARNING: chain->data->buf is sized to chain->bytes,
		 *	    do not use sizeof(chain->data->buf), which
		 *	    will be much larger.
		 */
		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
		if (error == 0) {
			chain->bref.embed.dirent = xop->dirent;
			if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
				bcopy(xop->head.name1, chain->bref.check.buf,
				      xop->dirent.namlen);
			else
				bcopy(xop->head.name1, chain->data->buf,
				      xop->dirent.namlen);
		}
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_create_t *xop = &arg->xop_create;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("inode_create lhc %016jx clindex %d\n",
			xop->lhc, thr->clindex);

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, xop->flags);
	if (error == 0) {
		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
		if (error == 0) {
			chain->data->ipdata.meta = xop->meta;
			if (xop->head.name1) {
				bcopy(xop->head.name1,
				      chain->data->ipdata.filename,
				      xop->head.name1_len);
				chain->data->ipdata.meta.name_len =
					xop->head.name1_len;
			}
			chain->data->ipdata.meta.name_key = xop->lhc;
		}
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_run_sideq()
 */
void
hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	ip = xop->head.ip1;
	pmp = ip->pmp;

	chain = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		parent = NULL;
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	parent = hammer2_chain_getparent(chain, HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	KKASSERT(chain->parent == parent);

	/*
	 * We have the correct parent, we can issue the deletion.
	 */
	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
	error = 0;
done:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

void
hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = 0;
		goto done;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->key_beg, xop->key_end,
				     &error, HAMMER2_LOOKUP_ALWAYS);
	while (chain) {
		hammer2_chain_delete(parent, chain,
				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
		hammer2_xop_feed(&xop->head, chain, thr->clindex, chain->error);
		/* depend on function to unlock the shared lock */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, xop->key_end,
					   &error,
					   HAMMER2_LOOKUP_ALWAYS);
	}
done:
	if (error == 0)
		error = HAMMER2_ERROR_ENOENT;
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

void
hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip1->pmp;
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = HAMMER2_ERROR_EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}
	if (error)
		goto fail;

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
	if (error)
		goto fail;

	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name1) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
		wipdata->meta.name_len = xop->head.name1_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory
	 */
	error = hammer2_chain_create(&parent, &chain,
				     pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, 0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Synchronize the in-memory inode with the chain.
 */
void
hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	int error;

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	if (parent->error) {
		error = parent->error;
		goto done;
	}

	error = 0;

	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
		/* osize must be ignored */
	} else if (xop->meta.size < xop->osize) {
		/*
		 * We must delete any chains beyond the EOF.  The chain
		 * straddling the EOF will be pending in the bioq.
		 */
		hammer2_key_t lbase;
		hammer2_key_t key_next;

		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
		chain = hammer2_chain_lookup(&parent, &key_next,
					     lbase, HAMMER2_KEY_MAX,
					     &error,
					     HAMMER2_LOOKUP_NODATA |
					     HAMMER2_LOOKUP_NODIRECT);
		while (chain) {
			/*
			 * Degenerate embedded case, nothing to loop on
			 */
			switch (chain->bref.type) {
			case HAMMER2_BREF_TYPE_DIRENT:
			case HAMMER2_BREF_TYPE_INODE:
				KKASSERT(0);
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_delete(parent, chain,
						     xop->head.mtid,
						     HAMMER2_DELETE_PERMANENT);
				break;
			}
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_NODATA |
						   HAMMER2_LOOKUP_NODIRECT);
		}

		/*
		 * Reset to point at inode for following code, if necessary.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
			parent = hammer2_inode_chain(xop->head.ip1,
						     thr->clindex,
						     HAMMER2_RESOLVE_ALWAYS);
			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
				parent->data->ipdata.filename);
		}
	}

	/*
	 * Sync the inode meta-data, potentially clear the blockset area
	 * of direct data so it can be used for blockrefs.
	 */
	if (error == 0) {
		error = hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
		if (error == 0) {
			parent->data->ipdata.meta = xop->meta;
			if (xop->clear_directdata) {
				bzero(&parent->data->ipdata.u.blockset,
				      sizeof(parent->data->ipdata.u.blockset));
			}
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
}