/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2023 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>
*/

#include "hammer2.h"

#define INODE_DEBUG	0

/*
 * Initialize inum hash in fresh structure
 */
void
hammer2_inum_hash_init(hammer2_pfs_t *pmp)
{
	hammer2_inum_hash_t *hash;
	int i;

	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
		hash = &pmp->inumhash[i];
		hammer2_spin_init(&hash->spin, "h2inum");
	}
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}
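
/*
 * Illustrative sketch (not compiled): the caller-side protocol for
 * hammer2_inode_setdepend_locked().  The returned depend, which may be
 * the (void *)-1 sentinel when an inode is already on SYNCQ, must be
 * fed back into the next call so all inodes land in the same dependency
 * group.  pmp->list_spin must be held across the whole sequence, as in
 * hammer2_inode_lock4() below.
 */
#if 0
static void
example_group_depend(hammer2_inode_t **ips, int count)
{
	hammer2_pfs_t *pmp = ips[0]->pmp;
	hammer2_depend_t *depend = NULL;
	int i;

	hammer2_spin_ex(&pmp->list_spin);
	for (i = 0; i < count; ++i)
		depend = hammer2_inode_setdepend_locked(ips[i], depend);
	hammer2_spin_unex(&pmp->list_spin);
}
#endif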

/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize the common case to avoid the pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}
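
/*
 * Illustrative sketch (not compiled, hypothetical helper): a front-end
 * path typically dirties a locked inode's meta-data and lets the SIDEQ
 * machinery hand it to the syncer.  hammer2_inode_modify() normally
 * queues the inode itself; the explicit hammer2_inode_delayed_sideq()
 * call is shown for clarity and is a no-op once the flags are set.
 */
#if 0
static void
example_touch_mtime(hammer2_inode_t *ip, uint64_t mtime)
{
	hammer2_inode_lock(ip, 0);		/* exclusive */
	hammer2_inode_modify(ip);		/* mark meta-data dirty */
	ip->meta.mtime = mtime;
	hammer2_inode_delayed_sideq(ip);	/* queue for the syncer */
	hammer2_inode_unlock(ip);
}
#endif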

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
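
/*
 * Illustrative sketch (not compiled): common lock/unlock pairings.
 * Exclusive locks (how == 0) participate in the SYNCQ stall logic
 * above; shared locks do not.
 */
#if 0
static void
example_lock_pairs(hammer2_inode_t *ip)
{
	/* exclusive lock for a typical front-end modification */
	hammer2_inode_lock(ip, 0);
	/* ... modify ip->meta ... */
	hammer2_inode_unlock(ip);

	/* shared lock for read-only access to the meta-data */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	/* ... read ip->meta ... */
	hammer2_inode_unlock(ip);
}
#endif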

/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies and record the first inode found on SYNCQ
	 * (the operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation.  This inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}
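
/*
 * Illustrative sketch (not compiled, hypothetical arguments): a
 * rename-style operation locks both directories and the target inode
 * in one call, which also merges them into a single flush dependency.
 * hammer2_inode_unlock() releases both the mutex and the reference
 * taken by hammer2_inode_lock4().
 */
#if 0
static void
example_rename_lock(hammer2_inode_t *fdip, hammer2_inode_t *tdip,
		    hammer2_inode_t *ip)
{
	hammer2_inode_lock4(fdip, tdip, ip, NULL);
	/* ... manipulate the directory entries ... */
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
}
#endif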

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
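
/*
 * Illustrative sketch (not compiled, hypothetical helper): creating a
 * file dirties both the directory receiving the dirent and the new
 * inode; making them interdependent guarantees the syncer flushes them
 * in the same pass.  Per the comment above, the inode holding the
 * dirent goes first.
 */
#if 0
static void
example_create_depend(hammer2_inode_t *dip, hammer2_inode_t *nip)
{
	/* dip and nip are both locked by the caller */
	hammer2_inode_depend(dip, nip);
}
#endif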

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
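
/*
 * Illustrative sketch (not compiled): a chain obtained from
 * hammer2_inode_chain() comes back referenced and locked and must be
 * released with the matching unlock + drop pair.
 */
#if 0
static void
example_inspect_chain(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... inspect chain->bref, chain->data ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif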

/*
 * Select a chain out of an inode's cluster and lock both it and its
 * parent, in (parent, chain) lock order.  Both are returned referenced
 * and locked; *parentp may be NULL for a root chain.  The loop retries
 * if the cluster is repointed while the locks are being acquired.
 */
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

/*
 * Restore a lock that was temporarily released.
 */
void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
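
/*
 * Illustrative sketch (not compiled): release the inode lock across a
 * blocking operation and restore the previous shared/exclusive state
 * afterwards.  hammer2_igetv() below uses this pattern around vget().
 */
#if 0
static void
example_blocking_op(hammer2_inode_t *ip)
{
	hammer2_mtx_state_t ostate;

	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... blocking call that must not hold ip->lock ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
}
#endif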

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* XXX pretends it wasn't exclusive, but shouldn't matter */
	//if (mtx_islocked_ex(&ip->lock)) {
	if (0) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

static __inline hammer2_inum_hash_t *
inumhash(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	int hv;

	hv = (int)inum;
	return (&pmp->inumhash[hv & HAMMER2_INUMHASH_MASK]);
}

/*
 * Lookup an inode by inode number.  The returned inode is referenced
 * but not locked.
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inum_hash_t *hash;
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hash = inumhash(pmp, inum);
		hammer2_spin_sh(&hash->spin);
		for (ip = hash->base; ip; ip = ip->next) {
			if (ip->meta.inum == inum) {
				hammer2_inode_ref(ip);
				break;
			}
		}
		hammer2_spin_unsh(&hash->spin);
	}
	return(ip);
}
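
/*
 * Illustrative sketch (not compiled): hammer2_inode_lookup() returns a
 * referenced but unlocked inode, or NULL.  The lookup reference must be
 * dropped separately from the lock/unlock pair.
 */
#if 0
static void
example_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		/* ... read ip->meta ... */
		hammer2_inode_unlock(ip);	/* releases the lock ref */
		hammer2_inode_drop(ip);		/* releases the lookup ref */
	}
}
#endif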

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			hammer2_inum_hash_t *hash;
			hammer2_inode_t **xipp;

			pmp = ip->pmp;
			KKASSERT(pmp);
			hash = inumhash(pmp, ip->meta.inum);

			hammer2_spin_ex(&hash->spin);
			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONHASH) {
					xipp = &hash->base;
					while (*xipp != ip)
						xipp = &(*xipp)->next;
					*xipp = ip->next;
					ip->next = NULL;
					atomic_add_long(&pmp->inum_count, -1);
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONHASH);
				}
				hammer2_spin_unex(&hash->spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);
				/*
				 * Add inode to reclaim queue.
				 */
				TAILQ_INSERT_TAIL(&pmp->recq, ip, recq_entry);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&hash->spin);
			}
		} else {
			/*
			 * Non-zero transition.
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct m_vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct m_vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, -1, -1);
	}
	return (vp);
}
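
/*
 * Illustrative sketch (not compiled, hypothetical helper): the inode
 * must be locked around hammer2_igetv().  On success the returned
 * vnode is locked and referenced; on failure vp is NULL and the error
 * is a UNIX errno.
 */
#if 0
static struct m_vnode *
example_get_vnode(hammer2_inode_t *ip)
{
	struct m_vnode *vp;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);

	return (error ? NULL : vp);
}
#endif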

/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONHASH) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handling insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_inum_hash_t *hash;
		hammer2_inode_t *xip;
		hammer2_inode_t **xipp;

		hash = inumhash(pmp, nip->meta.inum);
		hammer2_spin_ex(&hash->spin);
		for (xipp = &hash->base;
		     (xip = *xipp) != NULL;
		     xipp = &xip->next)
		{
			if (xip->meta.inum == nip->meta.inum) {
				hammer2_spin_unex(&hash->spin);
				hammer2_mtx_unlock(&nip->lock);
				hammer2_inode_drop(nip);
				goto again;
			}
		}
		nip->next = NULL;
		*xipp = nip;
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONHASH);
		atomic_add_long(&pmp->inum_count, 1);
		hammer2_spin_unex(&hash->spin);
	}
	return (nip);
}
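
/*
 * Illustrative sketch (not compiled): passing a NULL xop allocates a
 * fresh in-memory inode for the given inode number, as during initial
 * PFS setup.  The inode is returned referenced and exclusively locked,
 * as if hammer2_inode_lock() had been called.
 */
#if 0
static hammer2_inode_t *
example_alloc_pfs_inode(hammer2_pfs_t *pmp)
{
	/* PFS inum is always 1; with a NULL xop the idx argument is unused */
	return (hammer2_inode_get(pmp, NULL, 1, -1));
}
#endif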

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
		     const char *name, size_t name_len,
		     int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
10762d60b848STomohiro Kusumi 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1077ddd1d3d1STomohiro Kusumi 	hammer2_update_time(&xop->meta.ctime, false);
10782d60b848STomohiro Kusumi 	xop->meta.mtime = xop->meta.ctime;
10792d60b848STomohiro Kusumi 	xop->meta.mode = 0755;
10802d60b848STomohiro Kusumi 	xop->meta.nlinks = 1;
10812d60b848STomohiro Kusumi 
10822d60b848STomohiro Kusumi 	hammer2_xop_setname(&xop->head, name, name_len);
10832d60b848STomohiro Kusumi 	xop->meta.name_len = name_len;
10842d60b848STomohiro Kusumi 	xop->meta.name_key = lhc;
10852d60b848STomohiro Kusumi 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
10862d60b848STomohiro Kusumi 
10872d60b848STomohiro Kusumi 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
10882d60b848STomohiro Kusumi 
10892d60b848STomohiro Kusumi 	error = hammer2_xop_collect(&xop->head, 0);
10902d60b848STomohiro Kusumi #if INODE_DEBUG
10912d60b848STomohiro Kusumi 	kprintf("CREATE INODE %*.*s\n",
10922d60b848STomohiro Kusumi 		(int)name_len, (int)name_len, name);
10932d60b848STomohiro Kusumi #endif
10942d60b848STomohiro Kusumi 
10952d60b848STomohiro Kusumi 	if (error) {
10962d60b848STomohiro Kusumi 		*errorp = error;
10972d60b848STomohiro Kusumi 		goto done;
10982d60b848STomohiro Kusumi 	}
10992d60b848STomohiro Kusumi 
11002d60b848STomohiro Kusumi 	/*
11012d60b848STomohiro Kusumi 	 * Set up the new inode if not a hardlink pointer.
11022d60b848STomohiro Kusumi 	 *
11032d60b848STomohiro Kusumi 	 * NOTE: *_get() integrates chain's lock into the inode lock.
11042d60b848STomohiro Kusumi 	 *
11052d60b848STomohiro Kusumi 	 * NOTE: Only one new inode can currently be created per
11062d60b848STomohiro Kusumi 	 *	 transaction.  If the need arises we can adjust
11072d60b848STomohiro Kusumi 	 *	 hammer2_trans_init() to allow more.
11082d60b848STomohiro Kusumi 	 *
11092d60b848STomohiro Kusumi 	 * NOTE: nipdata will have chain's blockset data.
11102d60b848STomohiro Kusumi 	 */
11112d60b848STomohiro Kusumi 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
11122d60b848STomohiro Kusumi 	nip->comp_heuristic = 0;
11132d60b848STomohiro Kusumi done:
11142d60b848STomohiro Kusumi 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
11152d60b848STomohiro Kusumi done2:
11162d60b848STomohiro Kusumi 	hammer2_inode_unlock(pip);
11172d60b848STomohiro Kusumi 
11182d60b848STomohiro Kusumi 	return (nip);
11192d60b848STomohiro Kusumi }
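
/*
 * Condensed model of the lhc collision probe used above.  Assumptions
 * are labeled: DIRHASH_LOMASK_X stands in for HAMMER2_DIRHASH_LOMASK
 * and is_used() stands in for the hammer2_scanlhc XOP.  The name hash
 * fixes the high bits of the key; only the low collision-space bits
 * may be incremented, so crossing out of them means the space is full.
 */
#include <errno.h>
#include <stdint.h>

#define DIRHASH_LOMASK_X	0x3FFFULL	/* stand-in constant */

static int
probe_lhc(uint64_t lhcbase, int (*is_used)(uint64_t), uint64_t *lhcp)
{
	uint64_t lhc = lhcbase;

	while (is_used(lhc)) {
		++lhc;
		if ((lhcbase ^ lhc) & ~DIRHASH_LOMASK_X)
			return ENOSPC;	/* collision space exhausted */
	}
	*lhcp = lhc;
	return 0;
}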
11202d60b848STomohiro Kusumi 
11212d60b848STomohiro Kusumi /*
11222d60b848STomohiro Kusumi  * Create a new, normal inode.  This function will create the inode and
11232d60b848STomohiro Kusumi  * its media chains, but will not insert the chains onto the media topology
11242d60b848STomohiro Kusumi  * (doing so would require a flush transaction and cause long stalls).
11252d60b848STomohiro Kusumi  *
11262d60b848STomohiro Kusumi  * Caller must be in a normal transaction.
11272d60b848STomohiro Kusumi  */
11282d60b848STomohiro Kusumi hammer2_inode_t *
11292d60b848STomohiro Kusumi hammer2_inode_create_normal(hammer2_inode_t *pip,
11302d60b848STomohiro Kusumi 			    struct vattr *vap, struct ucred *cred,
11312d60b848STomohiro Kusumi 			    hammer2_key_t inum, int *errorp)
11322d60b848STomohiro Kusumi {
11332d60b848STomohiro Kusumi 	hammer2_xop_create_t *xop;
11342d60b848STomohiro Kusumi 	hammer2_inode_t *dip;
11352d60b848STomohiro Kusumi 	hammer2_inode_t *nip;
11362d60b848STomohiro Kusumi 	int error;
11372d60b848STomohiro Kusumi 	uid_t xuid;
11382d60b848STomohiro Kusumi 	uuid_t pip_uid;
11392d60b848STomohiro Kusumi 	uuid_t pip_gid;
11402d60b848STomohiro Kusumi 	uint32_t pip_mode;
11412d60b848STomohiro Kusumi 	uint8_t pip_comp_algo;
11422d60b848STomohiro Kusumi 	uint8_t pip_check_algo;
11432d60b848STomohiro Kusumi 	hammer2_tid_t pip_inum;
11442d60b848STomohiro Kusumi 
11452d60b848STomohiro Kusumi 	dip = pip->pmp->iroot;
11462d60b848STomohiro Kusumi 	KKASSERT(dip != NULL);
11472d60b848STomohiro Kusumi 
11482d60b848STomohiro Kusumi 	*errorp = 0;
11492d60b848STomohiro Kusumi 
11502d60b848STomohiro Kusumi 	/*hammer2_inode_lock(dip, 0);*/
11512d60b848STomohiro Kusumi 
11522d60b848STomohiro Kusumi 	pip_uid = pip->meta.uid;
11532d60b848STomohiro Kusumi 	pip_gid = pip->meta.gid;
11542d60b848STomohiro Kusumi 	pip_mode = pip->meta.mode;
11552d60b848STomohiro Kusumi 	pip_comp_algo = pip->meta.comp_algo;
11562d60b848STomohiro Kusumi 	pip_check_algo = pip->meta.check_algo;
11572d60b848STomohiro Kusumi 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
11582d60b848STomohiro Kusumi 
11592d60b848STomohiro Kusumi 	/*
11602d60b848STomohiro Kusumi 	 * Create the in-memory hammer2_inode structure for the specified
11612d60b848STomohiro Kusumi 	 * inode.
11622d60b848STomohiro Kusumi 	 */
11632d60b848STomohiro Kusumi 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
11642d60b848STomohiro Kusumi 	nip->comp_heuristic = 0;
11652d60b848STomohiro Kusumi 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
11662d60b848STomohiro Kusumi 		 nip->cluster.nchains == 0);
11672d60b848STomohiro Kusumi 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
11682d60b848STomohiro Kusumi 
11692d60b848STomohiro Kusumi 	/*
11702d60b848STomohiro Kusumi 	 * Setup the inode meta-data
11712d60b848STomohiro Kusumi 	 */
11722d60b848STomohiro Kusumi 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
11732d60b848STomohiro Kusumi 
11742d60b848STomohiro Kusumi 	switch (nip->meta.type) {
11752d60b848STomohiro Kusumi 	case HAMMER2_OBJTYPE_CDEV:
11762d60b848STomohiro Kusumi 	case HAMMER2_OBJTYPE_BDEV:
11772d60b848STomohiro Kusumi 		assert(0); /* XXX unsupported */
11782d60b848STomohiro Kusumi 		nip->meta.rmajor = vap->va_rmajor;
11792d60b848STomohiro Kusumi 		nip->meta.rminor = vap->va_rminor;
11802d60b848STomohiro Kusumi 		break;
11812d60b848STomohiro Kusumi 	default:
11822d60b848STomohiro Kusumi 		break;
11832d60b848STomohiro Kusumi 	}
11842d60b848STomohiro Kusumi 
11852d60b848STomohiro Kusumi 	KKASSERT(nip->meta.inum == inum);
11862d60b848STomohiro Kusumi 	nip->meta.iparent = pip_inum;
11872d60b848STomohiro Kusumi 
11882d60b848STomohiro Kusumi 	/* Inherit parent's inode compression mode. */
11892d60b848STomohiro Kusumi 	nip->meta.comp_algo = pip_comp_algo;
11902d60b848STomohiro Kusumi 	nip->meta.check_algo = pip_check_algo;
11912d60b848STomohiro Kusumi 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1192ddd1d3d1STomohiro Kusumi 	hammer2_update_time(&nip->meta.ctime, false);
11932d60b848STomohiro Kusumi 	nip->meta.mtime = nip->meta.ctime;
11942d60b848STomohiro Kusumi 	nip->meta.mode = vap->va_mode;
11952d60b848STomohiro Kusumi 	nip->meta.nlinks = 1;
11962d60b848STomohiro Kusumi 
11972d60b848STomohiro Kusumi 	xuid = hammer2_to_unix_xid(&pip_uid);
11982d60b848STomohiro Kusumi 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
11992d60b848STomohiro Kusumi 				     xuid, cred,
12002d60b848STomohiro Kusumi 				     &vap->va_mode);
12012d60b848STomohiro Kusumi 	if (vap->va_vaflags & VA_UID_UUID_VALID)
12022d60b848STomohiro Kusumi 		nip->meta.uid = vap->va_uid_uuid;
12032d60b848STomohiro Kusumi 	else if (vap->va_uid != (uid_t)VNOVAL)
12042d60b848STomohiro Kusumi 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
12052d60b848STomohiro Kusumi 	else
12062d60b848STomohiro Kusumi 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
12072d60b848STomohiro Kusumi 
12082d60b848STomohiro Kusumi 	if (vap->va_vaflags & VA_GID_UUID_VALID)
12092d60b848STomohiro Kusumi 		nip->meta.gid = vap->va_gid_uuid;
12102d60b848STomohiro Kusumi 	else if (vap->va_gid != (gid_t)VNOVAL)
12112d60b848STomohiro Kusumi 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
12122d60b848STomohiro Kusumi 	else
12132d60b848STomohiro Kusumi 		nip->meta.gid = pip_gid;
12142d60b848STomohiro Kusumi 
12152d60b848STomohiro Kusumi 	/*
12162d60b848STomohiro Kusumi 	 * Regular files and softlinks allow a small amount of data to be
12172d60b848STomohiro Kusumi 	 * directly embedded in the inode.  This flag will be cleared if
12182d60b848STomohiro Kusumi 	 * the size is extended past the embedded limit.
12192d60b848STomohiro Kusumi 	 */
12202d60b848STomohiro Kusumi 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
12212d60b848STomohiro Kusumi 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
12222d60b848STomohiro Kusumi 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
12232d60b848STomohiro Kusumi 	}
12242d60b848STomohiro Kusumi 
12252d60b848STomohiro Kusumi 	/*
12262d60b848STomohiro Kusumi 	 * Create the inode using (inum) as the key.  Pass pip for
12272d60b848STomohiro Kusumi 	 * method inheritance.
12282d60b848STomohiro Kusumi 	 */
12292d60b848STomohiro Kusumi 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
12302d60b848STomohiro Kusumi 	xop->lhc = inum;
12312d60b848STomohiro Kusumi 	xop->flags = 0;
12322d60b848STomohiro Kusumi 	xop->meta = nip->meta;
12332d60b848STomohiro Kusumi 
12342d60b848STomohiro Kusumi 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
12352d60b848STomohiro Kusumi 	xop->meta.name_key = inum;
12362d60b848STomohiro Kusumi 	nip->meta.name_len = xop->meta.name_len;
12372d60b848STomohiro Kusumi 	nip->meta.name_key = xop->meta.name_key;
12382d60b848STomohiro Kusumi 	hammer2_inode_modify(nip);
12392d60b848STomohiro Kusumi 
12402d60b848STomohiro Kusumi 	/*
12412d60b848STomohiro Kusumi 	 * Create the inode media chains but leave them detached.  We are
12422d60b848STomohiro Kusumi 	 * not in a flush transaction so we can't mess with media topology
12432d60b848STomohiro Kusumi 	 * above normal inodes (i.e. the index of the inodes themselves).
12442d60b848STomohiro Kusumi 	 *
12452d60b848STomohiro Kusumi 	 * We've already set the INODE_CREATING flag.  The inode's media
12462d60b848STomohiro Kusumi 	 * chains will be inserted onto the media topology on the next
12472d60b848STomohiro Kusumi 	 * filesystem sync.
12482d60b848STomohiro Kusumi 	 */
12492d60b848STomohiro Kusumi 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
12502d60b848STomohiro Kusumi 
12512d60b848STomohiro Kusumi 	error = hammer2_xop_collect(&xop->head, 0);
12522d60b848STomohiro Kusumi #if INODE_DEBUG
12532d60b848STomohiro Kusumi 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
12542d60b848STomohiro Kusumi #endif
12552d60b848STomohiro Kusumi 
12562d60b848STomohiro Kusumi 	if (error) {
12572d60b848STomohiro Kusumi 		*errorp = error;
12582d60b848STomohiro Kusumi 		goto done;
12592d60b848STomohiro Kusumi 	}
12602d60b848STomohiro Kusumi 
12612d60b848STomohiro Kusumi 	/*
12622d60b848STomohiro Kusumi 	 * Associate the media chains created by the backend with the
12632d60b848STomohiro Kusumi 	 * frontend inode.
12642d60b848STomohiro Kusumi 	 */
12652d60b848STomohiro Kusumi 	hammer2_inode_repoint(nip, &xop->head.cluster);
12662d60b848STomohiro Kusumi done:
12672d60b848STomohiro Kusumi 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
12682d60b848STomohiro Kusumi 	/*hammer2_inode_unlock(dip);*/
12692d60b848STomohiro Kusumi 
12702d60b848STomohiro Kusumi 	return (nip);
12712d60b848STomohiro Kusumi }
12722d60b848STomohiro Kusumi 
12732d60b848STomohiro Kusumi /*
12742d60b848STomohiro Kusumi  * Create a directory entry under dip with the specified name, inode number,
12752d60b848STomohiro Kusumi  * and OBJTYPE (type).
12762d60b848STomohiro Kusumi  *
12772d60b848STomohiro Kusumi  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
12782d60b848STomohiro Kusumi  *
12792d60b848STomohiro Kusumi  * Caller must hold dip locked.
12802d60b848STomohiro Kusumi  */
12812d60b848STomohiro Kusumi int
12822d60b848STomohiro Kusumi hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
12832d60b848STomohiro Kusumi 		      hammer2_key_t inum, uint8_t type)
12842d60b848STomohiro Kusumi {
12852d60b848STomohiro Kusumi 	hammer2_xop_mkdirent_t *xop;
12862d60b848STomohiro Kusumi 	hammer2_key_t lhc;
12872d60b848STomohiro Kusumi 	int error;
12882d60b848STomohiro Kusumi 
12892d60b848STomohiro Kusumi 	lhc = 0;
12902d60b848STomohiro Kusumi 	error = 0;
12912d60b848STomohiro Kusumi 
12922d60b848STomohiro Kusumi 	KKASSERT(name != NULL);
12932d60b848STomohiro Kusumi 	lhc = hammer2_dirhash(name, name_len);
12942d60b848STomohiro Kusumi 
12952d60b848STomohiro Kusumi 	/*
12962d60b848STomohiro Kusumi 	 * Locate the inode or indirect block to create the new
12972d60b848STomohiro Kusumi 	 * entry in.  At the same time check for key collisions
12982d60b848STomohiro Kusumi 	 * and iterate until we don't get one.
12992d60b848STomohiro Kusumi 	 *
13002d60b848STomohiro Kusumi 	 * Lock the directory exclusively for now to guarantee that
13012d60b848STomohiro Kusumi 	 * we can find an unused lhc for the name.  Due to collisions,
13022d60b848STomohiro Kusumi 	 * two different creates can end up with the same lhc so we
13032d60b848STomohiro Kusumi 	 * cannot depend on the OS to prevent the collision.
13042d60b848STomohiro Kusumi 	 */
13052d60b848STomohiro Kusumi 	hammer2_inode_modify(dip);
13062d60b848STomohiro Kusumi 
13072d60b848STomohiro Kusumi 	/*
13082d60b848STomohiro Kusumi 	 * Locate an unused key in the collision space.  The name is
13092d60b848STomohiro Kusumi 	 * required here (asserted above), so lhc is always derived from it.
13102d60b848STomohiro Kusumi 	 */
13112d60b848STomohiro Kusumi 	{
13122d60b848STomohiro Kusumi 		hammer2_xop_scanlhc_t *sxop;
13132d60b848STomohiro Kusumi 		hammer2_key_t lhcbase;
13142d60b848STomohiro Kusumi 
13152d60b848STomohiro Kusumi 		lhcbase = lhc;
13162d60b848STomohiro Kusumi 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
13172d60b848STomohiro Kusumi 		sxop->lhc = lhc;
13182d60b848STomohiro Kusumi 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
13192d60b848STomohiro Kusumi 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
13202d60b848STomohiro Kusumi 			if (lhc != sxop->head.cluster.focus->bref.key)
13212d60b848STomohiro Kusumi 				break;
13222d60b848STomohiro Kusumi 			++lhc;
13232d60b848STomohiro Kusumi 		}
13242d60b848STomohiro Kusumi 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
13252d60b848STomohiro Kusumi 
13262d60b848STomohiro Kusumi 		if (error) {
13272d60b848STomohiro Kusumi 			if (error != HAMMER2_ERROR_ENOENT)
13282d60b848STomohiro Kusumi 				goto done2;
13292d60b848STomohiro Kusumi 			++lhc;
13302d60b848STomohiro Kusumi 			error = 0;
13312d60b848STomohiro Kusumi 		}
13322d60b848STomohiro Kusumi 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
13332d60b848STomohiro Kusumi 			error = HAMMER2_ERROR_ENOSPC;
13342d60b848STomohiro Kusumi 			goto done2;
13352d60b848STomohiro Kusumi 		}
13362d60b848STomohiro Kusumi 	}
13372d60b848STomohiro Kusumi 
13382d60b848STomohiro Kusumi 	/*
13392d60b848STomohiro Kusumi 	 * Create the directory entry with the lhc as the key.
13402d60b848STomohiro Kusumi 	 */
13412d60b848STomohiro Kusumi 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
13422d60b848STomohiro Kusumi 	xop->lhc = lhc;
13432d60b848STomohiro Kusumi 	bzero(&xop->dirent, sizeof(xop->dirent));
13442d60b848STomohiro Kusumi 	xop->dirent.inum = inum;
13452d60b848STomohiro Kusumi 	xop->dirent.type = type;
13462d60b848STomohiro Kusumi 	xop->dirent.namlen = name_len;
13472d60b848STomohiro Kusumi 
13482d60b848STomohiro Kusumi 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
13492d60b848STomohiro Kusumi 	hammer2_xop_setname(&xop->head, name, name_len);
13502d60b848STomohiro Kusumi 
13512d60b848STomohiro Kusumi 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
13522d60b848STomohiro Kusumi 
13532d60b848STomohiro Kusumi 	error = hammer2_xop_collect(&xop->head, 0);
13542d60b848STomohiro Kusumi 
13552d60b848STomohiro Kusumi 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
13562d60b848STomohiro Kusumi done2:
13572d60b848STomohiro Kusumi 	error = hammer2_error_to_errno(error);
13582d60b848STomohiro Kusumi 
13592d60b848STomohiro Kusumi 	return error;
13602d60b848STomohiro Kusumi }
13612d60b848STomohiro Kusumi 
13622d60b848STomohiro Kusumi /*
13632d60b848STomohiro Kusumi  * Repoint ip->cluster's chains to cluster's chains and fixup the default
13642d60b848STomohiro Kusumi  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
13652d60b848STomohiro Kusumi  * filters out invalid or non-matching elements.
13662d60b848STomohiro Kusumi  *
13672d60b848STomohiro Kusumi  * Caller must hold the inode exclusively locked; the cluster, if not
13682d60b848STomohiro Kusumi  * NULL, must also be locked.
13692d60b848STomohiro Kusumi  *
13702d60b848STomohiro Kusumi  * Cluster may be NULL to clean out any chains in ip->cluster.
13712d60b848STomohiro Kusumi  */
13722d60b848STomohiro Kusumi void
13732d60b848STomohiro Kusumi hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
13742d60b848STomohiro Kusumi {
13752d60b848STomohiro Kusumi 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
13762d60b848STomohiro Kusumi 	hammer2_chain_t *ochain;
13772d60b848STomohiro Kusumi 	hammer2_chain_t *nchain;
13782d60b848STomohiro Kusumi 	int i;
13792d60b848STomohiro Kusumi 
13802d60b848STomohiro Kusumi 	bzero(dropch, sizeof(dropch));
13812d60b848STomohiro Kusumi 
13822d60b848STomohiro Kusumi 	/*
1383*6b47f3eaSTomohiro Kusumi 	 * Drop any cached (typically data) chains related to this inode
1384*6b47f3eaSTomohiro Kusumi 	 */
1385*6b47f3eaSTomohiro Kusumi 	hammer2_spin_ex(&ip->cluster_spin);
1386*6b47f3eaSTomohiro Kusumi 	for (i = 0; i < ip->ccache_nchains; ++i) {
1387*6b47f3eaSTomohiro Kusumi 		dropch[i] = ip->ccache[i].chain;
1388*6b47f3eaSTomohiro Kusumi 		ip->ccache[i].flags = 0;
1389*6b47f3eaSTomohiro Kusumi 		ip->ccache[i].chain = NULL;
1390*6b47f3eaSTomohiro Kusumi 	}
1391*6b47f3eaSTomohiro Kusumi 	ip->ccache_nchains = 0;
1392*6b47f3eaSTomohiro Kusumi 	hammer2_spin_unex(&ip->cluster_spin);
1393*6b47f3eaSTomohiro Kusumi 
1394*6b47f3eaSTomohiro Kusumi 	while (--i >= 0) {
1395*6b47f3eaSTomohiro Kusumi 		if (dropch[i]) {
1396*6b47f3eaSTomohiro Kusumi 			hammer2_chain_drop(dropch[i]);
1397*6b47f3eaSTomohiro Kusumi 			dropch[i] = NULL;
1398*6b47f3eaSTomohiro Kusumi 		}
1399*6b47f3eaSTomohiro Kusumi 	}
1400*6b47f3eaSTomohiro Kusumi 
1401*6b47f3eaSTomohiro Kusumi 	/*
14022d60b848STomohiro Kusumi 	 * Replace chains in ip->cluster with chains from cluster and
14032d60b848STomohiro Kusumi 	 * adjust the focus if necessary.
14042d60b848STomohiro Kusumi 	 *
14052d60b848STomohiro Kusumi 	 * NOTE: nchain and/or ochain can be NULL due to gaps
14062d60b848STomohiro Kusumi 	 *	 in the cluster arrays.
14072d60b848STomohiro Kusumi 	 */
14082d60b848STomohiro Kusumi 	hammer2_spin_ex(&ip->cluster_spin);
14092d60b848STomohiro Kusumi 	for (i = 0; cluster && i < cluster->nchains; ++i) {
14102d60b848STomohiro Kusumi 		/*
14112d60b848STomohiro Kusumi 		 * Do not replace elements which are the same.  Also handle
14122d60b848STomohiro Kusumi 		 * element count discrepancies.
14132d60b848STomohiro Kusumi 		 */
14142d60b848STomohiro Kusumi 		nchain = cluster->array[i].chain;
14152d60b848STomohiro Kusumi 		if (i < ip->cluster.nchains) {
14162d60b848STomohiro Kusumi 			ochain = ip->cluster.array[i].chain;
14172d60b848STomohiro Kusumi 			if (ochain == nchain)
14182d60b848STomohiro Kusumi 				continue;
14192d60b848STomohiro Kusumi 		} else {
14202d60b848STomohiro Kusumi 			ochain = NULL;
14212d60b848STomohiro Kusumi 		}
14222d60b848STomohiro Kusumi 
14232d60b848STomohiro Kusumi 		/*
14242d60b848STomohiro Kusumi 		 * Make adjustments
14252d60b848STomohiro Kusumi 		 */
14262d60b848STomohiro Kusumi 		ip->cluster.array[i].chain = nchain;
14272d60b848STomohiro Kusumi 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
14282d60b848STomohiro Kusumi 		ip->cluster.array[i].flags |= cluster->array[i].flags &
14292d60b848STomohiro Kusumi 					      HAMMER2_CITEM_INVALID;
14302d60b848STomohiro Kusumi 		if (nchain)
14312d60b848STomohiro Kusumi 			hammer2_chain_ref(nchain);
14322d60b848STomohiro Kusumi 		dropch[i] = ochain;
14332d60b848STomohiro Kusumi 	}
14342d60b848STomohiro Kusumi 
14352d60b848STomohiro Kusumi 	/*
14362d60b848STomohiro Kusumi 	 * Release any left-over chains in ip->cluster.
14372d60b848STomohiro Kusumi 	 */
14382d60b848STomohiro Kusumi 	while (i < ip->cluster.nchains) {
14392d60b848STomohiro Kusumi 		nchain = ip->cluster.array[i].chain;
14402d60b848STomohiro Kusumi 		if (nchain) {
14412d60b848STomohiro Kusumi 			ip->cluster.array[i].chain = NULL;
14422d60b848STomohiro Kusumi 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
14432d60b848STomohiro Kusumi 		}
14442d60b848STomohiro Kusumi 		dropch[i] = nchain;
14452d60b848STomohiro Kusumi 		++i;
14462d60b848STomohiro Kusumi 	}
14472d60b848STomohiro Kusumi 
14482d60b848STomohiro Kusumi 	/*
14492d60b848STomohiro Kusumi 	 * Fixup fields.  Note that the inode-embedded cluster is never
14502d60b848STomohiro Kusumi 	 * directly locked.
14512d60b848STomohiro Kusumi 	 */
14522d60b848STomohiro Kusumi 	if (cluster) {
14532d60b848STomohiro Kusumi 		ip->cluster.nchains = cluster->nchains;
14542d60b848STomohiro Kusumi 		ip->cluster.focus = cluster->focus;
14552d60b848STomohiro Kusumi 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
14562d60b848STomohiro Kusumi 	} else {
14572d60b848STomohiro Kusumi 		ip->cluster.nchains = 0;
14582d60b848STomohiro Kusumi 		ip->cluster.focus = NULL;
14592d60b848STomohiro Kusumi 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
14602d60b848STomohiro Kusumi 	}
14612d60b848STomohiro Kusumi 
14622d60b848STomohiro Kusumi 	hammer2_spin_unex(&ip->cluster_spin);
14632d60b848STomohiro Kusumi 
14642d60b848STomohiro Kusumi 	/*
14652d60b848STomohiro Kusumi 	 * Cleanup outside of spinlock
14662d60b848STomohiro Kusumi 	 */
14672d60b848STomohiro Kusumi 	while (--i >= 0) {
14682d60b848STomohiro Kusumi 		if (dropch[i])
14692d60b848STomohiro Kusumi 			hammer2_chain_drop(dropch[i]);
14702d60b848STomohiro Kusumi 	}
14712d60b848STomohiro Kusumi }
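
/*
 * Minimal model of the deferred-drop idiom used above.  The types and
 * helpers here are hypothetical (obj_drop() is assumed to exist) and
 * nslots is assumed to be <= MAXSLOTS_X.  References that must be
 * released are only collected while the spinlock is held; the actual
 * drops happen after the lock is released, because dropping a chain
 * may block or recurse and must never run under a spinlock.
 */
#define MAXSLOTS_X	8	/* stand-in for HAMMER2_MAXCLUSTER */

struct obj;			/* hypothetical refcounted object */
void obj_drop(struct obj *);	/* may block; assumed to exist */

static void
release_slots(struct obj **slots, int nslots,
	      void (*lock)(void), void (*unlock)(void))
{
	struct obj *dropch[MAXSLOTS_X];
	int i;

	lock();
	for (i = 0; i < nslots; ++i) {
		dropch[i] = slots[i];	/* snapshot under the lock */
		slots[i] = NULL;
	}
	unlock();

	while (--i >= 0) {		/* drop outside the lock */
		if (dropch[i])
			obj_drop(dropch[i]);
	}
}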
14722d60b848STomohiro Kusumi 
14732d60b848STomohiro Kusumi /*
14742d60b848STomohiro Kusumi  * Repoint a single element from the cluster to the ip.  Used by the
14752d60b848STomohiro Kusumi  * synchronization threads to update inodes piecemeal.  Does not change
14762d60b848STomohiro Kusumi  * focus and requires inode to be re-locked to clean-up flags (XXX).
14772d60b848STomohiro Kusumi  */
14782d60b848STomohiro Kusumi void
14792d60b848STomohiro Kusumi hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
14802d60b848STomohiro Kusumi 			  int idx)
14812d60b848STomohiro Kusumi {
1482*6b47f3eaSTomohiro Kusumi 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
14832d60b848STomohiro Kusumi 	hammer2_chain_t *ochain;
14842d60b848STomohiro Kusumi 	hammer2_chain_t *nchain;
14852d60b848STomohiro Kusumi 	int i;
14862d60b848STomohiro Kusumi 
1487*6b47f3eaSTomohiro Kusumi 	/*
1488*6b47f3eaSTomohiro Kusumi 	 * Drop any cached (typically data) chains related to this inode
1489*6b47f3eaSTomohiro Kusumi 	 */
1490*6b47f3eaSTomohiro Kusumi 	hammer2_spin_ex(&ip->cluster_spin);
1491*6b47f3eaSTomohiro Kusumi 	for (i = 0; i < ip->ccache_nchains; ++i) {
1492*6b47f3eaSTomohiro Kusumi 		dropch[i] = ip->ccache[i].chain;
1493*6b47f3eaSTomohiro Kusumi 		ip->ccache[i].chain = NULL;
1494*6b47f3eaSTomohiro Kusumi 	}
1495*6b47f3eaSTomohiro Kusumi 	ip->ccache_nchains = 0;
1496*6b47f3eaSTomohiro Kusumi 	hammer2_spin_unex(&ip->cluster_spin);
1497*6b47f3eaSTomohiro Kusumi 
1498*6b47f3eaSTomohiro Kusumi 	while (--i >= 0) {
1499*6b47f3eaSTomohiro Kusumi 		if (dropch[i])
1500*6b47f3eaSTomohiro Kusumi 			hammer2_chain_drop(dropch[i]);
1501*6b47f3eaSTomohiro Kusumi 	}
1502*6b47f3eaSTomohiro Kusumi 
1503*6b47f3eaSTomohiro Kusumi 	/*
1504*6b47f3eaSTomohiro Kusumi 	 * Replace inode chain at index
1505*6b47f3eaSTomohiro Kusumi 	 */
15062d60b848STomohiro Kusumi 	hammer2_spin_ex(&ip->cluster_spin);
15072d60b848STomohiro Kusumi 	KKASSERT(idx < cluster->nchains);
15082d60b848STomohiro Kusumi 	if (idx < ip->cluster.nchains) {
15092d60b848STomohiro Kusumi 		ochain = ip->cluster.array[idx].chain;
15102d60b848STomohiro Kusumi 		nchain = cluster->array[idx].chain;
15112d60b848STomohiro Kusumi 	} else {
15122d60b848STomohiro Kusumi 		ochain = NULL;
15132d60b848STomohiro Kusumi 		nchain = cluster->array[idx].chain;
15142d60b848STomohiro Kusumi 		for (i = ip->cluster.nchains; i <= idx; ++i) {
15152d60b848STomohiro Kusumi 			bzero(&ip->cluster.array[i],
15162d60b848STomohiro Kusumi 			      sizeof(ip->cluster.array[i]));
15172d60b848STomohiro Kusumi 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
15182d60b848STomohiro Kusumi 		}
15192d60b848STomohiro Kusumi 		ip->cluster.nchains = idx + 1;
15202d60b848STomohiro Kusumi 	}
15212d60b848STomohiro Kusumi 	if (ochain != nchain) {
15222d60b848STomohiro Kusumi 		/*
15232d60b848STomohiro Kusumi 		 * Make adjustments.
15242d60b848STomohiro Kusumi 		 */
15252d60b848STomohiro Kusumi 		ip->cluster.array[idx].chain = nchain;
15262d60b848STomohiro Kusumi 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
15272d60b848STomohiro Kusumi 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
15282d60b848STomohiro Kusumi 						HAMMER2_CITEM_INVALID;
15292d60b848STomohiro Kusumi 	}
15302d60b848STomohiro Kusumi 	hammer2_spin_unex(&ip->cluster_spin);
15312d60b848STomohiro Kusumi 	if (ochain != nchain) {
15322d60b848STomohiro Kusumi 		if (nchain)
15332d60b848STomohiro Kusumi 			hammer2_chain_ref(nchain);
15342d60b848STomohiro Kusumi 		if (ochain)
15352d60b848STomohiro Kusumi 			hammer2_chain_drop(ochain);
15362d60b848STomohiro Kusumi 	}
15372d60b848STomohiro Kusumi }
15382d60b848STomohiro Kusumi 
15392d60b848STomohiro Kusumi hammer2_key_t
15402d60b848STomohiro Kusumi hammer2_inode_data_count(const hammer2_inode_t *ip)
15412d60b848STomohiro Kusumi {
15422d60b848STomohiro Kusumi 	hammer2_chain_t *chain;
15432d60b848STomohiro Kusumi 	hammer2_key_t count = 0;
15442d60b848STomohiro Kusumi 	int i;
15452d60b848STomohiro Kusumi 
15462d60b848STomohiro Kusumi 	for (i = 0; i < ip->cluster.nchains; ++i) {
15472d60b848STomohiro Kusumi 		if ((chain = ip->cluster.array[i].chain) != NULL) {
15482d60b848STomohiro Kusumi 			if (count < chain->bref.embed.stats.data_count)
15492d60b848STomohiro Kusumi 				count = chain->bref.embed.stats.data_count;
15502d60b848STomohiro Kusumi 		}
15512d60b848STomohiro Kusumi 	}
15522d60b848STomohiro Kusumi 	return count;
15532d60b848STomohiro Kusumi }
15542d60b848STomohiro Kusumi 
15552d60b848STomohiro Kusumi hammer2_key_t
15562d60b848STomohiro Kusumi hammer2_inode_inode_count(const hammer2_inode_t *ip)
15572d60b848STomohiro Kusumi {
15582d60b848STomohiro Kusumi 	hammer2_chain_t *chain;
15592d60b848STomohiro Kusumi 	hammer2_key_t count = 0;
15602d60b848STomohiro Kusumi 	int i;
15612d60b848STomohiro Kusumi 
15622d60b848STomohiro Kusumi 	for (i = 0; i < ip->cluster.nchains; ++i) {
15632d60b848STomohiro Kusumi 		if ((chain = ip->cluster.array[i].chain) != NULL) {
15642d60b848STomohiro Kusumi 			if (count < chain->bref.embed.stats.inode_count)
15652d60b848STomohiro Kusumi 				count = chain->bref.embed.stats.inode_count;
15662d60b848STomohiro Kusumi 		}
15672d60b848STomohiro Kusumi 	}
15682d60b848STomohiro Kusumi 	return count;
15692d60b848STomohiro Kusumi }
15702d60b848STomohiro Kusumi 
15712d60b848STomohiro Kusumi /*
15722d60b848STomohiro Kusumi  * Called with a locked inode to finish unlinking an inode after xop_unlink
15732d60b848STomohiro Kusumi  * has been run.  This function is responsible for decrementing nlinks.
15742d60b848STomohiro Kusumi  *
15752d60b848STomohiro Kusumi  * We don't bother decrementing nlinks if the file is not open and this was
15762d60b848STomohiro Kusumi  * the last link.
15772d60b848STomohiro Kusumi  *
15782d60b848STomohiro Kusumi  * If the inode is a hardlink target, its chain has not yet been deleted;
15792d60b848STomohiro Kusumi  * otherwise its chain has been deleted.
15802d60b848STomohiro Kusumi  *
15812d60b848STomohiro Kusumi  * If the file is still open, any prior deletion was not permanent and
15822d60b848STomohiro Kusumi  * the inode is left intact with nlinks == 0.
15832d60b848STomohiro Kusumi  */
15842d60b848STomohiro Kusumi int
15856bcbb706STomohiro Kusumi hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vprecyclep)
15862d60b848STomohiro Kusumi {
15876bcbb706STomohiro Kusumi 	struct m_vnode *vp;
15882d60b848STomohiro Kusumi 
15892d60b848STomohiro Kusumi 	/*
15902d60b848STomohiro Kusumi 	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
15912d60b848STomohiro Kusumi 	 * negative), and just assume a transition to 0.
15922d60b848STomohiro Kusumi 	 */
15932d60b848STomohiro Kusumi 	if ((int64_t)ip->meta.nlinks <= 1) {
15942d60b848STomohiro Kusumi 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
15952d60b848STomohiro Kusumi 
15962d60b848STomohiro Kusumi 		/*
15972d60b848STomohiro Kusumi 		 * Scrap the vnode as quickly as possible.  The vp association
15982d60b848STomohiro Kusumi 		 * stays intact while we hold the inode locked.  However, vp
15992d60b848STomohiro Kusumi 		 * can be NULL here.
16002d60b848STomohiro Kusumi 		 */
16012d60b848STomohiro Kusumi 		vp = ip->vp;
16022d60b848STomohiro Kusumi 		cpu_ccfence();
16032d60b848STomohiro Kusumi 
16042d60b848STomohiro Kusumi 		/*
16052d60b848STomohiro Kusumi 		 * If no vp is associated there is no high-level state to
16062d60b848STomohiro Kusumi 		 * deal with and we can scrap the inode immediately.
16072d60b848STomohiro Kusumi 		 */
16082d60b848STomohiro Kusumi 		if (vp == NULL) {
16092d60b848STomohiro Kusumi 			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
16102d60b848STomohiro Kusumi 				atomic_set_int(&ip->flags,
16112d60b848STomohiro Kusumi 					       HAMMER2_INODE_DELETING);
16122d60b848STomohiro Kusumi 				hammer2_inode_delayed_sideq(ip);
16132d60b848STomohiro Kusumi 			}
16142d60b848STomohiro Kusumi 			return 0;
16152d60b848STomohiro Kusumi 		}
16162d60b848STomohiro Kusumi 
16172d60b848STomohiro Kusumi 		/*
16182d60b848STomohiro Kusumi 		 * Because INODE_ISUNLINKED is set with the inode lock
16192d60b848STomohiro Kusumi 		 * held, the vnode cannot be ripped up from under us.
16202d60b848STomohiro Kusumi 		 * There may still be refs so knote anyone waiting for
16212d60b848STomohiro Kusumi 		 * a delete notification.
16222d60b848STomohiro Kusumi 		 *
16232d60b848STomohiro Kusumi 		 * The vnode is not necessarily ref'd due to the unlinking
16242d60b848STomohiro Kusumi 		 * itself, so we have to defer handling to the end of the
16252d60b848STomohiro Kusumi 		 * VOP, which will then call hammer2_inode_vprecycle().
16262d60b848STomohiro Kusumi 		 */
16272d60b848STomohiro Kusumi 		if (vprecyclep) {
16282d60b848STomohiro Kusumi 			vhold(vp);
16292d60b848STomohiro Kusumi 			*vprecyclep = vp;
16302d60b848STomohiro Kusumi 		}
16312d60b848STomohiro Kusumi 	}
16322d60b848STomohiro Kusumi 
16332d60b848STomohiro Kusumi 	/*
16342d60b848STomohiro Kusumi 	 * Adjust nlinks and retain the inode on the media for now
16352d60b848STomohiro Kusumi 	 */
16362d60b848STomohiro Kusumi 	hammer2_inode_modify(ip);
16372d60b848STomohiro Kusumi 	if ((int64_t)ip->meta.nlinks > 1)
16382d60b848STomohiro Kusumi 		--ip->meta.nlinks;
16392d60b848STomohiro Kusumi 	else
16402d60b848STomohiro Kusumi 		ip->meta.nlinks = 0;
16412d60b848STomohiro Kusumi 
16422d60b848STomohiro Kusumi 	return 0;
16432d60b848STomohiro Kusumi }
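
/*
 * Decision model (illustrative only) of the finisher above: on the
 * last link, an inode with no vnode can be flagged DELETING and queued
 * immediately, while one with a vnode defers teardown to
 * hammer2_inode_vprecycle() at the end of the VOP.
 */
enum unlink_action { UNLINK_QUEUE_DELETE, UNLINK_DEFER_VPRECYCLE,
		     UNLINK_KEEP };

static enum unlink_action
unlink_classify(int64_t nlinks, int has_vp)
{
	if (nlinks <= 1)
		return has_vp ? UNLINK_DEFER_VPRECYCLE : UNLINK_QUEUE_DELETE;
	return UNLINK_KEEP;	/* other links remain; just decrement */
}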
16442d60b848STomohiro Kusumi 
16452d60b848STomohiro Kusumi /*
16462d60b848STomohiro Kusumi  * Called at the end of a VOP that removes a file with a vnode that
16472d60b848STomohiro Kusumi  * we want to try to dispose of quickly due to a file deletion.  If
16482d60b848STomohiro Kusumi  * we don't do this, the vnode can hang around with 0 refs for a very
16492d60b848STomohiro Kusumi  * long time and prevent reclamation of the underlying file and inode
16502d60b848STomohiro Kusumi  * (inode remains on-media with nlinks == 0 until the vnode is recycled
16512d60b848STomohiro Kusumi  * due to random system activity or a umount).
16522d60b848STomohiro Kusumi  */
16532d60b848STomohiro Kusumi void
16546bcbb706STomohiro Kusumi hammer2_inode_vprecycle(struct m_vnode *vp)
16552d60b848STomohiro Kusumi {
16562d60b848STomohiro Kusumi 	if (vget(vp, LK_EXCLUSIVE) == 0) {
16572d60b848STomohiro Kusumi 		vfinalize(vp);
16582d60b848STomohiro Kusumi 		hammer2_knote(vp, NOTE_DELETE);
16592d60b848STomohiro Kusumi 		vdrop(vp);
16602d60b848STomohiro Kusumi 		vput(vp);
16612d60b848STomohiro Kusumi 	} else {
16622d60b848STomohiro Kusumi 		vdrop(vp);
16632d60b848STomohiro Kusumi 	}
16642d60b848STomohiro Kusumi }
16652d60b848STomohiro Kusumi 
16662d60b848STomohiro Kusumi 
16672d60b848STomohiro Kusumi /*
16682d60b848STomohiro Kusumi  * Mark an inode as being modified, meaning that the caller will modify
16692d60b848STomohiro Kusumi  * ip->meta.
16702d60b848STomohiro Kusumi  *
16712d60b848STomohiro Kusumi  * If a vnode is present we set the vnode dirty and the nominal filesystem
16722d60b848STomohiro Kusumi  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
16732d60b848STomohiro Kusumi  * we must ensure that the inode is on pmp->sideq.
16742d60b848STomohiro Kusumi  *
16752d60b848STomohiro Kusumi  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
16762d60b848STomohiro Kusumi  *	 shortcut vsyncscan() and flush inodes and their related vnodes
16772d60b848STomohiro Kusumi  *	 in two stages.  H2 still calls vfsync() for each vnode.
16782d60b848STomohiro Kusumi  *
16792d60b848STomohiro Kusumi  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
16802d60b848STomohiro Kusumi  *	 only modifying the in-memory inode.  A modify_tid is synchronized
16812d60b848STomohiro Kusumi  *	 later when the inode gets flushed.
16822d60b848STomohiro Kusumi  *
16832d60b848STomohiro Kusumi  * NOTE: As an exception to the general rule, the inode MAY be locked
16842d60b848STomohiro Kusumi  *	 shared for this particular call.
16852d60b848STomohiro Kusumi  */
16862d60b848STomohiro Kusumi void
16872d60b848STomohiro Kusumi hammer2_inode_modify(hammer2_inode_t *ip)
16882d60b848STomohiro Kusumi {
16892d60b848STomohiro Kusumi 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
16902d60b848STomohiro Kusumi 	if (ip->vp)
16912d60b848STomohiro Kusumi 		vsetisdirty(ip->vp);
16922d60b848STomohiro Kusumi 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
16932d60b848STomohiro Kusumi 		hammer2_inode_delayed_sideq(ip);
16942d60b848STomohiro Kusumi }
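
/*
 * Typical caller pattern, sketched for illustration (new_mtime is a
 * caller-supplied value, not a hammer2 symbol): take the inode lock,
 * declare the modification, then update ip->meta.  The sideq entry
 * queued by hammer2_inode_modify() ensures the next filesystem sync
 * writes the new meta-data back.
 */
static inline void
example_touch_mtime(hammer2_inode_t *ip, uint64_t new_mtime)
{
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.mtime = new_mtime;
	hammer2_inode_unlock(ip);
}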
16952d60b848STomohiro Kusumi 
16962d60b848STomohiro Kusumi /*
16972d60b848STomohiro Kusumi  * Synchronize the inode's frontend state with the chain state prior
16982d60b848STomohiro Kusumi  * to any explicit flush of the inode or any strategy write call.  This
16992d60b848STomohiro Kusumi  * does not flush the inode's chain or its sub-topology to media (higher
17002d60b848STomohiro Kusumi  * level layers are responsible for doing that).
17012d60b848STomohiro Kusumi  *
17022d60b848STomohiro Kusumi  * Called with a locked inode inside a normal transaction.
17032d60b848STomohiro Kusumi  *
17042d60b848STomohiro Kusumi  * inode must be locked.
17052d60b848STomohiro Kusumi  */
17062d60b848STomohiro Kusumi int
17072d60b848STomohiro Kusumi hammer2_inode_chain_sync(hammer2_inode_t *ip)
17082d60b848STomohiro Kusumi {
17092d60b848STomohiro Kusumi 	int error;
17102d60b848STomohiro Kusumi 
17112d60b848STomohiro Kusumi 	error = 0;
17122d60b848STomohiro Kusumi 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
17132d60b848STomohiro Kusumi 		hammer2_xop_fsync_t *xop;
17142d60b848STomohiro Kusumi 
17152d60b848STomohiro Kusumi 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
17162d60b848STomohiro Kusumi 		xop->clear_directdata = 0;
17172d60b848STomohiro Kusumi 		if (ip->flags & HAMMER2_INODE_RESIZED) {
17182d60b848STomohiro Kusumi 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
17192d60b848STomohiro Kusumi 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
17202d60b848STomohiro Kusumi 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
17212d60b848STomohiro Kusumi 				xop->clear_directdata = 1;
17222d60b848STomohiro Kusumi 			}
17232d60b848STomohiro Kusumi 			xop->osize = ip->osize;
17242d60b848STomohiro Kusumi 		} else {
17252d60b848STomohiro Kusumi 			xop->osize = ip->meta.size;	/* safety */
17262d60b848STomohiro Kusumi 		}
17272d60b848STomohiro Kusumi 		xop->ipflags = ip->flags;
17282d60b848STomohiro Kusumi 		xop->meta = ip->meta;
17292d60b848STomohiro Kusumi 
17302d60b848STomohiro Kusumi 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
17312d60b848STomohiro Kusumi 					     HAMMER2_INODE_MODIFIED);
17322d60b848STomohiro Kusumi 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
17332d60b848STomohiro Kusumi 		error = hammer2_xop_collect(&xop->head, 0);
17342d60b848STomohiro Kusumi 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
17352d60b848STomohiro Kusumi 		if (error == HAMMER2_ERROR_ENOENT)
17362d60b848STomohiro Kusumi 			error = 0;
17372d60b848STomohiro Kusumi 		if (error) {
17382d60b848STomohiro Kusumi 			kprintf("hammer2: unable to fsync inode %p\n", ip);
17392d60b848STomohiro Kusumi 			/*
17402d60b848STomohiro Kusumi 			atomic_set_int(&ip->flags,
17412d60b848STomohiro Kusumi 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
17422d60b848STomohiro Kusumi 						       HAMMER2_INODE_MODIFIED));
17432d60b848STomohiro Kusumi 			*/
17442d60b848STomohiro Kusumi 			/* XXX return error somehow? */
17452d60b848STomohiro Kusumi 		}
17462d60b848STomohiro Kusumi 	}
17472d60b848STomohiro Kusumi 	return error;
17482d60b848STomohiro Kusumi }
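
/*
 * Condensed model of the RESIZED handling above (the constant is a
 * stand-in for HAMMER2_EMBEDDED_BYTES): a file that was using
 * inode-embedded data and grew past the embedded limit must have
 * OPFLAG_DIRECTDATA cleared so the backend switches the inode over to
 * a normal blockmap.
 */
#include <stdint.h>

#define EMBEDDED_BYTES_X	512	/* stand-in constant */

static int
must_clear_directdata(int had_directdata, uint64_t new_size)
{
	return had_directdata && new_size > EMBEDDED_BYTES_X;
}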
17492d60b848STomohiro Kusumi 
17502d60b848STomohiro Kusumi /*
17512d60b848STomohiro Kusumi  * When an inode is flagged INODE_CREATING its chains have not actually
17522d60b848STomohiro Kusumi  * been inserted into the on-media tree yet.
17532d60b848STomohiro Kusumi  */
17542d60b848STomohiro Kusumi int
17552d60b848STomohiro Kusumi hammer2_inode_chain_ins(hammer2_inode_t *ip)
17562d60b848STomohiro Kusumi {
17572d60b848STomohiro Kusumi 	int error;
17582d60b848STomohiro Kusumi 
17592d60b848STomohiro Kusumi 	error = 0;
17602d60b848STomohiro Kusumi 	if (ip->flags & HAMMER2_INODE_CREATING) {
17612d60b848STomohiro Kusumi 		hammer2_xop_create_t *xop;
17622d60b848STomohiro Kusumi 
17632d60b848STomohiro Kusumi 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
17642d60b848STomohiro Kusumi 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
17652d60b848STomohiro Kusumi 		xop->lhc = ip->meta.inum;
17662d60b848STomohiro Kusumi 		xop->flags = 0;
17672d60b848STomohiro Kusumi 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
17682d60b848STomohiro Kusumi 		error = hammer2_xop_collect(&xop->head, 0);
17692d60b848STomohiro Kusumi 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
17702d60b848STomohiro Kusumi 		if (error == HAMMER2_ERROR_ENOENT)
17712d60b848STomohiro Kusumi 			error = 0;
17722d60b848STomohiro Kusumi 		if (error) {
17732d60b848STomohiro Kusumi 			kprintf("hammer2: backend unable to "
1774f8a1147cSTomohiro Kusumi 				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
17752d60b848STomohiro Kusumi 			/* XXX return error somehow? */
17762d60b848STomohiro Kusumi 		}
17772d60b848STomohiro Kusumi 	}
17782d60b848STomohiro Kusumi 	return error;
17792d60b848STomohiro Kusumi }
17802d60b848STomohiro Kusumi 
17812d60b848STomohiro Kusumi /*
17822d60b848STomohiro Kusumi  * When an inode is flagged INODE_DELETING it has been deleted (no directory
17832d60b848STomohiro Kusumi  * entry or open refs are left, though as an optimization H2 might leave
17842d60b848STomohiro Kusumi  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
17852d60b848STomohiro Kusumi  * needs to actually remove it from the topology.
17862d60b848STomohiro Kusumi  *
17872d60b848STomohiro Kusumi  * NOTE: backend flush must still sync and flush the deleted inode to clean
17882d60b848STomohiro Kusumi  *	 out related chains.
17892d60b848STomohiro Kusumi  *
17902d60b848STomohiro Kusumi  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
17912d60b848STomohiro Kusumi  *	 to prevent the vnode reclaim code from trying to delete it twice.
17922d60b848STomohiro Kusumi  */
17932d60b848STomohiro Kusumi int
17942d60b848STomohiro Kusumi hammer2_inode_chain_des(hammer2_inode_t *ip)
17952d60b848STomohiro Kusumi {
17962d60b848STomohiro Kusumi 	int error;
17972d60b848STomohiro Kusumi 
17982d60b848STomohiro Kusumi 	error = 0;
17992d60b848STomohiro Kusumi 	if (ip->flags & HAMMER2_INODE_DELETING) {
18002d60b848STomohiro Kusumi 		hammer2_xop_destroy_t *xop;
18012d60b848STomohiro Kusumi 
18022d60b848STomohiro Kusumi 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
18032d60b848STomohiro Kusumi 					     HAMMER2_INODE_ISUNLINKED);
18042d60b848STomohiro Kusumi 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
18052d60b848STomohiro Kusumi 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
18062d60b848STomohiro Kusumi 		error = hammer2_xop_collect(&xop->head, 0);
18072d60b848STomohiro Kusumi 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
18082d60b848STomohiro Kusumi 
18092d60b848STomohiro Kusumi 		if (error == HAMMER2_ERROR_ENOENT)
18102d60b848STomohiro Kusumi 			error = 0;
18112d60b848STomohiro Kusumi 		if (error) {
18122d60b848STomohiro Kusumi 			kprintf("hammer2: backend unable to "
1813f8a1147cSTomohiro Kusumi 				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
18142d60b848STomohiro Kusumi 			/* XXX return error somehow? */
18152d60b848STomohiro Kusumi 		}
18162d60b848STomohiro Kusumi 	}
18172d60b848STomohiro Kusumi 	return error;
18182d60b848STomohiro Kusumi }
18192d60b848STomohiro Kusumi 
18202d60b848STomohiro Kusumi /*
18212d60b848STomohiro Kusumi  * Flushes the inode's chain and its sub-topology to media.  Interlocks
18222d60b848STomohiro Kusumi  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
18232d60b848STomohiro Kusumi  * function creating or modifying a chain under this inode will re-set the
18242d60b848STomohiro Kusumi  * flag.
18252d60b848STomohiro Kusumi  *
18262d60b848STomohiro Kusumi  * inode must be locked.
18272d60b848STomohiro Kusumi  */
18282d60b848STomohiro Kusumi int
18292d60b848STomohiro Kusumi hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
18302d60b848STomohiro Kusumi {
1831*6b47f3eaSTomohiro Kusumi 	hammer2_xop_flush_t *xop;
18322d60b848STomohiro Kusumi 	int error;
18332d60b848STomohiro Kusumi 
18342d60b848STomohiro Kusumi 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
18352d60b848STomohiro Kusumi 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
18362d60b848STomohiro Kusumi 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
18372d60b848STomohiro Kusumi 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
18382d60b848STomohiro Kusumi 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
18392d60b848STomohiro Kusumi 	if (error == HAMMER2_ERROR_ENOENT)
18402d60b848STomohiro Kusumi 		error = 0;
18412d60b848STomohiro Kusumi 
18422d60b848STomohiro Kusumi 	return error;
18432d60b848STomohiro Kusumi }
18442d60b848STomohiro Kusumi 
18452d60b848STomohiro Kusumi int
18462d60b848STomohiro Kusumi vflush(struct mount *mp, int rootrefs, int flags)
18472d60b848STomohiro Kusumi {
18482d60b848STomohiro Kusumi 	hammer2_pfs_t *pmp = MPTOPMP(mp);
18492d60b848STomohiro Kusumi 	struct hammer2_inode *ip, *tmp;
18506bcbb706STomohiro Kusumi 	struct m_vnode *vp;
18511dc6036fSTomohiro Kusumi 	hammer2_key_t count_before, count_after, count_recq;
1852*6b47f3eaSTomohiro Kusumi 	hammer2_inum_hash_t *hash;
1853*6b47f3eaSTomohiro Kusumi 	int i;
18542d60b848STomohiro Kusumi 
1855bc9089a9STomohiro Kusumi 	printf("%s: total chain %ld\n", __func__, hammer2_chain_allocs);
1856bc9089a9STomohiro Kusumi 	printf("%s: total dio %d\n", __func__, hammer2_dio_count);
1857bc9089a9STomohiro Kusumi 
1858*6b47f3eaSTomohiro Kusumi 	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
1859*6b47f3eaSTomohiro Kusumi 		hash = &pmp->inumhash[i];
1860*6b47f3eaSTomohiro Kusumi 		hammer2_spin_ex(&hash->spin);
18612d60b848STomohiro Kusumi 		count_before = 0;
1862*6b47f3eaSTomohiro Kusumi 		for (ip = hash->base; ip; ip = ip->next)
18632d60b848STomohiro Kusumi 			count_before++;
18642d60b848STomohiro Kusumi 
1865*6b47f3eaSTomohiro Kusumi 		for (ip = hash->base; ip;) {
1866*6b47f3eaSTomohiro Kusumi 			tmp = ip->next;
18672d60b848STomohiro Kusumi 			vp = ip->vp;
18682d60b848STomohiro Kusumi 			assert(vp);
1869e00b9e51STomohiro Kusumi 			if (!vp->v_vflushed) {
18702d60b848STomohiro Kusumi 				/*
18711dc6036fSTomohiro Kusumi 				 * Not all inodes are modified and ref'd,
18721dc6036fSTomohiro Kusumi 				 * so ip->refs requirement here is the initial 1.
18732d60b848STomohiro Kusumi 				 */
18741dc6036fSTomohiro Kusumi 				assert(ip->refs > 0);
18752d60b848STomohiro Kusumi 				hammer2_inode_drop(ip);
1876e00b9e51STomohiro Kusumi 				vp->v_vflushed = 1;
18772d60b848STomohiro Kusumi 			}
1878*6b47f3eaSTomohiro Kusumi 			ip = tmp;
18792d60b848STomohiro Kusumi 		}
18802d60b848STomohiro Kusumi 
18812d60b848STomohiro Kusumi 		count_after = 0;
1882*6b47f3eaSTomohiro Kusumi 		for (ip = hash->base; ip; ip = ip->next)
18832d60b848STomohiro Kusumi 			count_after++;
1884*6b47f3eaSTomohiro Kusumi 		hammer2_spin_unex(&hash->spin);
1885*6b47f3eaSTomohiro Kusumi 	}
18862d60b848STomohiro Kusumi 
1887f8a1147cSTomohiro Kusumi 	printf("%s: total inode %jd -> %jd\n",
1888f8a1147cSTomohiro Kusumi 	    __func__, (intmax_t)count_before, (intmax_t)count_after);
18892d60b848STomohiro Kusumi 	assert(count_before >= count_after);
18902d60b848STomohiro Kusumi 
18911dc6036fSTomohiro Kusumi 	count_recq = 0;
18921dc6036fSTomohiro Kusumi 	TAILQ_FOREACH(ip, &pmp->recq, recq_entry)
18931dc6036fSTomohiro Kusumi 		count_recq++;
18941dc6036fSTomohiro Kusumi 	if (count_recq)
18951dc6036fSTomohiro Kusumi 		printf("%s: %jd inode in reclaim queue\n",
18961dc6036fSTomohiro Kusumi 		    __func__, (intmax_t)count_recq);
18972d60b848STomohiro Kusumi 
18982d60b848STomohiro Kusumi 	return 0;
18992d60b848STomohiro Kusumi }
1900