/*
 * Copyright (c) 2015-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

#define H2XOPDESCRIPTOR(label)					\
	hammer2_xop_desc_t hammer2_##label##_desc = {		\
		.storage_func = hammer2_xop_##label,		\
		.id = #label					\
	}
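
/*
 * For reference, a mechanical expansion of the macro above:
 * H2XOPDESCRIPTOR(readdir) produces
 *
 *	hammer2_xop_desc_t hammer2_readdir_desc = {
 *		.storage_func = hammer2_xop_readdir,
 *		.id = "readdir"
 *	};
 */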

H2XOPDESCRIPTOR(ipcluster);
H2XOPDESCRIPTOR(readdir);
H2XOPDESCRIPTOR(nresolve);
H2XOPDESCRIPTOR(unlink);
H2XOPDESCRIPTOR(nrename);
H2XOPDESCRIPTOR(scanlhc);
H2XOPDESCRIPTOR(scanall);
H2XOPDESCRIPTOR(lookup);
H2XOPDESCRIPTOR(delete);
H2XOPDESCRIPTOR(inode_mkdirent);
H2XOPDESCRIPTOR(inode_create);
H2XOPDESCRIPTOR(inode_create_det);
H2XOPDESCRIPTOR(inode_create_ins);
H2XOPDESCRIPTOR(inode_destroy);
H2XOPDESCRIPTOR(inode_chain_sync);
H2XOPDESCRIPTOR(inode_unlinkall);
H2XOPDESCRIPTOR(inode_connect);
H2XOPDESCRIPTOR(inode_flush);
H2XOPDESCRIPTOR(strategy_read);
H2XOPDESCRIPTOR(strategy_write);

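/*
 * objcache backing hammer2_xop_t allocations; see hammer2_xop_alloc()
 * and hammer2_xop_retire() below.
 */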
struct objcache *cache_xops;

/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
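		/*
		 * tsleep_interlock() registers the wait before the cmpset
		 * publishes HAMMER2_THREAD_WAITING, so a wakeup racing the
		 * cmpset is not lost.  If the cmpset fails we retry the
		 * loop without sleeping.
		 */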
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
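 *
 * Returns 0 once any of the flags are detected set, or
 * HAMMER2_ERROR_ETIMEDOUT if the timeout expires first.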
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

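/*
 * Returns non-zero if the thread has been asked to stop, remaster, or
 * freeze, i.e. it should break out of its current operation.
 */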
int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}

/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.desc = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	xop->head.focus_dio = NULL;

	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
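
/*
 * Typical frontend life cycle (an informal sketch; the real callers live
 * in the VOP and strategy paths and add operation-specific setup between
 * these steps):
 *
 *	hammer2_xop_ipcluster_t *xop;
 *
 *	xop = hammer2_xop_alloc(ip, 0);
 *	hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	... access the focus via hammer2_xop_gdata()/hammer2_xop_pdata() ...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */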

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
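	/* "0x" prefix plus 16 hex digits, per the ksnprintf format below */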
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}


void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_setip4(hammer2_xop_head_t *xop, hammer2_inode_t *ip4)
{
	xop->ip4 = ip4;
	hammer2_inode_ref(ip4);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	pmp->xop_groups = kmalloc(hammer2_xop_nthreads *
				  sizeof(hammer2_xop_group_t),
				  M_HAMMER2, M_WAITOK | M_ZERO);
	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < hammer2_xop_nthreads; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	if (pmp->xop_groups == NULL) {
		KKASSERT(pmp->has_xop_threads == 0);
		return;
	}

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < hammer2_xop_nthreads; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
	kfree(pmp->xop_groups, M_HAMMER2);
	pmp->xop_groups = NULL;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The sequencer assigns a worker thread to the XOP.
	 *
	 * (1) The worker threads are partitioned into two sets, one for
	 *     NON-STRATEGY XOPs, and the other for STRATEGY XOPs.  This
	 *     guarantees that strategy calls will always be able to make
	 *     progress and will not deadlock against non-strategy calls.
	 *
	 * (2) If clustered, non-strategy operations to the same inode must
	 *     be serialized.  This is to avoid confusion when issuing
	 *     modifying operations because a XOP completes the instant a
	 *     quorum is reached.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes, but we triple-lock the inodes
	 *	  involved so it shouldn't create a sequencing schism.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		/*
		 * Use worker space 0 associated with the current cpu
		 * for strategy ops.
		 */
		hammer2_xop_strategy_t *xopst;
		u_int which;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		which = ((unsigned int)ip1->ihash +
			 ((unsigned int)xopst->lbase >> HAMMER2_PBUFRADIX)) %
			hammer2_xop_sgroups;
		ng = mycpu->gd_cpuid % hammer2_xop_mod +
		     hammer2_xop_mod * which;
	} else if (hammer2_spread_workers == 0 && ip1->cluster.nchains == 1) {
		/*
		 * For now try to keep the work on the same cpu to reduce
		 * IPI overhead.  Several threads are assigned to each cpu;
		 * don't try to be very smart, just select the one to use
		 * based on the inode hash.
		 */
		u_int which;

		which = (unsigned int)ip1->ihash % hammer2_xop_xgroups;
		ng = mycpu->gd_cpuid % hammer2_xop_mod +
		     (which * hammer2_xop_mod) +
		     hammer2_xop_xbase;
	} else {
		/*
		 * Hash based on inode only, must serialize inode to same
		 * thread regardless of current cpu.
		 */
		ng = (unsigned int)ip1->ihash %
		     (hammer2_xop_mod * hammer2_xop_xgroups) +
		     hammer2_xop_xbase;
	}
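	/*
	 * Informal sketch of the resulting layout (assuming, as the tunable
	 * names suggest, that hammer2_xop_xbase begins past the strategy
	 * groups): strategy XOPs select a group in
	 * [0, hammer2_xop_mod * hammer2_xop_sgroups) while non-strategy
	 * XOPs select a group at or above hammer2_xop_xbase, keeping the
	 * two partitions disjoint per point (1) above.
	 */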
	xop->desc = desc;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc)
{
	hammer2_xop_start_except(xop, desc, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
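	/*
	 * Remove our bits from run_mask.  The HAMMER2_XOPMASK_FEED
	 * increment (a counter above the entity bits; see hammer2.h)
	 * forces any frontend concurrently entering a wait in
	 * hammer2_xop_collect() to observe a change and re-test.
	 */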
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 */

	/*
	 * Cleanup the xop's cluster.  If there is an inode reference,
	 * cache the cluster chains in the inode to improve performance,
	 * preventing the drops below from recursively tearing down the
	 * resolved chain hierarchy.
	 *
	 * Note that ip->ccache[i] does NOT necessarily represent usable
	 * chains or chains that are related to the inode.  The chains are
	 * simply held to prevent bottom-up lastdrop destruction of
	 * potentially valuable resolved chain data.
	 */
	if (xop->ip1) {
		/*
		 * Cache cluster chains in a convenient inode.  The chains
		 * are cache ref'd but not held.  The inode simply serves
		 * as a place to cache the chains to prevent the chains
		 * from being cleaned up.
		 */
		hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
		hammer2_inode_t *ip;
		int prior_nchains;

		ip = xop->ip1;
		hammer2_spin_ex(&ip->cluster_spin);
		prior_nchains = ip->ccache_nchains;
		for (i = 0; i < prior_nchains; ++i) {
			dropch[i] = ip->ccache[i].chain;
			ip->ccache[i].chain = NULL;
		}
		for (i = 0; i < xop->cluster.nchains; ++i) {
			ip->ccache[i] = xop->cluster.array[i];
			if (ip->ccache[i].chain)
				hammer2_chain_ref(ip->ccache[i].chain);
		}
		ip->ccache_nchains = i;
		hammer2_spin_unex(&ip->cluster_spin);

		/*
		 * Drop prior cache
		 */
		for (i = 0; i < prior_nchains; ++i) {
			chain = dropch[i];
			if (chain)
				hammer2_chain_drop(chain);
		}
	}

	/*
	 * Drop and unhold chains in xop cluster
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->ip4) {
		hammer2_inode_drop(xop->ip4);
		xop->ip4 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success, or a hammer2 error code if sync is permanently
 * lost (e.g. the frontend has detached).  In both cases the caller
 * retains its own ref and lock on the chain; the extra ref and data-hold
 * taken here on success are dropped later by the collector or by
 * hammer2_xop_retire().
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];

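	/*
	 * The fifo is full when wi has advanced a full HAMMER2_XOPFIFO
	 * slots ahead of ri.  Stall, interlocked against the frontend's
	 * wakeup, until the collector catches up or detaches.
	 */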
	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  If no error occurred the fifo inherits the extra
	 * ref and data-hold taken above.
	 *
	 * The caller's own ref remains in both cases.
	 */
done:
	return error;
}

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 *
 * WARNING! If the xop returns a cluster with a non-NULL focus, note that
 *	    none of the chains in the cluster (or the focus) are either
 *	    locked or I/O synchronized with the cpu.  hammer2_xop_gdata()
 *	    and hammer2_xop_pdata() must be used to safely access the focus
 *	    chain's content.
 *
 *	    The frontend can make certain assumptions based on higher-level
 *	    locking done by the frontend, but data integrity absolutely
 *	    requires using the gdata/pdata API.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				/* XXX */
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns HAMMER2_ERROR_*:
	 *
	 * 0		- key valid, cluster can be returned.
	 *
	 * ENOENT	- normal end of scan, return ENOENT.
	 *
	 * ESRCH	- sufficient elements collected, quorum agreement
	 *		  that lokey is not a valid element and should be
	 *		  skipped.
	 *
	 * EDEADLK	- sufficient elements collected, no quorum agreement
	 *		  (and no agreement possible).  In this situation a
	 *		  repair is needed, for now we loop.
	 *
	 * EINPROGRESS	- insufficient elements collected to resolve, wait
	 *		  for event and loop.
	 *
	 * EIO		- IO error or CRC check error from
	 *		  hammer2_cluster_check()
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)

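/*
 * Dependency filter: a small, conservative bitmap hash (XOP_HASH_SIZE x
 * 32 bits) rebuilt on each queue scan.  xop_sethash() marks the inodes
 * of every XOP already seen during the scan and xop_testhash() detects
 * a hit, causing a later XOP on the same inode to be skipped for now.
 * False positives from the tiny table are harmless; they merely delay
 * an XOP that could have run.
 */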
10415f59596dSMatthew Dillon static __inline
10425f59596dSMatthew Dillon int
xop_testhash(hammer2_thread_t * thr,hammer2_inode_t * ip,uint32_t * hash)10435f59596dSMatthew Dillon xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
10445f59596dSMatthew Dillon {
10455f59596dSMatthew Dillon uint32_t mask;
10465f59596dSMatthew Dillon int hv;
10475f59596dSMatthew Dillon
10485f59596dSMatthew Dillon hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
10495f59596dSMatthew Dillon mask = 1U << (hv & 31);
10505f59596dSMatthew Dillon hv >>= 5;
10515f59596dSMatthew Dillon
10525f59596dSMatthew Dillon return ((int)(hash[hv & XOP_HASH_MASK] & mask));
10535f59596dSMatthew Dillon }
10545f59596dSMatthew Dillon
10555f59596dSMatthew Dillon static __inline
10565f59596dSMatthew Dillon void
xop_sethash(hammer2_thread_t * thr,hammer2_inode_t * ip,uint32_t * hash)10575f59596dSMatthew Dillon xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
10585f59596dSMatthew Dillon {
10595f59596dSMatthew Dillon uint32_t mask;
10605f59596dSMatthew Dillon int hv;
10615f59596dSMatthew Dillon
10625f59596dSMatthew Dillon hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
10635f59596dSMatthew Dillon mask = 1U << (hv & 31);
10645f59596dSMatthew Dillon hv >>= 5;
10655f59596dSMatthew Dillon
10665f59596dSMatthew Dillon hash[hv & XOP_HASH_MASK] |= mask;
10675f59596dSMatthew Dillon }
10685f59596dSMatthew Dillon
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash)) ||
		    (xop->ip4 && xop_testhash(thr, xop->ip4, hash)))
		{
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);
		if (xop->ip4)
			xop_sethash(thr, xop->ip4, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
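	/*
	 * A dependent xop may now be runnable; poke this thread to
	 * rescan the queue instead of waiting for the next wakeup.
	 */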
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * Each XOPS thread operates against a single node in the cluster,
 * allowing the backend function to focus on that node after the
 * operation has been validated against the cluster as a whole.  This
 * is primarily what prevents dead or stalled nodes from stalling the
 * front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;

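	/* this cluster node's bit in each xop's run mask */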
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
					    HAMMER2_THREAD_FREEZE);
			continue;
		}

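		/*
		 * Handle unfreeze request
		 */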
		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
					    HAMMER2_THREAD_FROZEN |
					    HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
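		/*
		 * Drain the queue.  The XOPQ flag is only a wakeup hint;
		 * the queue is rescanned on every pass through the loop.
		 */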
		while ((xop = hammer2_xop_next(thr)) != NULL) {
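			/*
			 * Run the backend function for this cluster index,
			 * or retire the xop with ECONNABORTED if the
			 * frontend has already given up on it.
			 */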
			if (hammer2_xop_active(xop)) {
				xop->desc->storage_func((hammer2_xop_t *)xop,
							thr->scratch,
							thr->clindex);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

#if 0
	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %s\n", xop->desc->id);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
#endif
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}
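
/*
 * Illustrative sketch (not compiled in): the typical frontend flow that
 * feeds the backend loop above.  hammer2_xop_alloc(), hammer2_xop_start(),
 * hammer2_xop_collect() and hammer2_xop_retire() are the frontend half of
 * the XOP API; the readdir descriptor and the lkey seed shown here stand
 * in for whatever the calling VOP actually supplies.
 */
#if 0
	hammer2_xop_readdir_t *xop;
	int error;

	/* allocate and start the op; backend threads pick it up per node */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

	/* collect quorum-validated results one element at a time */
	while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
		/* consume the focused cluster element in xop->head.cluster */
	}

	/* drop the frontend's interest; backends retire their own bits */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif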