/*
 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}

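/*
 * Illustrative sketch (not compiled): the lost-wakeup-free handshake used
 * by the wait/signal functions above, distilled to its core.  The waiter
 * registers interest via tsleep_interlock() *before* publishing
 * HAMMER2_THREAD_WAITING with a cmpset; the signaller clears WAITING and
 * calls wakeup() only when it observed the bit set.  If either side's
 * cmpset races, its loop simply retries, so no wakeup can be lost.
 */
#if 0
static void
example_wait_for_bit(hammer2_thread_t *thr, uint32_t bit)
{
	uint32_t oflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & bit)		/* condition already met */
			break;
		tsleep_interlock(&thr->flags, 0);	/* register first */
		if (atomic_cmpset_int(&thr->flags, oflags,
				      oflags | HAMMER2_THREAD_WAITING)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2xmpl", hz*60);
		}
		/* cmpset failure or timeout: re-test from the top */
	}
}
#endif
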
/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}

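/*
 * Illustrative sketch (not compiled): how callers typically start helper
 * threads.  XOP helpers pass a pmp (see hammer2_xop_helper_create()
 * below); a device-level worker such as bulkfree would instead pass the
 * hmp with pmp == NULL, taking the third lwkt_create() branch above.
 * The example_* names are hypothetical.
 */
#if 0
	/* per-PFS XOP helper, repidx >= 0 names it "h2xop-LABEL.NN" */
	hammer2_thr_create(&pmp->xop_groups[0].thrs[0], pmp, NULL,
			   "h2xop", 0, 0, hammer2_primary_xops_thread);

	/* device-level worker, no pmp, named just "h2example" */
	hammer2_thr_create(&example_thr, NULL, hmp,
			   "h2example", -1, -1, example_thread_func);
#endif
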
/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}

/****************************************************************************
 *			     HAMMER2 XOPS API				    *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}

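/*
 * Illustrative sketch (not compiled): the typical frontend life cycle of
 * a XOP as seen from a VOP.  The xop type and backend function named here
 * are hypothetical; real frontends follow this alloc -> setname/setip ->
 * start -> collect -> retire shape.
 */
#if 0
	hammer2_xop_example_t *xop;	/* hypothetical xop type */
	int error;

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_start(&xop->head, hammer2_xop_example_backend);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error == 0) {
		/* consume xop->head.cluster, which holds the result */
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
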
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->func = func;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}

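/*
 * Worked example (not compiled) of the group selection in
 * hammer2_xop_start_except(), assuming HAMMER2_XOPGROUPS is 32 (so
 * HAMMER2_XOPGROUPS_MASK is 31).  Masking with (MASK >> 1) == 15
 * confines non-strategy ops to groups 0-15, while strategy ops add
 * GROUPS/2 == 16 and land in groups 16-31.  Ops on the same inode (and,
 * for strategy ops, the same lbase) always hash to the same group and
 * therefore execute in queue order per cluster node.
 */
#if 0
	ng = (int)hammer2_icrc32(&xop->ip1, sizeof(xop->ip1));
	ng &= HAMMER2_XOPGROUPS_MASK >> 1;	/* 0-15: non-strategy */
	/* strategy ops: ng += HAMMER2_XOPGROUPS / 2  -> 16-31 */
#endif
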
/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if the frontend is
 * detached or sync is permanently lost (the feed is aborted).  In all
 * cases the caller retains its ref and lock on the chain; on success
 * the fifo gains its own ref and data-hold for the pass-back.
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];

	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  The caller's lock and ref on the chain remain intact
	 * in all cases; on success the fifo holds the extra ref and
	 * data-hold acquired above.
	 */
done:
	return error;
}

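/*
 * Illustrative sketch (not compiled): the shape of a typical backend
 * function feeding chains to the frontend.  The xop type and lookup
 * iteration ("...") are elided/hypothetical; the key points are that
 * each feed takes its own ref/hold so the backend may release its chain
 * afterwards, and that the backend terminates its stream by feeding
 * NULL with the final error code.
 */
#if 0
static void
hammer2_xop_example_backend(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_example_t *xop = &arg->xop_example;	/* hypothetical */
	hammer2_chain_t *chain;
	int error;

	error = 0;
	for (chain = ...; chain; chain = ...) {	/* iterate matches */
		error = hammer2_xop_feed(&xop->head, chain,
					 thr->clindex, 0);
		if (error)
			break;
	}
	/* terminate this node's feed, frontend sees the error code */
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
}
#endif
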
/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return HAMMER2_ERROR_ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				/* XXX */
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		 - key valid, cluster can be returned.
	 *
	 * ENOENT	 - normal end of scan, return ENOENT.
	 *
	 * ESRCH	 - sufficient elements collected, quorum agreement
	 *		   that lokey is not a valid element and should be
	 *		   skipped.
	 *
	 * EDEADLK	 - sufficient elements collected, no quorum agreement
	 *		   (and no agreement possible).  In this situation a
	 *		   repair is needed, for now we loop.
	 *
	 * EINPROGRESS	 - insufficient elements collected to resolve, wait
	 *		   for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}

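/*
 * Illustrative sketch (not compiled): a frontend scan loop over a
 * collected range, e.g. the shape a readdir-style VOP would use.
 * HAMMER2_ERROR_ENOENT is the normal termination code; each successful
 * collect leaves the quorum-validated element in xop->head.cluster.
 */
#if 0
	for (;;) {
		error = hammer2_xop_collect(&xop->head, 0);
		if (error) {
			if (error == HAMMER2_ERROR_ENOENT)
				error = 0;	/* normal end of scan */
			break;
		}
		/* consume xop->head.cluster (focus element) here */
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
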
/*
 * N x M processing threads are available to handle XOPs: N XOP groups
 * per cluster node x M cluster nodes, one thread per (group, node) pair.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}

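/*
 * Illustrative note (not compiled): the dependency filter above is a
 * 512-bit (16 x 32) occupancy bitmap keyed on (ip, thr).  It is
 * intentionally conservative: two different inodes may hash to the same
 * bit, in which case the later xop is merely skipped this pass and
 * retried after the earlier one is dequeued.  False positives delay
 * execution but can never reorder ops on the same inode.
 */
#if 0
	uint32_t hash[XOP_HASH_SIZE] = { 0 };

	if (!xop_testhash(thr, ip, hash))	/* no dependency recorded */
		xop_sethash(thr, ip, hash);	/* claim it for this pass */
#endif
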
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * Each thread executes a XOP's node-specific operation, allowing the
 * backend function to focus on a single node in the cluster after the
 * operation has been validated against the cluster as a whole.  This is
 * primarily what prevents dead or stalled nodes from stalling the
 * front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

#if 0
	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
#endif
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}