/*
 * Copyright (c) 2007-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.35 2008/10/15 22:38:37 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>

void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				kprintf("hammer_lock_ex: held by %p\n",
					lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					kprintf("hammer_lock_ex: try again\n");
			}
		}
	}
}
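
/*
 * Illustrative usage sketch (hypothetical caller, not code from this
 * file): the KKASSERT(lock->refs) above means an excl/share lock may
 * only be taken on a structure that is already structurally referenced.
 *
 *	hammer_ref(&node->lock);
 *	hammer_lock_ex_ident(&node->lock, "nodelk");
 *	... modify the structure ...
 *	hammer_unlock(&node->lock);
 *	hammer_rel(&node->lock);
 *
 * "node" and the "nodelk" ident are illustrative only.
 */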

/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	const char *ident = "hmrlck";

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case: acquiring a shared lock while
			 * already holding it exclusively.  Drop into the
			 * kernel debugger for now; a 'cont' continues with
			 * an exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				break;
			}
		} else {
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
		}
	}
}

int
hammer_lock_sh_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case: acquiring a shared lock while
			 * already holding it exclusively.  Drop into the
			 * kernel debugger for now; a 'cont' continues with
			 * an exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	for (;;) {
		lv = lock->lockval;

		if ((lv & ~HAMMER_LOCKF_WANTED) == shcount) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->lowner != curthread)
				panic("hammer_lock_upgrade: illegal state");
			error = 0;
			break;
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			panic("hammer_lock_upgrade: lock is not held");
			/* NOT REACHED */
			error = EDEADLK;
			break;
		} else {
			error = EDEADLK;
			break;
		}
	}
	return (error);
}
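
/*
 * Sketch of an upgrade/downgrade cycle under the rules above, assuming
 * a single shared holder (shcount == 1); "ip" is a hypothetical
 * structure embedding a hammer_lock:
 *
 *	hammer_lock_sh(&ip->lock);
 *	if (hammer_lock_upgrade(&ip->lock, 1) == 0) {
 *		... exclusive work ...
 *		hammer_lock_downgrade(&ip->lock, 1);
 *	}
 *	... shared work (the lock is shared again on both paths) ...
 *	hammer_unlock(&ip->lock);
 */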

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | shcount));
	KKASSERT(lock->lowner == td);

	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
	lock->lowner = NULL;

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(&lock->lockval);
			break;
		}
	}
}

void
hammer_unlock(struct hammer_lock *lock)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	lv = lock->lockval;
	KKASSERT(lv != 0);
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->lowner == td);

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (nlv > 1) {
			nlv = lv - 1;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (nlv == 1) {
			nlv = 0;
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
				lock->lowner = NULL;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(&lock->lockval);
				break;
			}
		} else {
			panic("hammer_unlock: lock %p is not held", lock);
		}
	}
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if lock is held shared, and > 0 if held exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
	u_int lv = lock->lockval;

	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		return(1);
	else if (lv)
		return(-1);
	panic("hammer_lock_status: lock must be held: %p", lock);
}

/*
 * Bump the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be set on a 0->1
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 * The caller can interlock it later on to deal with serialization.
 *
 * MPSAFE
 */
void
hammer_ref(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 0) {
			nlv = (lv + 1) | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			nlv = (lv + 1);
			KKASSERT((int)nlv > 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
	/* not reached */
}

/*
 * Drop the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be cleared on a 1->0
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 *
 * MPSAFE
 */
void
hammer_rel(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			nlv = (lv - 1) & ~HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			KKASSERT((int)lv > 0);
			nlv = (lv - 1);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
	/* not reached */
}
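
/*
 * Minimal sketch of plain structural ref-counting with the two
 * functions above ("volume" is a hypothetical referenced structure).
 * When initialization or teardown must be serialized, use the
 * interlock variants below instead.
 *
 *	hammer_ref(&volume->io.lock);
 *	... use the structure ...
 *	hammer_rel(&volume->io.lock);
 */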

/*
 * The hammer_*_interlock() and hammer_*_interlock_done() functions are
 * more sophisticated versions which handle MP transition races and block
 * when necessary.
 *
 * hammer_ref_interlock() bumps the ref-count and conditionally acquires
 * the interlock for 0->1 transitions or if the CHECK is found to be set.
 *
 * This case will return TRUE, the interlock will be held, and the CHECK
 * bit also set.  Other threads attempting to ref will see the CHECK bit
 * and block until we clean up.
 *
 * FALSE is returned for transitions other than 0->1 when the CHECK bit
 * is not found to be set, or if the function loses the race with another
 * thread.
 *
 * TRUE is only returned to one thread and the others will block.
 * Effectively a TRUE indicator means 'someone transitioned 0->1
 * and you are the first guy to successfully lock it after that, so you
 * need to check'.  Due to races the ref-count may be greater than 1 upon
 * return.
 *
 * MPSAFE
 */
int
hammer_ref_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	/*
	 * Integrated reference count bump, lock, and check, with hot-path.
	 *
	 * (a) Return 1	(+LOCKED, +CHECK)	0->1 transition
	 * (b) Return 0	(-LOCKED, -CHECK)	N->N+1 transition
	 * (c) Break out (+CHECK)		Check condition and cannot lock
	 * (d) Return 1	(+LOCKED, +CHECK)	Successfully locked
	 */
	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			nlv = (lv + 1);
			if ((lv & ~HAMMER_REFS_FLAGS) == 0)
				nlv |= HAMMER_REFS_CHECK;
			if ((nlv & HAMMER_REFS_CHECK) == 0) {
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					return(0);
			} else if (lv & HAMMER_REFS_LOCKED) {
				/* CHECK also set here */
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					break;
			} else {
				/* CHECK also set here */
				nlv |= HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			}
		}
	}

	/*
	 * Deferred check condition because we were unable to acquire the
	 * lock.  We must block until the check condition is cleared due
	 * to a race with another thread, or we are able to acquire the
	 * lock.
	 *
	 * (a) Return 0	(-CHECK)		Another thread handled it
	 * (b) Return 1	(+LOCKED, +CHECK)	We handled it.
	 */
	for (;;) {
		lv = lock->refs;
		if ((lv & HAMMER_REFS_CHECK) == 0)
			return(0);
		if (lv & HAMMER_REFS_LOCKED) {
			tsleep_interlock(&lock->refs, 0);
			nlv = (lv | HAMMER_REFS_WANTED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "h1lk", 0);
		} else {
			/* CHECK also set here */
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		}
	}
	/* not reached */
}

/*
 * This is the same as hammer_ref_interlock() but asserts that the
 * 0->1 transition is always true, thus the lock must have no references
 * on entry or have CHECK set, and will have one reference with the
 * interlock held on return.  It must also not be interlocked on entry
 * by anyone.
 *
 * NOTE that CHECK will never be found set when the ref-count is 0.
 *
 * TRUE is always returned to match the API for hammer_ref_interlock().
 * This function returns with one ref, the lock held, and the CHECK bit set.
 */
int
hammer_ref_interlock_true(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;

		if (lv) {
			panic("hammer_ref_interlock_true: bad lock %p %08x\n",
			      lock, lock->refs);
		}
		nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
			return (1);
		}
	}
}

/*
 * Unlock the interlock acquired by hammer_ref_interlock() and clear the
 * CHECK flag.  The ref-count remains unchanged.
 *
 * This routine is called in the load path when the load succeeds.
 */
void
hammer_ref_interlock_done(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~HAMMER_REFS_FLAGS;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}
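
/*
 * Typical load-path pattern tying hammer_ref_interlock() and
 * hammer_ref_interlock_done() together (a sketch; "buffer" is
 * hypothetical):
 *
 *	if (hammer_ref_interlock(&buffer->io.lock)) {
 *		... won the 0->1 (or CHECK) race: load and
 *		... validate the structure, then:
 *		hammer_ref_interlock_done(&buffer->io.lock);
 *	}
 *	... the structure is referenced and stable here ...
 */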

/*
 * hammer_rel_interlock() works a bit differently in that it must
 * acquire the lock in tandem with a 1->0 transition.  CHECK is
 * not used.
 *
 * TRUE is returned on 1->0 transitions with the lock held on return
 * and FALSE is returned otherwise with the lock not held.
 *
 * It is important to note that the refs are not stable and may
 * increase while we hold the lock, the TRUE indication only means
 * that we transitioned 1->0, not necessarily that we stayed at 0.
 *
 * Another thread bumping refs while we hold the lock will set CHECK,
 * causing one of the competing hammer_ref_interlock() calls to
 * return TRUE after we release our lock.
 *
 * MPSAFE
 */
int
hammer_rel_interlock(struct hammer_lock *lock, int locked)
{
	u_int lv;
	u_int nlv;

	/*
	 * In locked mode (failure/unload path) we release the
	 * ref-count but leave it locked.
	 */
	if (locked) {
		hammer_rel(lock);
		return(1);
	}

	/*
	 * Integrated reference count drop with LOCKED, plus the hot-path
	 * returns.
	 */
	for (;;) {
		lv = lock->refs;

		if (lv == 1) {
			nlv = 0 | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			if ((lv & HAMMER_REFS_LOCKED) == 0) {
				nlv = (lv - 1) | HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			} else {
				nlv = lv | HAMMER_REFS_WANTED;
				tsleep_interlock(&lock->refs, 0);
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					tsleep(&lock->refs, PINTERLOCKED,
					       "h0lk", 0);
				}
			}
		} else {
			nlv = (lv - 1);
			KKASSERT((int)nlv >= 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return(0);
		}
	}
	/* not reached */
}
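
/*
 * Matching unload-path sketch (hypothetical caller): a TRUE return
 * means we transitioned 1->0 and hold the interlock, so the structure
 * may be torn down before dropping the interlock with
 * hammer_rel_interlock_done() below.
 *
 *	if (hammer_rel_interlock(&buffer->io.lock, 0)) {
 *		... disassociate / tear down ...
 *		hammer_rel_interlock_done(&buffer->io.lock, 0);
 *	}
 */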

/*
 * Unlock the interlock acquired by hammer_rel_interlock().
 *
 * If orig_locked is non-zero the interlock was originally held prior to
 * the hammer_rel_interlock() call and passed through to us.  In this
 * case we want to retain the CHECK error state if not transitioning
 * to 0.
 *
 * The code is the same either way so we do not have to conditionalize
 * on orig_locked.
 */
void
hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked __unused)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if ((lv & ~HAMMER_REFS_FLAGS) == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}

/*
 * Acquire the interlock on lock->refs.
 *
 * Return TRUE if CHECK is currently set.  Note that CHECK will not
 * be set if the reference count is 0, but can get set if this function
 * is preceded by, say, hammer_ref(), or through races with other
 * threads.  The return value allows the caller to use the same logic
 * as hammer_ref_interlock().
 *
 * MPSAFE
 */
int
hammer_get_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv & HAMMER_REFS_LOCKED) {
			nlv = lv | HAMMER_REFS_WANTED;
			tsleep_interlock(&lock->refs, 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "hilk", 0);
		} else {
			nlv = (lv | HAMMER_REFS_LOCKED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return((lv & HAMMER_REFS_CHECK) ? 1 : 0);
			}
		}
	}
}

/*
 * Attempt to acquire the interlock and expect 0 refs.  Used by the buffer
 * cache callback code to disassociate or lock the bufs related to HAMMER
 * structures.
 *
 * During teardown the related bp will be acquired by hammer_io_release()
 * which interlocks our test.
 *
 * Returns non-zero on success, zero on failure.
 */
int
hammer_try_interlock_norefs(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			return(0);
		}
	}
	/* not reached */
}

/*
 * Release the interlock on lock->refs.  This function will set
 * CHECK if the refs is non-zero and error is non-zero, and clear
 * CHECK otherwise.
 *
 * MPSAFE
 */
void
hammer_put_interlock(struct hammer_lock *lock, int error)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		KKASSERT(lv & HAMMER_REFS_LOCKED);
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);

		if ((nlv & ~HAMMER_REFS_FLAGS) == 0 || error == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		else
			nlv |= HAMMER_REFS_CHECK;

		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			return;
		}
	}
}

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data buffers
 * (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release the
 * sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
	int error;

	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;
	return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
}
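
/*
 * Illustrative frontend use of the sync lock (sketch only): any
 * meta-data modification made between the lock/unlock pair becomes
 * part of the flush group currently being assembled, while the
 * flusher brackets its work with the exclusive variant.
 *
 *	hammer_sync_lock_sh(trans);
 *	... modify meta-data ...
 *	hammer_sync_unlock(trans);
 */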

/*
 * Misc
 */
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
	return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
	bzero(uuid, sizeof(*uuid));
	*(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
	u_int64_t xtime;

	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
	return(xtime);
}
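
/*
 * Example: HAMMER's 64 bit timestamps count microseconds, so a
 * round-trip through the two helpers above truncates to microsecond
 * resolution (values hypothetical):
 *
 *	ts = { .tv_sec = 1, .tv_nsec = 1500 };
 *	hammer_timespec_to_time(&ts)		-> 1000001
 *	hammer_time_to_timespec(1000001, &ts)	-> tv_sec 1, tv_nsec 1000
 */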

/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(VDIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(VREG);
	case HAMMER_OBJTYPE_DBFILE:
		return(VDATABASE);
	case HAMMER_OBJTYPE_FIFO:
		return(VFIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(VSOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(VCHR);
	case HAMMER_OBJTYPE_BDEV:
		return(VBLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(VLNK);
	default:
		return(VBAD);
	}
	/* not reached */
}

int
hammer_get_dtype(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(DT_DIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(DT_REG);
	case HAMMER_OBJTYPE_DBFILE:
		return(DT_DBF);
	case HAMMER_OBJTYPE_FIFO:
		return(DT_FIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(DT_SOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(DT_CHR);
	case HAMMER_OBJTYPE_BDEV:
		return(DT_BLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(DT_LNK);
	default:
		return(DT_UNKNOWN);
	}
	/* not reached */
}

u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
	switch(vtype) {
	case VDIR:
		return(HAMMER_OBJTYPE_DIRECTORY);
	case VREG:
		return(HAMMER_OBJTYPE_REGFILE);
	case VDATABASE:
		return(HAMMER_OBJTYPE_DBFILE);
	case VFIFO:
		return(HAMMER_OBJTYPE_FIFO);
	case VSOCK:
		return(HAMMER_OBJTYPE_SOCKET);
	case VCHR:
		return(HAMMER_OBJTYPE_CDEV);
	case VBLK:
		return(HAMMER_OBJTYPE_BDEV);
	case VLNK:
		return(HAMMER_OBJTYPE_SOFTLINK);
	default:
		return(HAMMER_OBJTYPE_UNKNOWN);
	}
	/* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(HAMMER_DELETE_DESTROY);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(HAMMER_DELETE_DESTROY);
	return(0);
}

/*
 * ALGORITHM VERSION 0:
 *	Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 *	crc in the MSB and 0 in the LSB.  The caller will use the low 32 bits
 *	to generate a unique key and will scan all entries with the same upper
 *	32 bits when issuing a lookup.
 *
 *	0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 1:
 *	This algorithm breaks the filename down into separate 32-bit crcs
 *	for each filename segment separated by a special character (dot,
 *	dash, underscore, or tilde).  The CRCs are then added together.
 *	This allows temporary names to land near their final names in the
 *	key space.  A full-filename 16 bit crc is also generated to deal
 *	with degenerate conditions.
 *
 *	The algorithm is designed to handle create/rename situations such
 *	that a create with an extension to a rename without an extension
 *	only shifts the key space rather than randomizes it.
 *
 *	NOTE: The inode allocator cache can only match 10 bits so we do
 *	      not really have any room for a partial sorted name, and
 *	      numbers don't sort well in that situation anyway.
 *
 *	0mmmmmmmmmmmmmmm mmmmmmmmmmmmmmmm llllllllllllllll 0000000000000000
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * We usually strip bit 0 (set it to 0) in order to provide a consistent
 * iteration space for collisions.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			 u_int32_t *max_iterationsp)
{
	const char *aname = name;
	int32_t crcx;
	int64_t key;
	int i;
	int j;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		/*
		 * Original algorithm
		 */
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
		if (key == 0)
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		/*
		 * Filesystem version 6 or better will create directories
		 * using the ALG1 dirhash.  This hash breaks the filename
		 * up into domains separated by special characters and
		 * hashes each domain independently.
		 *
		 * (A simple sub-sort using the first character of the
		 * filename in the top 5 bits is present below but is
		 * currently disabled via #if 0.)
		 */
		key = 0;

		/*
		 * m32
		 */
		crcx = 0;
		for (i = j = 0; i < len; ++i) {
			if (aname[i] == '.' ||
			    aname[i] == '-' ||
			    aname[i] == '_' ||
			    aname[i] == '~') {
				if (i != j)
					crcx += crc32(aname + j, i - j);
				j = i + 1;
			}
		}
		if (i != j)
			crcx += crc32(aname + j, i - j);

#if 0
		/*
		 * xor top 5 bits 0mmmm into low bits and steal the top 5
		 * bits as a semi sub-sort using the first character of
		 * the filename.  bit 63 is always left as 0 so directory
		 * keys are positive numbers.
		 */
		crcx ^= (uint32_t)crcx >> (32 - 5);
		crcx = (crcx & 0x07FFFFFF) | ((aname[0] & 0x0F) << (32 - 5));
#endif
		crcx &= 0x7FFFFFFFU;

		key |= (uint64_t)crcx << 32;

		/*
		 * l16 - crc of entire filename
		 *
		 * This crc reduces degenerate hash collision conditions
		 */
		crcx = crc32(aname, len);
		crcx = crcx ^ (crcx << 16);
		key |= crcx & 0xFFFF0000U;

		/*
		 * Cleanup
		 */
		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			kprintf("namekey2: 0x%016llx %*.*s\n",
				(long long)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
	default:
		key = 0;			/* compiler warning */
		*max_iterationsp = 1;		/* sanity */
		panic("hammer_directory_namekey: bad algorithm %p\n", dip);
		break;
	}
	return(key);
}
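
/*
 * Worked sketch of the ALG1 behavior: for "datafile.tmp" the m32
 * field is (crc32("datafile") + crc32("tmp")) & 0x7FFFFFFF, so a
 * rename to "datafile" only shifts m32 by crc32("tmp") (modulo the
 * mask) instead of randomizing it, keeping the old and new directory
 * entries close together in the B-Tree.  Only the l16 field
 * re-randomizes across such a rename.
 */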

/*
 * Convert string after @@ (@@ not included) to TID.  Returns 0 on success,
 * EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
		  hammer_tid_t *tidp, u_int32_t *localizationp)
{
	hammer_tid_t tid;
	u_int32_t localization;
	char *ptr;
	int ispfs;
	int n;

	/*
	 * Forms allowed for TID:  "0x%016llx"
	 *			   "-1"
	 */
	tid = strtouq(str, &ptr, 0);
	n = ptr - str;
	if (n == 2 && str[0] == '-' && str[1] == '1') {
		/* ok */
	} else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
		/* ok */
	} else {
		return(EINVAL);
	}

	/*
	 * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
	 */
	str = ptr;
	if (*str == ':') {
		localization = strtoul(str + 1, &ptr, 10) << 16;
		if (ptr - str != 6)
			return(EINVAL);
		str = ptr;
		ispfs = 1;
	} else {
		localization = *localizationp;
		ispfs = 0;
	}

	/*
	 * Any trailing junk invalidates special extension handling.
	 */
	if (*str)
		return(EINVAL);
	*tidp = tid;
	*localizationp = localization;
	*ispfsp = ispfs;
	return(0);
}
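
/*
 * Accepted forms by example (TID values hypothetical):
 *
 *	"0x00000001061a8ba0"		TID only, *localizationp unchanged
 *	"0x00000001061a8ba0:00001"	TID plus 5-digit PFS id, *ispfsp = 1
 *	"-1"				largest possible TID
 *
 * "0x1" fails: the hex form must be exactly 16 digits, and a PFS
 * suffix must be exactly 5 digits.
 */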

void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
	ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
			  crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
	hammer_crc_t crc;

	crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
	      crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
	return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
	return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *       to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	hammer_crc_t crc;

	if (leaf->data_len == 0) {
		crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			if (leaf->data_len != sizeof(struct hammer_inode_data))
				return(0);
			crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			crc = crc32(data, leaf->data_len);
			break;
		}
	}
	return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	if (leaf->data_len == 0) {
		leaf->data_crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			KKASSERT(leaf->data_len ==
				 sizeof(struct hammer_inode_data));
			leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			leaf->data_crc = crc32(data, leaf->data_len);
			break;
		}
	}
}

void
hkprintf(const char *ctl, ...)
{
	__va_list va;

	if (hammer_debug_debug) {
		__va_start(va, ctl);
		kvprintf(ctl, va);
		__va_end(va);
	}
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);
}

int
hammer_blockoff(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return((int)file_offset & HAMMER_BUFMASK);
	else
		return((int)file_offset & HAMMER_XBUFMASK);
}
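
/*
 * Example: with the buffer constants from hammer.h (small buffers
 * below HAMMER_XDEMARC, large buffers at or above it -- the actual
 * sizes are defined there, not restated by this file):
 *
 *	hammer_blocksize(off) -> HAMMER_BUFSIZE	  (off < HAMMER_XDEMARC)
 *	hammer_blocksize(off) -> HAMMER_XBUFSIZE  (off >= HAMMER_XDEMARC)
 *
 * and hammer_blockoff() returns the offset within that block,
 * assuming the masks are the corresponding size minus one.
 */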

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	panic("hammer_blockdemarc: illegal range %lld %lld\n",
	      (long long)file_offset1, (long long)file_offset2);
}

udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
	u_int32_t crc;

	crc = crc32(uuid, sizeof(*uuid));
	return((udev_t)crc);
}