/*	$NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
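/*
 * Overview (informal): lock primitives do not call the functions in this
 * file directly, but go through the LOCKDEBUG_* macros in <sys/lockdebug.h>,
 * which compile away when the kernel is built without the LOCKDEBUG option.
 * A typical lock's life cycle, as seen from this module, is roughly:
 *
 *	LOCKDEBUG_ALLOC()	lock initialized, debug record allocated
 *	LOCKDEBUG_WANTLOCK()	about to acquire the lock
 *	LOCKDEBUG_LOCKED()	lock acquired
 *	LOCKDEBUG_UNLOCKED()	lock released
 *	LOCKDEBUG_FREE()	lock destroyed, debug record returned
 *
 * This is a sketch only; see sys/lockdebug.h for the authoritative macro
 * names and arguments.
 */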
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
		    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));
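/*
 * Concurrency notes (an informal summary of the code below): ld_mod_lk
 * serializes allocation and free of lockdebug_t records; each record is
 * covered by its own ld_spinlock; and the rb-tree of active records is
 * protected by a per-CPU __cpu_simple_lock_t (ci_data.cpu_ld_lock).
 * All of these are taken at splhigh(), since the hooks can be invoked
 * from almost any context.
 */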
static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}
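/*
 * The per-CPU locks above implement a crude reader/writer scheme for
 * ld_rb_tree: a lookup takes only the local CPU's cpu_ld_lock, while a
 * tree modification must hold every CPU's lock.  Writers (see
 * lockdebug_alloc() and lockdebug_free() below) therefore follow this
 * pattern, sketched here for reference:
 *
 *	s = splhigh();
 *	__cpu_simple_lock(&ld_mod_lk);
 *	lockdebug_lock_cpus();
 *	... rb_tree_insert_node() or rb_tree_remove_node() ...
 *	lockdebug_unlock_cpus();
 *	__cpu_simple_unlock(&ld_mod_lk);
 *	splx(s);
 */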
/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it
 *	locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}
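/*
 * Illustration (not a verbatim call site): a lock primitive allocates
 * its debug record when the lock is initialized, e.g. mutex_init()
 * does something along these lines through the LOCKDEBUG_ALLOC() hook:
 *
 *	bool dodebug;
 *
 *	dodebug = LOCKDEBUG_ALLOC(mtx, lockops,
 *	    (uintptr_t)__builtin_return_address(0));
 *
 * and remembers `dodebug' so the remaining hooks can be skipped for
 * locks that never received a record (see the comment in
 * lockdebug_alloc() below about running out of structures).
 */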
/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p",
		    func, line, lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
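/*
 * The matching teardown hook is LOCKDEBUG_FREE(), called from e.g.
 * mutex_destroy().  Note above that destroying a lock which is still
 * held, or still has shared holds, is itself reported as an error;
 * this catches locks destroyed while embedded in live structures.
 */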
/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}
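/*
 * Acquire-side hooks come in pairs: a primitive announces its intent
 * with LOCKDEBUG_WANTLOCK() before spinning or sleeping, then confirms
 * with LOCKDEBUG_LOCKED() once it owns the lock.  Roughly (illustrative
 * only, not a verbatim call site):
 *
 *	LOCKDEBUG_WANTLOCK(dodebug, mtx, ra, false);
 *	... spin, or block on the turnstile, until acquired ...
 *	LOCKDEBUG_LOCKED(dodebug, mtx, NULL, ra, false);
 *
 * where `ra' is the caller's return address and the final argument
 * distinguishes shared from exclusive acquisition.
 */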
/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
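/*
 * Note on the recursion check above: a sleep lock is owned by an LWP,
 * so recursion keys on ld_lwp; a spin lock is effectively owned by a
 * CPU, so it keys on ld_cpu.  Both are consulted only while the lock
 * is recorded as held, so a stale ld_cpu left by a previous owner
 * cannot trigger a false "locking against myself".
 */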
/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(func, line, ld, s,
			    "multiple locks used with condition variable",
			    true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
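/*
 * For condition variables (LOCKOPS_CV) the record is used differently,
 * as seen above: ld_shares counts the number of waiters and ld_locked
 * remembers the interlock mutex passed in, so a second waiter using a
 * different mutex with the same CV can be caught.  The global lbolt
 * "CV" is exempt since it has no fixed interlock.
 */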
/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
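/*
 * The release-side hook mirrors the acquire side (illustrative only):
 *
 *	LOCKDEBUG_UNLOCKED(dodebug, mtx, ra, false);
 *
 * Note above that for a shared release, ld_unlocked and ld_lwp are only
 * updated when the releasing LWP is the recorded one; with several
 * simultaneous readers the record can track just one of them.
 */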
/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}
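/*
 * lockdebug_barrier() is reached through LOCKDEBUG_BARRIER(), typically
 * from code paths that are about to sleep or return to user space, e.g.
 * (illustrative only, not a verbatim call site):
 *
 *	LOCKDEBUG_BARRIER(&kernel_lock, 1);
 *
 * which asserts that no spin lock other than kernel_lock is held, while
 * still permitting sleep locks.
 */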
/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}
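/*
 * Error handling below is deliberately one-shot: the first trapped
 * error bumps ld_panic, and every hook in this file checks it on entry
 * and becomes a no-op afterwards.  Once the system starts going down,
 * lockdebug state is stale and further reports would only obscure the
 * original problem.
 */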
/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
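/*
 * A DDB session using the command above might look like this (address
 * invented, output abridged):
 *
 *	db{0}> show lock 0xffffa0000182f500
 *	lock address : 0x0000ffffa0000182 type     :     sleep/adaptive
 *	...
 *
 * With no address argument, every record currently in use is dumped.
 */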
/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s,%zu: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, line, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}