/*	$NetBSD: subr_lockdebug.c,v 1.42 2010/09/24 22:51:50 rmind Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
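/*
 * Each active lock is shadowed by a lockdebug_t record.  Records live
 * in a red-black tree keyed by the shadowed lock's address, on a global
 * list (ld_all) walked by DDB, and on a free list (ld_free) from which
 * new records are drawn.  Consistency is maintained with
 * __cpu_simple_lock spinlocks taken at splhigh(): ld_mod_lk serializes
 * structural changes, a per-CPU lock (cpu_ld_lock) serializes lookups,
 * and each record carries its own ld_spinlock.
 */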
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.42 2010/09/24 22:51:50 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rb.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
				 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
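/*
 * The two comparators above order lockdebug_t records by the address of
 * the lock they shadow.  Besides exact-match lookup, this ordering lets
 * lockdebug_mem_check() use rb_tree_find_node_geq() to probe an
 * arbitrary memory range for active locks.
 */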
static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("lockdebug_lookup: uninitialized lock "
		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
	}
	return ld;
}
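/*
 * Note the locking protocol implied by lockdebug_lookup(): the caller
 * raises the IPL with splhigh() first, the lookup returns with the
 * record's ld_spinlock held, and the caller releases the record with
 * __cpu_simple_unlock(&ld->ld_spinlock) followed by splx().  Holding
 * the per-CPU cpu_ld_lock across the rb-tree search excludes
 * concurrent tree modification, since writers take every CPU's lock
 * via lockdebug_lock_cpus().
 */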
/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just leave the lock untracked.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table");
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
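/*
 * A lock primitive is expected to pair lockdebug_alloc() with
 * lockdebug_free() in its init/destroy path, along these lines (a
 * sketch only; the real call sites are the LOCKDEBUG_ALLOC/
 * LOCKDEBUG_FREE wrappers used by the lock primitives):
 *
 *	bool dodebug;
 *
 *	dodebug = lockdebug_alloc(lk, lockops,
 *	    (uintptr_t)__builtin_return_address(0));
 *	...
 *	if (dodebug)
 *		lockdebug_free(lk);
 */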
/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("lockdebug_free: destroying uninitialized object %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
		   bool trylock)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
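/*
 * The recursion test above is asymmetric by design of the record: for
 * sleep locks, re-acquisition is detected per owning LWP (with shared
 * trylock attempts exempted), while for spin locks it is detected per
 * CPU, since a spin lock's owner is tracked only through ld_cpu.
 */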
/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
		 int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
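/*
 * Exclusive holds recorded above are also chained onto their owner:
 * sleep locks onto the owning LWP's l_ld_locks list, spin locks onto
 * the current CPU's cpu_ld_locks list.  lockdebug_barrier() walks
 * these lists to detect locks still held at points where none may be.
 */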
/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
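/*
 * For LOCKOPS_CV records, ld_shares counts the waiters and ld_locked
 * remembers the interlocking mutex recorded by lockdebug_locked();
 * lockdebug_wakeup() below checks that this interlock is held when a
 * wakeup is sent.  The global lbolt channel is special-cased and not
 * tracked.
 */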
/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		panic("lockdebug_barrier: holding %d shared locks",
		    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
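/*
 * The range probe above relies on the tree's address ordering:
 * rb_tree_find_node_geq() yields the lowest-addressed record at or
 * above `base', so the region [base, base + sz) being freed contains
 * an active lock exactly when that record's address falls below
 * base + sz.
 */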
/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked  : %#018lx unlocked : %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    (long)ld->ld_locked, (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */
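/*
 * The remainder of the file is compiled even without the LOCKDEBUG
 * option: lockdebug_lock_print() (under DDB) backs the "show lock"
 * command, and lockdebug_abort() is the error path the lock primitives
 * call whether or not debugging records exist.
 */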
/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic, where we will `rendezvous' with other CPUs if the
	 * machine is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}