/*	$NetBSD: subr_lockdebug.c,v 1.5 2007/03/10 15:56:21 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.5 2007/03/10 15:56:21 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>

#include <machine/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_NOID		(LD_MAX_LOCKS + 1)

typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers;
lockdebuglist_t	ld_spinners;
lockdebuglist_t	ld_free;
lockdebuglist_t	ld_all;
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = spllock();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&lk->lk_lock);
	splx(s);
}
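
/*
 * Illustrative sketch (not part of the original file): how a lock ID
 * maps onto the two-level table above.  With LD_BATCH_SHIFT == 9,
 * id 1234 selects ld_table[2], slot 210 within that batch.  The
 * function name is hypothetical; lockdebug_lookup() below performs
 * the same arithmetic with validity checks added.
 */
#if 0
static lockdebug_t *
example_id_to_slot(u_int id)
{
	lockdebug_t *base;

	base = ld_table[id >> LD_BATCH_SHIFT];	/* which 512-entry batch */
	return base + (id & LD_BATCH_MASK);	/* slot within the batch */
}
#endif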

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	TAILQ_INIT(&ld_free);
	TAILQ_INIT(&ld_all);
	TAILQ_INIT(&ld_sleepers);
	TAILQ_INIT(&ld_spinners);

	/* ID 0 is never handed out; slot 0 of the primed batch is unused. */
	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}
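
/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * how a lock primitive might register with this module.  The names
 * foo, foo_init, foo_destroy, foo_dump and f_debug_id are
 * hypothetical; the lockops_t fields are those referenced elsewhere
 * in this file.
 */
#if 0
static void foo_dump(volatile void *);

lockops_t foo_lockops = {
	.lo_name = "foo",
	.lo_sleeplock = 0,	/* a spin lock */
	.lo_dump = foo_dump,
};

void
foo_init(struct foo *f)
{
	/* May legitimately return LD_NOID; stash the ID regardless. */
	f->f_debug_id = lockdebug_alloc(&f->f_lock, &foo_lockops);
}

void
foo_destroy(struct foo *f)
{
	lockdebug_free(&f->f_lock, f->f_debug_id);
}
#endif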

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo)
{
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to keep at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return ld->ld_id;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock) {
		panic("lockdebug_free: destroying uninitialized lock %p "
		    "(ld_id=%d ld_lock=%p)", lock, id, ld->ld_lock);
		lockdebug_abort1(ld, lk, __func__, "lock record follows");
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked");

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}
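
/*
 * Illustrative sketch (hypothetical flow, not part of the original
 * file): lockdebug_free() catches destruction of a lock that is
 * still held, since the LD_LOCKED flag and the share count are
 * checked before the structure is recycled onto ld_free.
 */
#if 0
id = lockdebug_alloc(&f->f_lock, &foo_lockops);
lockdebug_locked(id, (uintptr_t)__builtin_return_address(0), 0);
lockdebug_free(&f->f_lock, id);		/* aborts: "is locked" */
#endif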

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself");

	lockdebug_unlock(lk);
}
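
/*
 * Illustrative sketch (hypothetical flow, not part of the original
 * file): lockdebug_wantlock() flags an attempt to take a lock the
 * caller already holds - tracked per LWP for sleep locks and per CPU
 * for spin locks.  pc1 and pc2 stand for caller return addresses.
 */
#if 0
lockdebug_locked(id, pc1, 0);	/* records this CPU/LWP as owner */
lockdebug_wantlock(id, pc2, 0);	/* aborts: "locking against myself" */
#endif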

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked");

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP");
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock");
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked");

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
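
/*
 * Illustrative sketch (hypothetical flow, not part of the original
 * file): releases must match the mode of the acquire.  A shared hold
 * bumps both the per-lock (ld_shares) and per-LWP (l_shlocks)
 * counters, so an exclusive-mode release of it is detected because
 * LD_LOCKED was never set.
 */
#if 0
lockdebug_locked(id, pc, 1);	/* shared: ld_shares++, l_shlocks++ */
lockdebug_unlocked(id, pc, 0);	/* exclusive: aborts, "not locked" */
#endif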

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold more than one specified spin lock, and optionally,
 *	if we hold sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU");
				continue;
			}
			if (ld->ld_cpu == cpuno)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held");
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held");
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}
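
/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * code that is about to block might assert that it holds no spin
 * locks other than one named exception, and no sleep locks at all.
 * The kernel_lock argument is an assumption made for the example.
 */
#if 0
lockdebug_barrier(&kernel_lock, 0);
#endif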

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
		const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
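
/*
 * Illustrative note (assumed session, not part of the original file):
 * with a LOCKDEBUG kernel, lockdebug_lock_print() above backs the
 * ddb(4) command
 *
 *	db> show lock <address>
 *
 * which prints the record in the lockdebug_dump() format ("lock
 * address : ...", and so on).
 */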