/*	$NetBSD: kern_lock.c,v 1.60 2001/11/12 15:25:11 lukem Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.60 2001/11/12 15:25:11 lukem Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define	COUNT(lkp, p, cpu_id, x)
#define	COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
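
/*
 * Illustrative sketch: typical use of lockinit() and lockmgr() for a
 * sleep lock embedded in a structure.  "struct frob" and the frob_*()
 * routines are hypothetical; the priority, flags and calls are the
 * interfaces implemented in this file.
 *
 *	struct frob {
 *		struct lock f_lock;
 *		...
 *	};
 *
 *	void
 *	frob_init(struct frob *f)
 *	{
 *
 *		lockinit(&f->f_lock, PRIBIO, "froblk", 0, 0);
 *	}
 *
 *	void
 *	frob_modify(struct frob *f)
 *	{
 *
 *		(void) lockmgr(&f->f_lock, LK_EXCLUSIVE, NULL);
 *		... modify fields protected by f_lock ...
 *		(void) lockmgr(&f->f_lock, LK_RELEASE, NULL);
 *	}
 */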

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it is running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case,
 * so if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */
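
/*
 * Illustrative sketch of the IPL discipline described above, for a
 * simple lock that is also taken from a network interrupt handler.
 * "frob_slock" and frob_kick() are hypothetical; splnet()/splx() and
 * simple_lock()/simple_unlock() are the real interfaces.  The spl is
 * raised to the highest IPL at which the lock is ever taken before
 * the lock itself is acquired.
 *
 *	struct simplelock frob_slock = SIMPLELOCK_INITIALIZER;
 *
 *	void
 *	frob_kick(void)
 *	{
 *		int s;
 *
 *		s = splnet();
 *		simple_lock(&frob_slock);
 *		... touch data shared with the interrupt handler ...
 *		simple_unlock(&frob_slock);
 *		splx(s);
 *	}
 */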

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
#ifdef DIAGNOSTIC
				panic("lockmgr: no context");
#endif
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will result in a panic.  The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}
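
/*
 * Illustrative sketch of how the routine above and
 * spinlock_acquire_count() below are meant to pair up around a context
 * switch, per the comments on each.  "lkp" stands for some recursively
 * held spin lock and "hold_count" is an invented local name.
 *
 *	int hold_count;
 *
 *	hold_count = spinlock_release_all(lkp);	  (shortly before switching)
 *	... give up the CPU, later resume here ...
 *	spinlock_acquire_count(lkp, hold_count);  (right after resuming)
 *
 * The recursion depth recorded before the switch is restored exactly
 * once this CPU resumes execution.
 */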

/*
 * For a recursive spinlock, acquire the lock N times on behalf of the
 * current CPU, so that it is held to depth N on return.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock\n",
		    (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}
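
/*
 * Illustrative sketch: the usual pattern for the simple lock primitives
 * implemented in this block.  Callers normally go through the
 * simple_lock()/simple_unlock()/simple_lock_held() wrappers in
 * <sys/lock.h>, which supply the caller's file and line to the
 * _simple_lock() family when LOCKDEBUG is enabled.  "frob_slock" is a
 * hypothetical lock.
 *
 *	struct simplelock frob_slock;
 *
 *	simple_lock_init(&frob_slock);
 *	...
 *	simple_lock(&frob_slock);
 *	KASSERT(simple_lock_held(&frob_slock));
 *	... short, non-sleeping critical section ...
 *	simple_unlock(&frob_slock);
 */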

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}
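
/*
 * Illustrative sketch: a LOCKDEBUG-aware free path might use
 * simple_lock_freecheck() to catch memory being released while a simple
 * lock inside it is still on simplelock_list.  frob_free() and its
 * argument are hypothetical.
 *
 *	void
 *	frob_free(struct frob *f)
 *	{
 *
 *	#if defined(LOCKDEBUG)
 *		simple_lock_freecheck(f, f + 1);
 *	#endif
 *		free(f, M_TEMP);
 *	}
 */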

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */
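
/*
 * Illustrative sketch: the switch-time sanity checks above are intended
 * to run from mi_switch() just before a CPU gives up the processor,
 * guarded by the same options that compile them in here, roughly:
 *
 *	#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
 *		spinlock_switchcheck();
 *	#endif
 *	#if defined(LOCKDEBUG)
 *		simple_lock_switchcheck();
 *	#endif
 *
 * i.e. at the moment of a context switch a CPU may hold no spin locks,
 * and no simple locks other than sched_lock.
 */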