/*	$NetBSD: kern_lock.c,v 1.52 2001/04/20 22:58:39 thorpej Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 1;	/* defaults to syslog, but can be patched */
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)					\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)				\
do {								\
	if ((lkp)->lk_flags & LK_SPIN)				\
		COUNT_CPU((cpu_id), (x));			\
	else							\
		(p)->p_locks += (x);				\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)			\
do {								\
	if ((flags) & LK_SPIN)					\
		s = splsched();					\
	simple_lock(&(lkp)->lk_interlock);			\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)			\
do {								\
	simple_unlock(&(lkp)->lk_interlock);			\
	if ((flags) & LK_SPIN)					\
		splx(s);					\
} while (0)
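
/*
 * Every routine below brackets its manipulation of the lock with the
 * interlock, raising the IPL first in the spin-lock case.  The pattern
 * (a sketch only, not a complete function) is:
 *
 *	int s;
 *
 *	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
 *	... examine or modify fields of *lkp ...
 *	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
 */
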
#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL		/* nothing */
#define	SPINLOCK_SPINCHECK		/* nothing */
#endif /* LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ? &(lkp)->lk_flags :	\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}
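
/*
 * ACQUIRE() is always entered with the interlock held.  For spin locks
 * it busy-waits, dropping the interlock while spinning and re-taking it
 * before returning; for sleep locks it ltsleep()s on the lock (on
 * lk_flags when draining), letting ltsleep() drop and re-take the
 * interlock.  A typical invocation, from the LK_SHARED path below,
 * waits for exclusive activity to clear:
 *
 *	ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
 *	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
 */
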
#define	SETHOLDER(lkp, pid, cpu_id)				\
do {								\
	if ((lkp)->lk_flags & LK_SPIN)				\
		(lkp)->lk_cpu = cpu_id;				\
	else							\
		(lkp)->lk_lockholder = pid;			\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)				\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?			\
	 ((lkp)->lk_cpu == (cpu_id)) :				\
	 ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup_one((void *)(lkp));				\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
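
/*
 * For example (a sketch only; "foo_slock" and splbio() stand in for
 * whatever lock and IPL a real driver would use), a simple_lock that is
 * shared with an interrupt handler must be taken with interrupts at
 * that priority already blocked:
 *
 *	s = splbio();
 *	simple_lock(&foo_slock);
 *	... manipulate the data covered by foo_slock ...
 *	simple_unlock(&foo_slock);
 *	splx(s);
 */
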
/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
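
/*
 * Typical usage (a sketch; the lock name, priority, and error handling
 * are hypothetical, not taken from any particular caller):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PRIBIO, "foolck", 0, 0);
 *	...
 *	if ((error = lockmgr(&foo_lock, LK_EXCLUSIVE, NULL)) != 0)
 *		return (error);
 *	... exclusive access ...
 *	(void) lockmgr(&foo_lock, LK_RELEASE, NULL);
 */
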
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
#ifdef DIAGNOSTIC
				panic("lockmgr: no context");
#endif
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock is
		 * always released.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;
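
	/*
	 * LK_DRAIN is typically requested just before the memory that
	 * contains the lock is freed or reused (for instance during
	 * vnode reclamation); a hypothetical caller might look like:
	 *
	 *	(void) lockmgr(&obj->o_lock, LK_DRAIN, NULL);
	 *	free(obj, M_TEMP);
	 */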
	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}
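
/*
 * spinlock_release_all() and spinlock_acquire_count() (below) are meant
 * to be used as a pair around a context switch; a sketch, using a
 * hypothetical recursively-held spin lock:
 *
 *	count = spinlock_release_all(&foo_spinlock);
 *	... mi_switch() runs another process ...
 *	spinlock_acquire_count(&foo_spinlock, count);
 */
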
/*
 * For a recursive spinlock that was previously released with
 * spinlock_release_all(), re-acquire it N times (exclusively) on
 * behalf of the current CPU.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock\n",
		    (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}


/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
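
/*
 * A VOP_PRINT routine, for instance, might emit the lock state as part
 * of its one-line summary (a sketch only; the vnode's v_lock member is
 * assumed here):
 *
 *	printf("tag VT_FOO, ...");
 *	lockmgr_printinfo(&vp->v_lock);
 *	printf("\n");
 */
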
#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)						\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
	cpuid_t cpu_id = cpu_number();
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}
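
/*
 * simple_lock_try() is the polling flavour of simple_lock(); a caller
 * (a hypothetical sketch, using a made-up lock name) either backs off
 * or retries when the attempt fails:
 *
 *	if (simple_lock_try(&foo_slock) == 0)
 *		return (EBUSY);
 *	... manipulate the data covered by foo_slock ...
 *	simple_unlock(&foo_slock);
 */
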
int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_switchcheck(void)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	/*
	 * We must be holding exactly one lock: the sched_lock.
	 */

	SCHED_ASSERT_LOCKED();

	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if (alp == &sched_lock)
			continue;
		if (alp->lock_holder == cpu_id) {
			lock_printf("switching with held simple_lock %p "
			    "CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}
#endif /* LOCKDEBUG */ /* } */