/*	$NetBSD: kern_lock.c,v 1.71 2003/02/19 22:34:42 pk Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.71 2003/02/19 22:34:42 pk Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ansi style va_start macro are used for both
 * ansi and traditional c compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(l)->l_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = spllock();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			lock_printf("held by CPU %lu\n",		\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			lock_printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			lock_printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();						\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	SPINLOCK_SPINCHECK_DECL		/* nothing */
#define	SPINLOCK_SPINCHECK		/* nothing */
#endif /* LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, lid, cpu_id)				\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else {								\
		(lkp)->lk_lockholder = pid;				\
		(lkp)->lk_locklwp = lid;				\
	}								\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) :					\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
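
/*
 * Usage sketch (illustrative only, not part of the original interface
 * documentation): a subsystem would typically embed a struct lock in one
 * of its own data structures, initialize it once with lockinit(), and
 * then bracket critical sections with lockmgr().  The structure, field
 * names, and wait message below are hypothetical.
 *
 *	struct frob_softc {
 *		struct lock	sc_lock;
 *		...
 *	};
 *
 *	lockinit(&sc->sc_lock, PRIBIO, "frobsc", 0, 0);
 *
 *	lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);
 *	... modify fields protected by sc_lock ...
 *	lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
 */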

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */
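
/*
 * Illustrative sketch of the IPL rule above (hypothetical driver code,
 * not part of this file): a simple_lock that is also taken by an
 * interrupt handler must be acquired with that interrupt blocked, i.e.
 * at the highest IPL where the lock is needed:
 *
 *	int s;
 *
 *	s = splnet();
 *	simple_lock(&sc->sc_slock);
 *	... touch state shared with the interrupt handler ...
 *	simple_unlock(&sc->sc_slock);
 *	splx(s);
 */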

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, l, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, l, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, l, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, l, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */
int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}
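
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * intended pairing of the routine above with spinlock_acquire_count()
 * below is to save and restore the recursion depth of a spin lock,
 * such as the kernel_lock, across a context switch:
 *
 *	int count;
 *
 *	count = spinlock_release_all(&kernel_lock);
 *	... block, switch to another LWP, resume ...
 *	spinlock_acquire_count(&kernel_lock, count);
 */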

/*
 * For a recursive spinlock that the current CPU released with
 * spinlock_release_all(), re-acquire the lock and restore its
 * recursion count to N.
 * Intended for use in mi_switch() right after resuming execution.
 */
void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
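
/*
 * For example (illustrative, with a hypothetical "vnlock" wait message),
 * a sleep lock held exclusively once by pid 123, lwp 1, would be
 * reported by the routine above as:
 *
 *	 lock type vnlock: EXCL (count 1) by pid 123.1
 */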

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */
void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct lock kernel_lock;

void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}

void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	l->l_flag |= L_BIGLOCK;
}

void
_kernel_proc_unlock(struct lwp *l)
{

	l->l_flag &= ~L_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
#endif /* MULTIPROCESSOR */
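
/*
 * Illustrative sketch (hypothetical callers, not part of this file):
 * the top half of the kernel takes the big lock on behalf of an LWP
 * around code that is not yet MP-safe, while the scheduler and lower
 * half use the plain variants:
 *
 *	_kernel_proc_lock(l);
 *	... run non-MP-safe top-half code ...
 *	_kernel_proc_unlock(l);
 *
 *	_kernel_lock(LK_EXCLUSIVE);
 *	... run non-MP-safe lower-half code ...
 *	_kernel_unlock();
 *
 * In practice these functions are normally reached through wrapper
 * macros in <sys/lock.h> rather than called directly.
 */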