/*	$NetBSD: kern_lock.c,v 1.85 2004/10/26 00:14:46 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.85 2004/10/26 00:14:46 yamt Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ansi style va_start macro are used for both
 * ansi and traditional c compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
	__attribute__((__format__(__printf__,1,2)));

static int acquire(__volatile struct lock **, int *, int, int, int);

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif /* defined(LOCKDEBUG) */

#if defined(MULTIPROCESSOR)
struct simplelock kernel_lock;
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
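
/*
 * Rough usage sketch for a sleep lock.  The lock name, wait message and
 * priority below are arbitrary illustrations, not definitions from this
 * file:
 *
 *	struct lock example_lock;
 *	int error;
 *
 *	lockinit(&example_lock, PRIBIO, "exlock", 0, 0);
 *
 *	error = lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
 *	if (error == 0) {
 *		... access the protected data ...
 *		lockmgr(&example_lock, LK_RELEASE, NULL);
 *	}
 *
 * Readers pass LK_SHARED instead of LK_EXCLUSIVE; spin locks are
 * initialized with LK_SPIN and driven through spinlockmgr().
 */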

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)					\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)				\
do {								\
	if ((lkp)->lk_flags & LK_SPIN)				\
		COUNT_CPU((cpu_id), (x));			\
	else							\
		(l)->l_locks += (x);				\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)			\
do {								\
	if ((flags) & LK_SPIN)					\
		s = spllock();					\
	simple_lock(&(lkp)->lk_interlock);			\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags, s)			\
do {								\
	simple_unlock(&(lkp)->lk_interlock);			\
	if ((flags) & LK_SPIN)					\
		splx(s);					\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()						\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
	    TRUE, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL					\
	/* 32-bits of count -- wrap constitutes a "spinout" */	\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK					\
do {								\
	if (++__spinc == 0) {					\
		lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
		    lkp->lk_exclusivecount, lkp->lk_sharecount); \
		if (lkp->lk_exclusivecount)			\
			lock_printf("held by CPU %lu\n",	\
			    (u_long) lkp->lk_cpu);		\
		if (lkp->lk_lock_file)				\
			lock_printf("last locked at %s:%d\n",	\
			    lkp->lk_lock_file, lkp->lk_lock_line); \
		if (lkp->lk_unlock_file)			\
			lock_printf("last unlocked at %s:%d\n",	\
			    lkp->lk_unlock_file, lkp->lk_unlock_line); \
		SLOCK_TRACE();					\
		SPINLOCK_SPINCHECK_DEBUGGER;			\
	}							\
} while (/*CONSTCOND*/ 0)
#else
#define	SPINLOCK_SPINCHECK_DECL		/* nothing */
#define	SPINLOCK_SPINCHECK		/* nothing */
#endif /* LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 */
static int
acquire(__volatile struct lock **lkpp, int *s, int extflags,
    int drain, int wanted)
{
	int error;
	__volatile struct lock *lkp = *lkpp;

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			error = ltsleep(drain ?
			    (void *)&lkp->lk_flags :
			    (void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			if (lkp->lk_newlock != NULL) {
				simple_lock(&lkp->lk_newlock->lk_interlock);
				simple_unlock(&lkp->lk_interlock);
				if (lkp->lk_waitcount == 0)
					wakeup((void *)&lkp->lk_newlock);
				*lkpp = lkp = lkp->lk_newlock;
			}
		}
	}

	return error;
}

#define	SETHOLDER(lkp, pid, lid, cpu_id)			\
do {								\
	if ((lkp)->lk_flags & LK_SPIN)				\
		(lkp)->lk_cpu = cpu_id;				\
	else {							\
		(lkp)->lk_lockholder = pid;			\
		(lkp)->lk_locklwp = lid;			\
	}							\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)				\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?			\
	 ((lkp)->lk_cpu == (cpu_id)) :				\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)					\
do {								\
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==	\
	    LK_WAIT_NONZERO) {					\
		/* XXX Cast away volatile. */			\
		wakeup((void *)(lkp));				\
	}							\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)						\
do {								\
	if ((lkp)->lk_flags & LK_SPIN) {			\
		int s = spllock();				\
		SPINLOCK_LIST_LOCK();				\
		/* XXX Cast away volatile. */			\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);					\
		SPINLOCK_LIST_UNLOCK();				\
		splx(s);					\
	}							\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)						\
do {								\
	if ((lkp)->lk_flags & LK_SPIN) {			\
		int s = spllock();				\
		SPINLOCK_LIST_LOCK();				\
		/* XXX Cast away volatile. */			\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);					\
		SPINLOCK_LIST_UNLOCK();				\
		splx(s);					\
	}							\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to);
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0);
}


/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_newlock = NULL;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0; /* XXX: gcc */
	int lock_type = 0;
	struct lwp *l = curlwp; /* XXX */
	pid_t pid;
	lwpid_t lid;
	cpuid_t cpu_id;

	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
		cpu_id = cpu_number();
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		cpu_id = LK_NOCPU;
		pid = l->l_proc->p_pid;
		lid = l->l_lid;
	}

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0) {
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
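
/*
 * Concretely, for a simple_lock shared with an interrupt handler the
 * resulting discipline looks roughly like the sketch below.  The lock
 * name and the choice of splvm() are illustrative assumptions; real
 * code must raise to whatever IPL the interrupt handler runs at:
 *
 *	int s;
 *
 *	s = splvm();			(block the interrupt first)
 *	simple_lock(&sc_intr_slock);
 *	... touch data shared with the interrupt handler ...
 *	simple_unlock(&sc_intr_slock);
 *	splx(s);			(only now lower the IPL)
 */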

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	/* LK_RETRY is for vn_lock, not for lockmgr. */
	KASSERT((flags & LK_RETRY) == 0);

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will result in a panic.  The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(&lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request upgrade and wait for the
			 * shared count to drop to zero, then take
			 * exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				WAKEUP_WAITER(lkp);
				break;
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(&lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock released with spinlock_release_all(),
 * reacquire it on behalf of the current CPU and restore its hold
 * count to N.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0,
	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE);
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

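/*
 * The two functions above are intended to be used as a pair around a
 * context switch; a rough sketch, with "lkp" standing for some
 * recursively held spin lock and the switch code itself omitted:
 *
 *	int count;
 *
 *	count = spinlock_release_all(lkp);	(before giving up the CPU)
 *	...					(switch away and back)
 *	spinlock_acquire_count(lkp, count);	(restore the old hold count)
 */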

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)						\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on CPU %ld\n",	\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)				\
do {								\
	lock_printf("\n");					\
	lock_printf(str);					\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();						\
	if ((alp)->lock_file != NULL)				\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
		    (alp)->lock_line);				\
	if ((alp)->unlock_file != NULL)				\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);			\
	SLOCK_TRACE()						\
	SLOCK_DEBUGGER();					\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
#if defined(MULTIPROCESSOR)
		if (alp == &kernel_lock)
			continue;
#endif /* defined(MULTIPROCESSOR) */
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

/*
 * splbiglock: block IPLs which need to grab kernel_lock.
 * XXX splvm or splaudio should be enough.
 */
#if !defined(__HAVE_SPLBIGLOCK)
#define	splbiglock()	splclock()
#endif

void
_kernel_lock_init(void)
{

	simple_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{
	struct cpu_info *ci = curcpu();

	SCHED_ASSERT_UNLOCKED();

	if (ci->ci_data.cpu_biglock_count > 0) {
		LOCK_ASSERT(simple_lock_held(&kernel_lock));
		ci->ci_data.cpu_biglock_count++;
	} else {
		int s;

		s = splbiglock();
		while (!simple_lock_try(&kernel_lock)) {
			splx(s);
			SPINLOCK_SPIN_HOOK;
			s = splbiglock();
		}
		ci->ci_data.cpu_biglock_count++;
		splx(s);
	}
}

void
_kernel_unlock(void)
{
	struct cpu_info *ci = curcpu();
	int s;

	KASSERT(ci->ci_data.cpu_biglock_count > 0);

	s = splbiglock();
	if ((--ci->ci_data.cpu_biglock_count) == 0)
		simple_unlock(&kernel_lock);
	splx(s);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	_kernel_lock(0);
}

void
_kernel_proc_unlock(struct lwp *l)
{

	_kernel_unlock();
}

int
_kernel_lock_release_all()
{
	struct cpu_info *ci = curcpu();
	int hold_count;

	hold_count = ci->ci_data.cpu_biglock_count;

	if (hold_count) {
		int s;

		s = splbiglock();
		ci->ci_data.cpu_biglock_count = 0;
		simple_unlock(&kernel_lock);
		splx(s);
	}

	return hold_count;
}

void
_kernel_lock_acquire_count(int hold_count)
{

	KASSERT(curcpu()->ci_data.cpu_biglock_count == 0);

	if (hold_count != 0) {
		struct cpu_info *ci = curcpu();
		int s;

		s = splbiglock();
		while (!simple_lock_try(&kernel_lock)) {
			splx(s);
			SPINLOCK_SPIN_HOOK;
			s = splbiglock();
		}
		ci->ci_data.cpu_biglock_count = hold_count;
		splx(s);
	}
}
#if defined(DEBUG)
void
_kernel_lock_assert_locked()
{

	LOCK_ASSERT(simple_lock_held(&kernel_lock));
}
#endif
#endif /* MULTIPROCESSOR */
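
/*
 * _kernel_lock_release_all() and _kernel_lock_acquire_count() form a
 * pair much like spinlock_release_all()/spinlock_acquire_count(); a
 * rough sketch of their intended use around a switch away from the
 * current LWP (the surrounding scheduler code is omitted):
 *
 *	int biglocks;
 *
 *	biglocks = _kernel_lock_release_all();
 *	...	run something else, eventually switch back ...
 *	_kernel_lock_acquire_count(biglocks);
 */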