/*	$OpenBSD: kern_lock.c,v 1.11 2001/12/04 21:56:18 millert Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

#include <machine/cpu.h>

void record_stacktrace __P((int *, int));
void playback_stacktrace __P((int *, int));

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if 0
#ifdef DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif
#endif

#define COUNT(p, x)

#if NCPUS > 1

/*
 * For multiprocessor systems, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */
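
/*
 * A schematic (uncompiled) rendering of the "spin briefly, then sleep"
 * acquisition that PAUSE above and ACQUIRE below implement together.
 * The names wanted, chan, prio, wmesg, timo and i are placeholders;
 * the interlock, waitcount and error handling are omitted.
 */
#if 0
	while (wanted) {
		for (i = lock_wait_time; i > 0 && wanted; i--)
			continue;			/* brief spin (MP only) */
		if (!wanted)
			break;				/* spin succeeded */
		(void)tsleep(chan, prio, wmesg, timo);	/* otherwise sleep */
	}
#endif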
/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
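
/*
 * Illustrative only (a minimal, uncompiled sketch): a hypothetical
 * subsystem embeds a struct lock in its own data, initializes it once
 * with lockinit(), and then takes and drops it with lockmgr().  The
 * "foo" names, the PZERO priority and the "foolck" message are
 * placeholders, not part of this file.
 */
#if 0
struct foo {
	struct lock f_lock;		/* hypothetical per-object lock */
	/* ... other fields ... */
};

void
foo_example(fp, p)
	struct foo *fp;
	struct proc *p;
{

	lockinit(&fp->f_lock, PZERO, "foolck", 0, 0);

	/* Exclusive access, sleeping until it can be granted. */
	(void)lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL, p);
	/* ... modify *fp ... */
	(void)lockmgr(&fp->f_lock, LK_RELEASE, NULL, p);

	/* Shared (reader) access; lockstatus() reports LK_SHARED here. */
	(void)lockmgr(&fp->f_lock, LK_SHARED, NULL, p);
	if (lockstatus(&fp->f_lock) == LK_SHARED)
		;			/* other readers may hold it too */
	(void)lockmgr(&fp->f_lock, LK_RELEASE, NULL, p);
}
#endif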
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
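
/*
 * Two common lockmgr() calling patterns, shown only as an uncompiled
 * sketch; the function and its parameters are placeholders, not part
 * of this file.
 */
#if 0
void
foo_lockmgr_patterns(lkp, slp, p)
	struct lock *lkp;
	struct simplelock *slp;
	struct proc *p;
{

	/*
	 * 1. Atomic hand-off from a simple lock to the full lock: the
	 *    caller holds *slp, and LK_INTERLOCK makes lockmgr() drop it
	 *    only after lk_interlock has been taken, so no state change
	 *    can slip in between.
	 */
	simple_lock(slp);
	/* ... examine state protected by *slp ... */
	(void)lockmgr(lkp, LK_EXCLUSIVE | LK_INTERLOCK, slp, p);
	(void)lockmgr(lkp, LK_RELEASE, NULL, p);

	/*
	 * 2. Shared-to-exclusive upgrade: on failure the shared hold has
	 *    already been released, as noted in the LK_UPGRADE case above,
	 *    so only a successful upgrade is followed by LK_RELEASE.
	 */
	(void)lockmgr(lkp, LK_SHARED, NULL, p);
	if (lockmgr(lkp, LK_UPGRADE, NULL, p) == 0)
		(void)lockmgr(lkp, LK_RELEASE, NULL, p);
}
#endif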
/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
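
/*
 * Sketch of the drain protocol implemented by LK_DRAIN above (not
 * compiled; the function name and M_TEMP are placeholders).  LK_DRAIN
 * waits until nobody holds or wants the lock and then grants it
 * exclusively; the single LK_RELEASE that follows marks the lock
 * decommissioned under DIAGNOSTIC (unless LK_REENABLE is given), which
 * is the usual sequence before freeing the structure embedding it.
 */
#if 0
void
foo_decommission(lkp, p)
	struct lock *lkp;
	struct proc *p;
{

	(void)lockmgr(lkp, LK_DRAIN, NULL, p);	/* wait out holders/waiters */
	(void)lockmgr(lkp, LK_RELEASE, NULL, p);
	/* ... the structure embedding *lkp may now be freed ... */
}
#endif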
#if defined(LOCKDEBUG)

int lockdebug_print = 0;
int lockdebug_debugger = 0;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(lkp)
	struct simplelock *lkp;
{

	lkp->lock_data = SLOCK_UNLOCKED;
}

void
_simple_lock(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_LOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_lock: lock held...\n", id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	lkp->lock_data = SLOCK_LOCKED;
}

int
_simple_lock_try(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_LOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_lock: lock held...\n", id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	return lkp->lock_data = SLOCK_LOCKED;
}

void
_simple_unlock(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_UNLOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_unlock: lock not held...\n",
			    id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	lkp->lock_data = SLOCK_UNLOCKED;
}

void
_simple_lock_assert(lkp, state, id, l)
	__volatile struct simplelock *lkp;
	int state;
	const char *id;
	int l;
{
	if (lkp->lock_data != state) {
		if (lockdebug_print)
			printf("%s:%d simple_lock_assert: wrong state: %d",
			    id, l, lkp->lock_data);
		if (lockdebug_debugger)
			Debugger();
	}
}
#endif /* LOCKDEBUG */
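
/*
 * The _simple_*() functions above take an id/l pair so a misuse can be
 * reported at its call site.  Under LOCKDEBUG they are presumably
 * reached through wrapper macros in <sys/lock.h> along the lines of the
 * uncompiled sketch below; this is shown only to illustrate the id and
 * l arguments, and the real definitions live in the header.
 */
#if 0
#define simple_lock(lkp)	_simple_lock((lkp), __FILE__, __LINE__)
#define simple_lock_try(lkp)	_simple_lock_try((lkp), __FILE__, __LINE__)
#define simple_unlock(lkp)	_simple_unlock((lkp), __FILE__, __LINE__)
#endif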