/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.15 (Berkeley) 05/19/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/*
 * COUNT: in DEBUG kernels, track the number of locks held per process
 * in p->p_locks (x is +1 on acquire, -1 on release).  Compiles away
 * entirely in non-DEBUG kernels.
 */
#ifdef DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 *
 * NOTE: PAUSE drops the interlock while spinning and re-takes it before
 * re-testing "wanted"; the trailing "break" means PAUSE (and hence
 * ACQUIRE) may only be used where a break is meaningful — here, inside
 * the switch cases of lockmgr().
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource: spin briefly (MP only), then sleep until "wanted"
 * becomes false.  The interlock is released around tsleep() and re-held
 * when "wanted" is re-evaluated, so the condition is always tested under
 * the interlock.  "error" is set from tsleep(), or to ENOLCK when the
 * caller requested LK_SLEEPFAIL and the sleep completed.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

/*
 * Initialize a lock; required before use.
8468762Smckusick */ 8568782Smckusick void 8669387Smckusick lockinit(lkp, prio, wmesg, timo, flags) 8768762Smckusick struct lock *lkp; 8868762Smckusick int prio; 8968762Smckusick char *wmesg; 9068762Smckusick int timo; 9168762Smckusick int flags; 9268762Smckusick { 9369387Smckusick 9468762Smckusick bzero(lkp, sizeof(struct lock)); 9568933Smckusick simple_lock_init(&lkp->lk_interlock); 9668762Smckusick lkp->lk_flags = flags & LK_EXTFLG_MASK; 9768762Smckusick lkp->lk_prio = prio; 9868762Smckusick lkp->lk_timo = timo; 9968762Smckusick lkp->lk_wmesg = wmesg; 10068762Smckusick lkp->lk_lockholder = LK_NOPROC; 10168762Smckusick } 10268762Smckusick 10368762Smckusick /* 10468780Smckusick * Determine the status of a lock. 10568780Smckusick */ 10668780Smckusick int 10768780Smckusick lockstatus(lkp) 10868780Smckusick struct lock *lkp; 10968780Smckusick { 11068780Smckusick int lock_type = 0; 11168780Smckusick 11268933Smckusick simple_lock(&lkp->lk_interlock); 11368780Smckusick if (lkp->lk_exclusivecount != 0) 11468780Smckusick lock_type = LK_EXCLUSIVE; 11568780Smckusick else if (lkp->lk_sharecount != 0) 11668780Smckusick lock_type = LK_SHARED; 11768933Smckusick simple_unlock(&lkp->lk_interlock); 11868780Smckusick return (lock_type); 11968780Smckusick } 12068780Smckusick 12168780Smckusick /* 12268762Smckusick * Set, change, or release a lock. 12368762Smckusick * 12468762Smckusick * Shared requests increment the shared count. Exclusive requests set the 12568762Smckusick * LK_WANT_EXCL flag (preventing further shared locks), and wait for already 12668762Smckusick * accepted shared locks and shared-to-exclusive upgrades to go away. 
 */
/*
 * lockmgr:
 *	lkp      -- the lock to operate on
 *	flags    -- one LK_* request type from LK_TYPE_MASK, plus
 *		    external flags (LK_NOWAIT, LK_SLEEPFAIL, ...)
 *	interlkp -- simple lock released here if LK_INTERLOCK is set
 *	p        -- requesting process; NULL means the kernel itself
 *		    (LK_KERNPROC)
 *
 * Returns 0 on success; EBUSY for a failed poll/upgrade race, ENOLCK
 * when LK_SLEEPFAIL fires, or the error from an interrupted tsleep().
 */
int
lockmgr(lkp, flags, interlkp, p)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	/* Per-call flags are merged with the flags set at lockinit(). */
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		/*
		 * Recursive exclusive holds all become shared holds; wake
		 * anyone waiting since shared access is now possible.
		 */
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		/* Wake anyone sleeping in ACQUIRE on this lock. */
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Drain sleeps on &lkp->lk_flags (not lkp) so release
		 * wakeups do not wake us; LK_WAITDRAIN marks that someone
		 * wants the drain wakeup issued at the bottom of lockmgr.
		 * NOTE(review): the error/SLEEPFAIL returns here leave the
		 * interlock released — intentional, as we return directly.
		 */
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/*
	 * If a drainer is waiting and the lock just became idle,
	 * clear the flag and wake it (it sleeps on &lkp->lk_flags).
	 */
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 * NOTE(review): reads the lock fields without taking the interlock;
 * presumably acceptable for a debugging printout — confirm.
 */
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED", lkp->lk_wmesg);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
		    lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
/*
 * lockpausetime selects the diagnostic action when a simple lock is
 * misused: -1 panics, 1 prints a backtrace, >1 sleeps that many seconds
 * before continuing.  Tunable at runtime via the debug sysctl below.
 */
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
/* When nonzero, the simple lock checks below are bypassed entirely. */
int simplelockrecurse;
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
42269387Smckusick */ 42369387Smckusick void 42469387Smckusick simple_lock_init(alp) 42569406Smckusick struct simplelock *alp; 42669387Smckusick { 42769387Smckusick 42869387Smckusick alp->lock_data = 0; 42969387Smckusick } 43069387Smckusick 43169387Smckusick void 43269527Smckusick _simple_lock(alp, id, l) 43369406Smckusick __volatile struct simplelock *alp; 43469527Smckusick const char *id; 43569527Smckusick int l; 43669387Smckusick { 43769387Smckusick 438*69570Smckusick if (simplelockrecurse) 439*69570Smckusick return; 44069461Smckusick if (alp->lock_data == 1) { 44169461Smckusick if (lockpausetime == -1) 44269527Smckusick panic("%s:%d: simple_lock: lock held", id, l); 443*69570Smckusick printf("%s:%d: simple_lock: lock held\n", id, l); 444*69570Smckusick if (lockpausetime == 1) { 44569536Smckusick BACKTRACE(curproc); 446*69570Smckusick } else if (lockpausetime > 1) { 44769527Smckusick printf("%s:%d: simple_lock: lock held...", id, l); 44869461Smckusick tsleep(&lockpausetime, PCATCH | PPAUSE, "slock", 44969461Smckusick lockpausetime * hz); 45069461Smckusick printf(" continuing\n"); 45169461Smckusick } 45269461Smckusick } 45369387Smckusick alp->lock_data = 1; 45469555Spendry if (curproc) 45569555Spendry curproc->p_simple_locks++; 45669387Smckusick } 45769387Smckusick 45869387Smckusick int 45969527Smckusick _simple_lock_try(alp, id, l) 46069406Smckusick __volatile struct simplelock *alp; 46169527Smckusick const char *id; 46269527Smckusick int l; 46369387Smckusick { 46469387Smckusick 465*69570Smckusick if (alp->lock_data) 466*69570Smckusick return (0); 467*69570Smckusick if (simplelockrecurse) 468*69570Smckusick return (1); 46969461Smckusick if (alp->lock_data == 1) { 47069461Smckusick if (lockpausetime == -1) 47169527Smckusick panic("%s:%d: simple_lock_try: lock held", id, l); 472*69570Smckusick printf("%s:%d: simple_lock_try: lock held\n", id, l); 473*69570Smckusick if (lockpausetime == 1) { 47469536Smckusick BACKTRACE(curproc); 475*69570Smckusick } else if 
(lockpausetime > 1) { 47669527Smckusick printf("%s:%d: simple_lock_try: lock held...", id, l); 47769461Smckusick tsleep(&lockpausetime, PCATCH | PPAUSE, "slock", 47869461Smckusick lockpausetime * hz); 47969461Smckusick printf(" continuing\n"); 48069461Smckusick } 48169461Smckusick } 48269387Smckusick alp->lock_data = 1; 48369555Spendry if (curproc) 48469555Spendry curproc->p_simple_locks++; 48569387Smckusick return (1); 48669387Smckusick } 48769387Smckusick 48869387Smckusick void 48969527Smckusick _simple_unlock(alp, id, l) 49069406Smckusick __volatile struct simplelock *alp; 49169527Smckusick const char *id; 49269527Smckusick int l; 49369387Smckusick { 49469387Smckusick 495*69570Smckusick if (simplelockrecurse) 496*69570Smckusick return; 49769461Smckusick if (alp->lock_data == 0) { 49869461Smckusick if (lockpausetime == -1) 49969527Smckusick panic("%s:%d: simple_unlock: lock not held", id, l); 500*69570Smckusick printf("%s:%d: simple_unlock: lock not held\n", id, l); 501*69570Smckusick if (lockpausetime == 1) { 50269536Smckusick BACKTRACE(curproc); 503*69570Smckusick } else if (lockpausetime > 1) { 50469527Smckusick printf("%s:%d: simple_unlock: lock not held...", id, l); 50569461Smckusick tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock", 50669461Smckusick lockpausetime * hz); 50769461Smckusick printf(" continuing\n"); 50869461Smckusick } 50969461Smckusick } 51069387Smckusick alp->lock_data = 0; 51169555Spendry if (curproc) 51269555Spendry curproc->p_simple_locks--; 51369387Smckusick } 51469387Smckusick #endif /* DEBUG && NCPUS == 1 */ 515