/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.12 (Berkeley) 05/17/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/*
 * COUNT: under DEBUG, maintain a per-process count of locks held by
 * bumping p->p_locks by x (+1 on acquire, -1 on release).  No-op when
 * DEBUG is off or when no process is associated with the request.
 * NOTE: the expansion is a bare "if" statement — callers must not place
 * COUNT() where a dangling-else could attach to it.
 */
#ifdef DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 *
 * PAUSE drops the lock's interlock and busy-waits up to lock_wait_time
 * iterations for "wanted" to clear, then retakes the interlock.  If the
 * condition cleared, it executes "break" — so PAUSE may ONLY be used
 * directly inside a loop or switch in the caller (as lockmgr does).
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource: sleep (via tsleep) until "wanted" evaluates false.
 * The interlock is released around each sleep and retaken afterwards;
 * lk_waitcount tracks sleepers so releasers know to wakeup().  Sets
 * "error" to 0 on success, to the tsleep error on interruption/timeout,
 * or to ENOLCK if LK_SLEEPFAIL requests failure after any sleep.
 * Like PAUSE, the expansion contains "break" and therefore must appear
 * directly inside the caller's switch/loop.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

/*
 * Initialize a lock; required before use.
8368762Smckusick */ 8468782Smckusick void 8569387Smckusick lockinit(lkp, prio, wmesg, timo, flags) 8668762Smckusick struct lock *lkp; 8768762Smckusick int prio; 8868762Smckusick char *wmesg; 8968762Smckusick int timo; 9068762Smckusick int flags; 9168762Smckusick { 9269387Smckusick 9368762Smckusick bzero(lkp, sizeof(struct lock)); 9468933Smckusick simple_lock_init(&lkp->lk_interlock); 9568762Smckusick lkp->lk_flags = flags & LK_EXTFLG_MASK; 9668762Smckusick lkp->lk_prio = prio; 9768762Smckusick lkp->lk_timo = timo; 9868762Smckusick lkp->lk_wmesg = wmesg; 9968762Smckusick lkp->lk_lockholder = LK_NOPROC; 10068762Smckusick } 10168762Smckusick 10268762Smckusick /* 10368780Smckusick * Determine the status of a lock. 10468780Smckusick */ 10568780Smckusick int 10668780Smckusick lockstatus(lkp) 10768780Smckusick struct lock *lkp; 10868780Smckusick { 10968780Smckusick int lock_type = 0; 11068780Smckusick 11168933Smckusick simple_lock(&lkp->lk_interlock); 11268780Smckusick if (lkp->lk_exclusivecount != 0) 11368780Smckusick lock_type = LK_EXCLUSIVE; 11468780Smckusick else if (lkp->lk_sharecount != 0) 11568780Smckusick lock_type = LK_SHARED; 11668933Smckusick simple_unlock(&lkp->lk_interlock); 11768780Smckusick return (lock_type); 11868780Smckusick } 11968780Smckusick 12068780Smckusick /* 12168762Smckusick * Set, change, or release a lock. 12268762Smckusick * 12368762Smckusick * Shared requests increment the shared count. Exclusive requests set the 12468762Smckusick * LK_WANT_EXCL flag (preventing further shared locks), and wait for already 12568762Smckusick * accepted shared locks and shared-to-exclusive upgrades to go away. 
12668762Smckusick */ 12768782Smckusick int 12869406Smckusick lockmgr(lkp, flags, interlkp, p) 12968945Smckusick __volatile struct lock *lkp; 13068800Smckusick u_int flags; 13169406Smckusick struct simplelock *interlkp; 13269406Smckusick struct proc *p; 13368762Smckusick { 13468779Smckusick int error; 13569406Smckusick pid_t pid; 13669406Smckusick int extflags; 13768762Smckusick 13868800Smckusick error = 0; 13969406Smckusick if (p) 14069406Smckusick pid = p->p_pid; 14169406Smckusick else 14269406Smckusick pid = LK_KERNPROC; 14368933Smckusick simple_lock(&lkp->lk_interlock); 14468945Smckusick if (flags & LK_INTERLOCK) 14568945Smckusick simple_unlock(interlkp); 14668762Smckusick extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; 14769406Smckusick #ifdef DIAGNOSTIC 14869406Smckusick /* 14969406Smckusick * Once a lock has drained, the LK_DRAINING flag is set and an 15069406Smckusick * exclusive lock is returned. The only valid operation thereafter 15169406Smckusick * is a single release of that exclusive lock. This final release 15269406Smckusick * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any 15369406Smckusick * further requests of any sort will result in a panic. The bits 15469406Smckusick * selected for these two flags are chosen so that they will be set 15569406Smckusick * in memory that is freed (freed memory is filled with 0xdeadbeef). 
15669406Smckusick */ 15769406Smckusick if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) { 15869406Smckusick if (lkp->lk_flags & LK_DRAINED) 15969406Smckusick panic("lockmgr: using decommissioned lock"); 16069406Smckusick if ((flags & LK_TYPE_MASK) != LK_RELEASE || 16169406Smckusick lkp->lk_lockholder != pid) 16269406Smckusick panic("lockmgr: non-release on draining lock: %d\n", 16369406Smckusick flags & LK_TYPE_MASK); 16469406Smckusick lkp->lk_flags &= ~LK_DRAINING; 16569406Smckusick lkp->lk_flags |= LK_DRAINED; 16669406Smckusick } 16769406Smckusick #endif DIAGNOSTIC 16868762Smckusick 16968762Smckusick switch (flags & LK_TYPE_MASK) { 17068762Smckusick 17168762Smckusick case LK_SHARED: 17268762Smckusick if (lkp->lk_lockholder != pid) { 17368762Smckusick /* 17468762Smckusick * If just polling, check to see if we will block. 17568762Smckusick */ 17668762Smckusick if ((extflags & LK_NOWAIT) && (lkp->lk_flags & 17768762Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) { 17868800Smckusick error = EBUSY; 17968800Smckusick break; 18068762Smckusick } 18168762Smckusick /* 18268762Smckusick * Wait for exclusive locks and upgrades to clear. 18368762Smckusick */ 18468762Smckusick ACQUIRE(lkp, error, extflags, lkp->lk_flags & 18568762Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)); 18668800Smckusick if (error) 18768800Smckusick break; 18868762Smckusick lkp->lk_sharecount++; 189*69527Smckusick COUNT(p, 1); 19068800Smckusick break; 19168762Smckusick } 19268762Smckusick /* 19368762Smckusick * We hold an exclusive lock, so downgrade it to shared. 19468762Smckusick * An alternative would be to fail with EDEADLK. 
19568762Smckusick */ 19668762Smckusick lkp->lk_sharecount++; 197*69527Smckusick COUNT(p, 1); 19868762Smckusick /* fall into downgrade */ 19968762Smckusick 20068762Smckusick case LK_DOWNGRADE: 20168762Smckusick if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0) 20268762Smckusick panic("lockmgr: not holding exclusive lock"); 20368762Smckusick lkp->lk_sharecount += lkp->lk_exclusivecount; 20468762Smckusick lkp->lk_exclusivecount = 0; 20568762Smckusick lkp->lk_flags &= ~LK_HAVE_EXCL; 20668762Smckusick lkp->lk_lockholder = LK_NOPROC; 20768800Smckusick if (lkp->lk_waitcount) 20868779Smckusick wakeup((void *)lkp); 20968800Smckusick break; 21068762Smckusick 21168779Smckusick case LK_EXCLUPGRADE: 21268779Smckusick /* 21368779Smckusick * If another process is ahead of us to get an upgrade, 21468779Smckusick * then we want to fail rather than have an intervening 21568779Smckusick * exclusive access. 21668779Smckusick */ 21768779Smckusick if (lkp->lk_flags & LK_WANT_UPGRADE) { 21868779Smckusick lkp->lk_sharecount--; 219*69527Smckusick COUNT(p, -1); 22068800Smckusick error = EBUSY; 22168800Smckusick break; 22268779Smckusick } 22368779Smckusick /* fall into normal upgrade */ 22468779Smckusick 22568762Smckusick case LK_UPGRADE: 22668762Smckusick /* 22768762Smckusick * Upgrade a shared lock to an exclusive one. If another 22868762Smckusick * shared lock has already requested an upgrade to an 22968762Smckusick * exclusive lock, our shared lock is released and an 23068762Smckusick * exclusive lock is requested (which will be granted 23168775Smckusick * after the upgrade). If we return an error, the file 23268775Smckusick * will always be unlocked. 23368762Smckusick */ 23468762Smckusick if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0) 23568762Smckusick panic("lockmgr: upgrade exclusive lock"); 23668775Smckusick lkp->lk_sharecount--; 237*69527Smckusick COUNT(p, -1); 23868762Smckusick /* 23968762Smckusick * If we are just polling, check to see if we will block. 
24068762Smckusick */ 24168762Smckusick if ((extflags & LK_NOWAIT) && 24268762Smckusick ((lkp->lk_flags & LK_WANT_UPGRADE) || 24368762Smckusick lkp->lk_sharecount > 1)) { 24468800Smckusick error = EBUSY; 24568800Smckusick break; 24668762Smckusick } 24768762Smckusick if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { 24868762Smckusick /* 24968762Smckusick * We are first shared lock to request an upgrade, so 25068762Smckusick * request upgrade and wait for the shared count to 25168762Smckusick * drop to zero, then take exclusive lock. 25268762Smckusick */ 25368762Smckusick lkp->lk_flags |= LK_WANT_UPGRADE; 25468762Smckusick ACQUIRE(lkp, error, extflags, lkp->lk_sharecount); 25568762Smckusick lkp->lk_flags &= ~LK_WANT_UPGRADE; 25668800Smckusick if (error) 25768800Smckusick break; 25868762Smckusick lkp->lk_flags |= LK_HAVE_EXCL; 25968762Smckusick lkp->lk_lockholder = pid; 26068762Smckusick if (lkp->lk_exclusivecount != 0) 26168762Smckusick panic("lockmgr: non-zero exclusive count"); 26268762Smckusick lkp->lk_exclusivecount = 1; 263*69527Smckusick COUNT(p, 1); 26468800Smckusick break; 26568762Smckusick } 26668762Smckusick /* 26768762Smckusick * Someone else has requested upgrade. Release our shared 26868762Smckusick * lock, awaken upgrade requestor if we are the last shared 26968762Smckusick * lock, then request an exclusive lock. 27068762Smckusick */ 27168800Smckusick if (lkp->lk_sharecount == 0 && lkp->lk_waitcount) 27268779Smckusick wakeup((void *)lkp); 27368762Smckusick /* fall into exclusive request */ 27468762Smckusick 27568762Smckusick case LK_EXCLUSIVE: 27669461Smckusick if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) { 27768762Smckusick /* 27868762Smckusick * Recursive lock. 
27968762Smckusick */ 28068762Smckusick if ((extflags & LK_CANRECURSE) == 0) 28168762Smckusick panic("lockmgr: locking against myself"); 28268762Smckusick lkp->lk_exclusivecount++; 283*69527Smckusick COUNT(p, 1); 28468800Smckusick break; 28568762Smckusick } 28668762Smckusick /* 28768762Smckusick * If we are just polling, check to see if we will sleep. 28868762Smckusick */ 28968762Smckusick if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & 29068762Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 29168762Smckusick lkp->lk_sharecount != 0)) { 29268800Smckusick error = EBUSY; 29368800Smckusick break; 29468762Smckusick } 29568762Smckusick /* 29668762Smckusick * Try to acquire the want_exclusive flag. 29768762Smckusick */ 29868762Smckusick ACQUIRE(lkp, error, extflags, lkp->lk_flags & 29968762Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL)); 30068800Smckusick if (error) 30168800Smckusick break; 30268762Smckusick lkp->lk_flags |= LK_WANT_EXCL; 30368762Smckusick /* 30468762Smckusick * Wait for shared locks and upgrades to finish. 
30568762Smckusick */ 30668762Smckusick ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 || 30768762Smckusick (lkp->lk_flags & LK_WANT_UPGRADE)); 30868762Smckusick lkp->lk_flags &= ~LK_WANT_EXCL; 30968800Smckusick if (error) 31068800Smckusick break; 31168762Smckusick lkp->lk_flags |= LK_HAVE_EXCL; 31268762Smckusick lkp->lk_lockholder = pid; 31368762Smckusick if (lkp->lk_exclusivecount != 0) 31468762Smckusick panic("lockmgr: non-zero exclusive count"); 31568762Smckusick lkp->lk_exclusivecount = 1; 316*69527Smckusick COUNT(p, 1); 31768800Smckusick break; 31868762Smckusick 31968762Smckusick case LK_RELEASE: 32068762Smckusick if (lkp->lk_exclusivecount != 0) { 32168800Smckusick if (pid != lkp->lk_lockholder) 32268800Smckusick panic("lockmgr: pid %d, not %s %d unlocking", 32368800Smckusick pid, "exclusive lock holder", 32468800Smckusick lkp->lk_lockholder); 32568762Smckusick lkp->lk_exclusivecount--; 326*69527Smckusick COUNT(p, -1); 32768762Smckusick if (lkp->lk_exclusivecount == 0) { 32868762Smckusick lkp->lk_flags &= ~LK_HAVE_EXCL; 32968762Smckusick lkp->lk_lockholder = LK_NOPROC; 33068762Smckusick } 331*69527Smckusick } else if (lkp->lk_sharecount != 0) { 33268762Smckusick lkp->lk_sharecount--; 333*69527Smckusick COUNT(p, -1); 334*69527Smckusick } 33568800Smckusick if (lkp->lk_waitcount) 33668779Smckusick wakeup((void *)lkp); 33768800Smckusick break; 33868800Smckusick 33968800Smckusick case LK_DRAIN: 34068800Smckusick /* 34169406Smckusick * Check that we do not already hold the lock, as it can 34269406Smckusick * never drain if we do. Unfortunately, we have no way to 34369406Smckusick * check for holding a shared lock, but at least we can 34469406Smckusick * check for an exclusive one. 34569406Smckusick */ 34669406Smckusick if (lkp->lk_lockholder == pid) 34769406Smckusick panic("lockmgr: draining against myself"); 34869406Smckusick /* 34968800Smckusick * If we are just polling, check to see if we will sleep. 
35068800Smckusick */ 35168800Smckusick if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & 35268800Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 35368800Smckusick lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) { 35468800Smckusick error = EBUSY; 35568800Smckusick break; 35668762Smckusick } 35768800Smckusick PAUSE(lkp, ((lkp->lk_flags & 35868800Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 35968800Smckusick lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)); 36068800Smckusick for (error = 0; ((lkp->lk_flags & 36168800Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 36268800Smckusick lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) { 36368800Smckusick lkp->lk_flags |= LK_WAITDRAIN; 36468933Smckusick simple_unlock(&lkp->lk_interlock); 36568800Smckusick if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio, 36668800Smckusick lkp->lk_wmesg, lkp->lk_timo)) 36768800Smckusick return (error); 36868800Smckusick if ((extflags) & LK_SLEEPFAIL) 36968800Smckusick return (ENOLCK); 37068933Smckusick simple_lock(&lkp->lk_interlock); 37168800Smckusick } 37269406Smckusick lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL; 37368945Smckusick lkp->lk_lockholder = pid; 37468945Smckusick lkp->lk_exclusivecount = 1; 375*69527Smckusick COUNT(p, 1); 37668800Smckusick break; 37768762Smckusick 37868762Smckusick default: 37968933Smckusick simple_unlock(&lkp->lk_interlock); 38068762Smckusick panic("lockmgr: unknown locktype request %d", 38168762Smckusick flags & LK_TYPE_MASK); 38268775Smckusick /* NOTREACHED */ 38368762Smckusick } 38468800Smckusick if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags & 38568800Smckusick (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 && 38668800Smckusick lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) { 38768800Smckusick lkp->lk_flags &= ~LK_WAITDRAIN; 38868800Smckusick wakeup((void *)&lkp->lk_flags); 38968800Smckusick } 39068933Smckusick simple_unlock(&lkp->lk_interlock); 39168800Smckusick return 
(error); 39268762Smckusick } 39368945Smckusick 39469387Smckusick /* 39569387Smckusick * Print out information about state of a lock. Used by VOP_PRINT 39669387Smckusick * routines to display ststus about contained locks. 39769387Smckusick */ 39868945Smckusick lockmgr_printinfo(lkp) 39968945Smckusick struct lock *lkp; 40068945Smckusick { 40168945Smckusick 40268945Smckusick if (lkp->lk_sharecount) 40368945Smckusick printf(" lock type %s: SHARED", lkp->lk_wmesg); 40468945Smckusick else if (lkp->lk_flags & LK_HAVE_EXCL) 40568945Smckusick printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg, 40668945Smckusick lkp->lk_lockholder); 40768945Smckusick if (lkp->lk_waitcount > 0) 40868945Smckusick printf(" with %d pending", lkp->lk_waitcount); 40968945Smckusick } 41069387Smckusick 41169387Smckusick #if defined(DEBUG) && NCPUS == 1 41269461Smckusick #include <sys/kernel.h> 41369461Smckusick #include <vm/vm.h> 41469461Smckusick #include <sys/sysctl.h> 41569461Smckusick int lockpausetime = 1; 41669461Smckusick struct ctldebug debug2 = { "lockpausetime", &lockpausetime }; 41769387Smckusick /* 41869387Smckusick * Simple lock functions so that the debugger can see from whence 41969387Smckusick * they are being called. 
 */
/* Reset a simple lock to the released state; required before first use. */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

/*
 * Debug version of simple_lock().  On a uniprocessor the lock can never
 * legitimately be held on entry, so a set lock_data is reported as an
 * error according to lockpausetime (-1 panic, 0 print, >0 print and
 * pause), then the lock is taken anyway.
 * NOTE(review): id/l appear to be the caller's file and line, presumably
 * supplied by a simple_lock() wrapper macro — confirm against sys/lock.h.
 */
void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		if (lockpausetime == 0) {
			printf("%s:%d: simple_lock: lock held\n", id, l);
		} else if (lockpausetime > 0) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
}

/*
 * Debug version of simple_lock_try(): take the lock and return 1, or
 * return 0 if it is already held.  Unlike _simple_lock(), a held lock
 * is a normal outcome here, so the diagnostic block below is disabled
 * (kept commented out, not deleted, so it can be re-enabled while
 * debugging).
 */
int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	/*
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock_try: lock held", id, l);
		if (lockpausetime == 0) {
			printf("%s:%d: simple_lock_try: lock held\n", id, l);
		} else if (lockpausetime > 0) {
			printf("%s:%d: simple_lock_try: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	*/
	if (alp->lock_data)
		return (0);

	alp->lock_data = 1;
	return (1);
}

/*
 * Debug version of simple_unlock().  Releasing a lock that is not held
 * is an error, reported according to lockpausetime (see above); the
 * lock is cleared regardless.
 */
void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		if (lockpausetime == 0) {
			printf("%s:%d: simple_unlock: lock not held\n", id, l);
		} else if (lockpausetime > 0) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
}
#endif /* DEBUG && NCPUS == 1 */