/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.1 (Berkeley) 04/09/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be expanded inline below, but we cannot have a #if
 * inside a multi-line define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		atomic_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		atomic_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor, as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep(lkp, (lkp)->lk_prio, (lkp)->lk_wmesg,	\
		    (lkp)->lk_timo);					\
		atomic_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_flags |= LK_SLEPT;				\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
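
/*
 * A note on the macros above: "wanted" is a condition expression, not a
 * precomputed value, so PAUSE and ACQUIRE re-evaluate it on each spin
 * iteration and after each tsleep() wakeup.  A call such as
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0);
 *
 * therefore sleeps until no shared holders remain, or until the sleep
 * itself returns an error (or LK_SLEEPFAIL forces ENOLCK after a sleep).
 */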

/*
 * Initialize a lock; required before use.
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
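
/*
 * Illustrative example (hypothetical names, not taken from this file):
 * a lock embedded in some data structure is set up once with lock_init()
 * before the first lockmgr() call on it, e.g.
 *
 *	struct lock foo_lock;
 *
 *	lock_init(&foo_lock, PINOD, "foolck", 0, 0);
 *
 * where PINOD is the tsleep() priority, "foolck" is the wait message,
 * and the timeout and external flags are both zero.
 */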

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, p, flags)
	struct lock *lkp;
	struct proc *p;
	int flags;
{
	pid_t pid;
	int error, extflags;

	pid = p->p_pid;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	lkp->lk_flags &= ~LK_SLEPT;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		atomic_unlock(&lkp->lk_interlock);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).
		 */
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		lkp->lk_sharecount--;
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested the upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
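
/*
 * Usage sketch (illustrative only; foo_lock and the surrounding code are
 * hypothetical, not part of these sources).  A caller acquires the lock in
 * the mode it needs and releases it with LK_RELEASE when done:
 *
 *	struct proc *p = curproc;
 *	int error;
 *
 *	error = lockmgr(&foo_lock, p, LK_SHARED);
 *	if (error == 0) {
 *		... read the protected data ...
 *		(void) lockmgr(&foo_lock, p, LK_RELEASE);
 *	}
 *
 *	if (lockmgr(&foo_lock, p, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *		... modify the protected data ...
 *		(void) lockmgr(&foo_lock, p, LK_RELEASE);
 *	}
 *
 * A holder of a shared lock may also ask for LK_UPGRADE to obtain the
 * exclusive lock, or, while holding the exclusive lock, use LK_DOWNGRADE
 * to convert it back to a shared lock.
 */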