/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.9 (Berkeley) 05/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)					\
	if (lock_wait_time > 0) {				\
		int i;						\
								\
		simple_unlock(&lkp->lk_interlock);		\
		for (i = lock_wait_time; i > 0; i--)		\
			if (!(wanted))				\
				break;				\
		simple_lock(&lkp->lk_interlock);		\
	}							\
	if (!(wanted))						\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)			\
	PAUSE(lkp, wanted);					\
	for (error = 0; wanted; ) {				\
		(lkp)->lk_waitcount++;				\
		simple_unlock(&(lkp)->lk_interlock);		\
		error = tsleep((void *)lkp, (lkp)->lk_prio,	\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);		\
		simple_lock(&(lkp)->lk_interlock);		\
		(lkp)->lk_waitcount--;				\
		if (error)					\
			break;					\
		if ((extflags) & LK_SLEEPFAIL) {		\
			error = ENOLCK;				\
			break;					\
		}						\
	}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
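/*
 * Illustrative sketch (not part of the original file): how a typical
 * client might embed and initialize one of these locks.  The demo_lock
 * object, the PINOD sleep priority, the "demolk" message, and the zero
 * timeout are assumptions of a hypothetical caller, not requirements
 * of the interface.
 */
#ifdef notdef
struct lock demo_lock;

void
demo_lock_setup()
{

	/* A zero timeout means sleep until awakened; no external flags. */
	lockinit(&demo_lock, PINOD, "demolk", 0, 0);
	/* A freshly initialized lock reports neither SHARED nor EXCLUSIVE. */
	if (lockstatus(&demo_lock) != 0)
		panic("demo_lock_setup: new lock already held");
}
#endif /* notdef */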
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, pid)
	__volatile struct lock *lkp;
	u_int flags;
	struct simple_lock *interlkp;
	pid_t pid;
{
	int error;
	__volatile int extflags;

	error = 0;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	if ((lkp->lk_flags & LK_DRAINED) &&
	    (((flags & LK_TYPE_MASK) != LK_RELEASE) ||
	    lkp->lk_lockholder != pid))
		panic("lockmgr: using decommissioned lock");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINED | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
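/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller taking a shared lock, upgrading it to exclusive, and releasing
 * it.  The demo_lock object (declared in the earlier sketch) and the use
 * of curproc->p_pid are assumptions of the example; callers supply
 * whatever pid identifies the locker.
 */
#ifdef notdef
int
demo_read_modify()
{
	pid_t pid = curproc->p_pid;
	int error;

	if ((error = lockmgr(&demo_lock, LK_SHARED,
	    (struct simple_lock *)0, pid)) != 0)
		return (error);
	/* ... read under the shared lock ... */
	if ((error = lockmgr(&demo_lock, LK_UPGRADE,
	    (struct simple_lock *)0, pid)) != 0)
		return (error);	/* on upgrade failure the lock is gone */
	/* ... modify under the exclusive lock ... */
	return (lockmgr(&demo_lock, LK_RELEASE, (struct simple_lock *)0, pid));
}
#endif /* notdef */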
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED", lkp->lk_wmesg);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
		    lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
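/*
 * Illustrative sketch (not part of the original file): draining a lock
 * before freeing the structure that contains it.  Once LK_DRAIN succeeds
 * the lock is decommissioned: any later lockmgr call on it, other than
 * the holder's LK_RELEASE, panics with "using decommissioned lock".
 */
#ifdef notdef
int
demo_lock_teardown()
{
	pid_t pid = curproc->p_pid;
	int error;

	if ((error = lockmgr(&demo_lock, LK_DRAIN,
	    (struct simple_lock *)0, pid)) != 0)
		return (error);
	/* ... tear down the enclosing object ... */
	return (lockmgr(&demo_lock, LK_RELEASE, (struct simple_lock *)0, pid));
}
#endif /* notdef */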
#if defined(DEBUG) && NCPUS == 1
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simple_lock *alp;
{

	alp->lock_data = 0;
}

void
simple_lock(alp)
	__volatile struct simple_lock *alp;
{

	if (alp->lock_data == 1)
		panic("simple_lock: lock held");
	alp->lock_data = 1;
}

int
simple_lock_try(alp)
	__volatile struct simple_lock *alp;
{

	if (alp->lock_data == 1)
		panic("simple_lock_try: lock held");
	alp->lock_data = 1;
	return (1);
}

void
simple_unlock(alp)
	__volatile struct simple_lock *alp;
{

	if (alp->lock_data == 0)
		panic("simple_unlock: lock not held");
	alp->lock_data = 0;
}
#endif /* DEBUG && NCPUS == 1 */
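/*
 * Illustrative sketch (not part of the original file): the LK_INTERLOCK
 * hand-off.  A caller already holding its own simple lock around the
 * decision to block can pass that lock in; lockmgr drops it only after
 * acquiring lk_interlock, so no wakeup is lost in between.  The
 * demo_interlock object is an assumption of the example.
 */
#ifdef notdef
struct simple_lock demo_interlock;

int
demo_locked_handoff()
{

	simple_lock(&demo_interlock);
	/* ... examine state protected by demo_interlock ... */
	return (lockmgr(&demo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &demo_interlock, curproc->p_pid));
}
#endif /* notdef */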