/*	$OpenBSD: kern_lock.c,v 1.15 2003/06/02 23:28:05 millert Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

#include <machine/cpu.h>

void record_stacktrace(int *, int);
void playback_stacktrace(int *, int);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if 0
#ifdef DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif
#endif

#define COUNT(p, x)

#if NCPUS > 1

/*
 * On a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
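
/*
 * Usage note (illustrative): PAUSE() and ACQUIRE() both assume that
 * the caller already holds lkp->lk_interlock.  The interlock is
 * dropped around the spin or the tsleep() and is reacquired before
 * the wanted-condition is tested again, so the condition is always
 * evaluated under the interlock.  The calling pattern, as expanded
 * inside the switch cases of lockmgr() below, is roughly:
 *
 *	simple_lock(&lkp->lk_interlock);
 *	ACQUIRE(lkp, error, extflags,
 *	    lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL));
 *	if (error == 0)
 *		... condition has cleared, interlock still held ...
 */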

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
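
/*
 * Example (illustrative only; the names are invented for the sketch):
 * a typical caller embeds a struct lock in its own structure and
 * initializes it once before first use, here sleeping at PINOD
 * priority with the wait message "examplk", no timeout, and no
 * external flags:
 *
 *	struct example_softc {
 *		struct lock sc_lock;
 *	} sc;
 *
 *	lockinit(&sc.sc_lock, PINOD, "examplk", 0, 0);
 */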

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
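
/*
 * Example (illustrative, reusing the invented sc_lock from the sketch
 * above): the return value is LK_EXCLUSIVE, LK_SHARED, or 0 when the
 * lock is unheld, so a caller can cheaply sanity-check its own state:
 *
 *	if (lockstatus(&sc.sc_lock) != LK_EXCLUSIVE)
 *		panic("example: not holding sc_lock exclusively");
 */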

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	/*
	 * Check if the caller is asking us to be schizophrenic.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		} else
			panic("lockmgr: LK_RELEASE of unlocked lock");
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
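
/*
 * Example (illustrative only; "sc" is the invented structure from the
 * lockinit() sketch above): a reader takes and releases a shared lock;
 * a writer tries the race-free upgrade first and falls back to a fresh
 * exclusive request if another upgrader was ahead of it (EBUSY from
 * LK_EXCLUPGRADE means our shared lock has already been released):
 *
 *	lockmgr(&sc.sc_lock, LK_SHARED, NULL, curproc);
 *	... read the protected data ...
 *	if (lockmgr(&sc.sc_lock, LK_EXCLUPGRADE, NULL, curproc) == EBUSY)
 *		lockmgr(&sc.sc_lock, LK_EXCLUSIVE, NULL, curproc);
 *	... modify the protected data ...
 *	lockmgr(&sc.sc_lock, LK_RELEASE, NULL, curproc);
 *
 * Before tearing the containing structure down, a caller would
 * typically drain the lock so no other process is left sleeping on it,
 * then issue the single permitted release:
 *
 *	lockmgr(&sc.sc_lock, LK_DRAIN, NULL, curproc);
 *	lockmgr(&sc.sc_lock, LK_RELEASE, NULL, curproc);
 */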

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
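
/*
 * Example output (illustrative; pieced together from the formats
 * above, with invented values):
 *
 *	 lock type inode: EXCL (count 1) by pid 123 with 2 pending
 */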

#if defined(LOCKDEBUG)

int lockdebug_print = 0;
int lockdebug_debugger = 0;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(lkp)
	struct simplelock *lkp;
{

	lkp->lock_data = SLOCK_UNLOCKED;
}

void
_simple_lock(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_LOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_lock: lock held...\n", id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	lkp->lock_data = SLOCK_LOCKED;
}


int
_simple_lock_try(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_LOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_lock_try: lock held...\n", id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	/*
	 * This debug stub cannot fail: the value of the assignment
	 * (SLOCK_LOCKED) is returned, so the try always reports success.
	 */
	return lkp->lock_data = SLOCK_LOCKED;
}

void
_simple_unlock(lkp, id, l)
	__volatile struct simplelock *lkp;
	const char *id;
	int l;
{

	if (lkp->lock_data == SLOCK_UNLOCKED) {
		if (lockdebug_print)
			printf("%s:%d simple_unlock: lock not held...\n",
			       id, l);
		if (lockdebug_debugger)
			Debugger();
	}
	lkp->lock_data = SLOCK_UNLOCKED;
}
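
/*
 * Example (illustrative; "slock" is an invented name): these wrappers
 * are normally reached through the simple_lock()/simple_unlock()
 * macros, which supply the file and line of the call site, so a
 * critical section is written as usual:
 *
 *	struct simplelock slock;
 *
 *	simple_lock_init(&slock);
 *	simple_lock(&slock);
 *	... short critical section, no sleeping ...
 *	simple_unlock(&slock);
 */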

void
_simple_lock_assert(lkp, state, id, l)
	__volatile struct simplelock *lkp;
	int state;
	const char *id;
	int l;
{
	if (lkp->lock_data != state) {
		if (lockdebug_print)
			printf("%s:%d simple_lock_assert: wrong state: %d\n",
			       id, l, lkp->lock_data);
		if (lockdebug_debugger)
			Debugger();
	}
}
#endif /* LOCKDEBUG */