/*	$OpenBSD: kern_lock.c,v 1.26 2007/04/11 12:06:37 miod Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifndef spllock
#define spllock() splhigh()
#endif

#ifdef MULTIPROCESSOR
#define CPU_NUMBER() cpu_number()
#else
#define CPU_NUMBER() 0
#endif

void record_stacktrace(int *, int);
void playback_stacktrace(int *, int);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
#include <machine/stdarg.h>

/*
 * Needed by lock_printf() below: output defaults to the console, but
 * lock_debug_syslog can be set non-zero (e.g. from ddb) to route it
 * through syslog instead.
 */
int	lock_debug_syslog = 0;
#endif /* LOCKDEBUG */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#define	COUNT(lkp, p, cpu_id, x)					\
	(p)->p_locks += (x)
#else
#define COUNT(lkp, p, cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#define	INTERLOCK_ACQUIRE(lkp, flags)					\
do {									\
	simple_lock(&(lkp)->lk_interlock);				\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags)					\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

/*
 * Acquire a resource.
 */
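/*
 * ACQUIRE(lkp, error, extflags, drain, wanted) sleeps until the "wanted"
 * condition clears: "error" receives the ltsleep() result, "drain"
 * selects the LK_WAITDRAIN channel instead of the regular wait count,
 * and LK_SLEEPFAIL in "extflags" turns a successful wakeup into ENOLCK
 * so the caller restarts from scratch.
 */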
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
do {									\
	for (error = 0; wanted; ) {					\
		if ((drain))						\
			(lkp)->lk_flags |= LK_WAITDRAIN;		\
		else							\
			(lkp)->lk_waitcount++;				\
		/* XXX Cast away volatile. */				\
		error = ltsleep((drain) ?				\
		    (void *)&(lkp)->lk_flags : (void *)(lkp),		\
		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo,	\
		    &(lkp)->lk_interlock);				\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}								\
} while (0)

#define	SETHOLDER(lkp, pid, cpu_id)					\
	(lkp)->lk_lockholder = (pid)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	((lkp)->lk_lockholder == (pid))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if ((lkp)->lk_waitcount) {					\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);
#endif /* LOCKDEBUG */ /* } */

#define	HAVEIT(lkp)							\
do {									\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
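
/*
 * Usage sketch (hypothetical caller, not part of this file): a subsystem
 * embeds a struct lock in its own data structure and initializes it once
 * before any lockmgr() calls, e.g.
 *
 *	struct frob {
 *		struct lock f_lock;
 *		...
 *	};
 *
 *	lockinit(&fp->f_lock, PRIBIO, "frobln", 0, 0);
 *
 * "frob", "f_lock" and the PRIBIO sleep priority are illustrative only;
 * the prio/wmesg/timo arguments are simply stored here and handed to the
 * ltsleep() calls made from the ACQUIRE() macro above.
 */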

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags);
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
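/*
 * Typical call patterns (hypothetical callers, shown only to illustrate
 * the flag combinations handled below):
 *
 *	error = lockmgr(&fp->f_lock, LK_SHARED, NULL);		read access
 *	...
 *	lockmgr(&fp->f_lock, LK_RELEASE, NULL);
 *
 *	error = lockmgr(&fp->f_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL);
 *	if (error == EBUSY)
 *		... someone else holds or wants the lock ...
 *
 * A caller that already holds "interlkp" may pass it together with
 * LK_INTERLOCK; it is dropped once lk_interlock has been taken.
 */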
int
lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("lockmgr: process context required");
#endif
	/* Process context required. */
	pid = p->p_pid;
	cpu_id = CPU_NUMBER();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}
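
	/*
	 * Restated as a sketch of a hypothetical caller decommissioning
	 * an object that embeds such a lock:
	 *
	 *	lockmgr(&obj->o_lock, LK_DRAIN, NULL);	  waits out all users,
	 *						  returns it exclusive
	 *	lockmgr(&obj->o_lock, LK_RELEASE, NULL);  sets LK_DRAINED; any
	 *						  further use panics
	 */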

	/*
	 * Check if the caller is asking us to be schizophrenic.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);

		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking",
				    pid, lkp->lk_lockholder);
			}
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	INTERLOCK_RELEASE(lkp, lkp->lk_flags);
	return (error);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG)
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
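/*
 * Callers do not normally invoke these directly: when LOCKDEBUG is
 * enabled, <sys/lock.h> is expected to map the simple_lock() family onto
 * the _simple_lock() variants, roughly
 *
 *	simple_lock(alp)	->	_simple_lock((alp), __FILE__, __LINE__)
 *
 * so that the "id"/"l" arguments below record where the lock was last
 * taken and released.  (The exact wrapper macros live in sys/lock.h,
 * not in this file.)
 */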
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == CPU_NUMBER());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

#ifdef notyet
void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}
#endif

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct __mp_lock kernel_lock;

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

/* XXX The flag should go, all callers want equal behaviour. */
void
_kernel_lock(int flag)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct proc *p)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
	atomic_setbits_int(&p->p_flag, P_BIGLOCK);
}

void
_kernel_proc_unlock(struct proc *p)
{
	atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
	__mp_unlock(&kernel_lock);
}
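
/*
 * Sketch of how the two interfaces are meant to be used (hypothetical
 * call sites, not taken from this file): scheduler and lower-half code
 * brackets its critical section with
 *
 *	_kernel_lock(0);
 *	... lower-half work under the big lock ...
 *	_kernel_unlock();
 *
 * while process context uses the *_proc_* variants so that P_BIGLOCK
 * records, per process, that the big lock is held:
 *
 *	_kernel_proc_lock(curproc);
 *	... top-half work under the big lock ...
 *	_kernel_proc_unlock(curproc);
 *
 * In practice callers normally go through the wrapper macros in the
 * system headers rather than calling these underscored functions
 * directly.
 */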

#ifdef MP_LOCKDEBUG
/* CPU-dependent timing, needs this to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif

#endif /* MULTIPROCESSOR */