xref: /openbsd-src/sys/kern/kern_lock.c (revision 8632a1ed36d70b76f2b42d46a8c09fe7adb33c4f)
1 /*	$OpenBSD: kern_lock.c,v 1.31 2007/11/26 15:23:26 art Exp $	*/
2 
3 /*
4  * Copyright (c) 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code contains ideas from software contributed to Berkeley by
8  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
9  * System project at Carnegie-Mellon University.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
36  */
37 
38 #include <sys/param.h>
39 #include <sys/proc.h>
40 #include <sys/lock.h>
41 #include <sys/systm.h>
42 #include <sys/sched.h>
43 
44 #include <machine/cpu.h>
45 
46 #ifndef spllock
47 #define spllock() splhigh()
48 #endif
49 
50 #ifdef MULTIPROCESSOR
51 #define CPU_NUMBER() cpu_number()
52 #else
53 #define CPU_NUMBER() 0
54 #endif
55 
56 void record_stacktrace(int *, int);
57 void playback_stacktrace(int *, int);
58 
59 /*
60  * Locking primitives implementation.
61  * Locks provide shared/exclusive synchronization.
62  */
63 
64 #ifdef DDB /* { */
65 #ifdef MULTIPROCESSOR
66 int simple_lock_debugger = 1;	/* more serious on MP */
67 #else
68 int simple_lock_debugger = 0;
69 #endif
70 #define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
71 #define	SLOCK_TRACE()							\
72 	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
73 	    TRUE, 65535, "", lock_printf);
74 #else
75 #define	SLOCK_DEBUGGER()	/* nothing */
76 #define	SLOCK_TRACE()		/* nothing */
77 #endif /* } */
78 
79 /*
80  * Acquire a resource.
81  */
82 #define ACQUIRE(lkp, error, extflags, drain, wanted)			\
83 do {									\
84 	for (error = 0; wanted; ) {					\
85 		if ((drain))						\
86 			(lkp)->lk_flags |= LK_WAITDRAIN;		\
87 		else							\
88 			(lkp)->lk_waitcount++;				\
89 		/* XXX Cast away volatile. */				\
90 		error = tsleep((drain) ?				\
91 		    (void *)&(lkp)->lk_flags : (void *)(lkp),		\
92 		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo);	\
93 		if ((drain) == 0)					\
94 			(lkp)->lk_waitcount--;				\
95 		if (error)						\
96 			break;						\
97 		if ((extflags) & LK_SLEEPFAIL) {			\
98 			error = ENOLCK;					\
99 			break;						\
100 		}							\
101 	}								\
102 } while (0)
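
/*
 * Illustrative expansion (not part of the original file): for the
 * non-drain, exclusive-wait case used below,
 * ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0) boils down
 * to a sleep loop of roughly this shape, where LK_SLEEPFAIL turns even
 * a successful wakeup into ENOLCK so the caller has to start over:
 *
 *	for (error = 0; lkp->lk_sharecount != 0; ) {
 *		lkp->lk_waitcount++;
 *		error = tsleep((void *)lkp, lkp->lk_prio,
 *		    lkp->lk_wmesg, lkp->lk_timo);
 *		lkp->lk_waitcount--;
 *		if (error)
 *			break;
 *		if (extflags & LK_SLEEPFAIL) {
 *			error = ENOLCK;
 *			break;
 *		}
 *	}
 */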
103 
104 #define	SETHOLDER(lkp, pid, cpu_id)					\
105 	(lkp)->lk_lockholder = (pid)
106 
107 #define	WEHOLDIT(lkp, pid, cpu_id)					\
108 	((lkp)->lk_lockholder == (pid))
109 
110 #define	WAKEUP_WAITER(lkp)						\
111 do {									\
112 	if ((lkp)->lk_waitcount) {					\
113 		/* XXX Cast away volatile. */				\
114 		wakeup((void *)(lkp));					\
115 	}								\
116 } while (/*CONSTCOND*/0)
117 
118 #define	HAVEIT(lkp)							\
119 do {									\
120 } while (/*CONSTCOND*/0)
121 
122 #define	DONTHAVEIT(lkp)							\
123 do {									\
124 } while (/*CONSTCOND*/0)
125 
126 #if defined(LOCKDEBUG)
127 /*
128  * Lock debug printing routine; can be configured to print to console
129  * or log to syslog.
130  */
131 void
132 lock_printf(const char *fmt, ...)
133 {
134 	char b[150];
135 	va_list ap;
136 
137 	va_start(ap, fmt);
138 	if (lock_debug_syslog)
139 		vlog(LOG_DEBUG, fmt, ap);
140 	else {
141 		vsnprintf(b, sizeof(b), fmt, ap);
142 		printf_nolog("%s", b);
143 	}
144 	va_end(ap);
145 }
146 #endif /* LOCKDEBUG */
147 
148 /*
149  * Initialize a lock; required before use.
150  */
151 void
152 lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
153 {
154 
155 	bzero(lkp, sizeof(struct lock));
156 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
157 	lkp->lk_lockholder = LK_NOPROC;
158 	lkp->lk_prio = prio;
159 	lkp->lk_timo = timo;
160 	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
161 #if defined(LOCKDEBUG)
162 	lkp->lk_lock_file = NULL;
163 	lkp->lk_unlock_file = NULL;
164 #endif
165 }
166 
167 /*
168  * Determine the status of a lock.
169  */
170 int
171 lockstatus(struct lock *lkp)
172 {
173 	int lock_type = 0;
174 
175 	if (lkp->lk_exclusivecount != 0)
176 		lock_type = LK_EXCLUSIVE;
177 	else if (lkp->lk_sharecount != 0)
178 		lock_type = LK_SHARED;
179 	return (lock_type);
180 }
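
/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * embeds a struct lock typically initializes it once with lockinit() and
 * can later ask lockstatus() whether it is held shared, held exclusive,
 * or free.  The example_softc structure, the "examplk" wait message and
 * the PLOCK sleep priority are assumptions made for the example only.
 *
 *	struct example_softc {
 *		struct lock sc_lock;
 *	};
 *
 *	void
 *	example_init(struct example_softc *sc)
 *	{
 *		lockinit(&sc->sc_lock, PLOCK, "examplk", 0, 0);
 *	}
 *
 *	int
 *	example_is_locked(struct example_softc *sc)
 *	{
 *		return (lockstatus(&sc->sc_lock) != 0);
 *	}
 */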
181 
182 /*
183  * Set, change, or release a lock.
184  *
185  * Shared requests increment the shared count. Exclusive requests set the
186  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
187  * accepted shared locks and shared-to-exclusive upgrades to go away.
188  */
189 int
190 lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
191 {
192 	int error;
193 	pid_t pid;
194 	int extflags;
195 	cpuid_t cpu_id;
196 	struct proc *p = curproc;
197 
198 	error = 0;
199 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
200 
201 #ifdef DIAGNOSTIC
202 	if (p == NULL)
203 		panic("lockmgr: process context required");
204 #endif
205 	/* Process context required. */
206 	pid = p->p_pid;
207 	cpu_id = CPU_NUMBER();
208 
209 	/*
210 	 * Once a lock has drained, the LK_DRAINING flag is set and an
211 	 * exclusive lock is returned. The only valid operation thereafter
212 	 * is a single release of that exclusive lock. This final release
213 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
214 	 * further requests of any sort will result in a panic. The bits
215 	 * selected for these two flags are chosen so that they will be set
216 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
217 	 */
218 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
219 #ifdef DIAGNOSTIC
220 		if (lkp->lk_flags & LK_DRAINED)
221 			panic("lockmgr: using decommissioned lock");
222 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
223 		    WEHOLDIT(lkp, pid, cpu_id) == 0)
224 			panic("lockmgr: non-release on draining lock: %d",
225 			    flags & LK_TYPE_MASK);
226 #endif /* DIAGNOSTIC */
227 		lkp->lk_flags &= ~LK_DRAINING;
228 		lkp->lk_flags |= LK_DRAINED;
229 	}
230 
231 	/*
232 	 * Check if the caller is asking us to be schizophrenic.
233 	 */
234 	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
235 	    (LK_CANRECURSE|LK_RECURSEFAIL))
236 		panic("lockmgr: make up your mind");
237 
238 	switch (flags & LK_TYPE_MASK) {
239 
240 	case LK_SHARED:
241 		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
242 			/*
243 			 * If just polling, check to see if we will block.
244 			 */
245 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
246 			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
247 				error = EBUSY;
248 				break;
249 			}
250 			/*
251 			 * Wait for exclusive locks and upgrades to clear.
252 			 */
253 			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
254 			    (LK_HAVE_EXCL | LK_WANT_EXCL));
255 			if (error)
256 				break;
257 			lkp->lk_sharecount++;
258 			break;
259 		}
260 		/*
261 		 * We hold an exclusive lock, so downgrade it to shared.
262 		 * An alternative would be to fail with EDEADLK.
263 		 */
264 		lkp->lk_sharecount++;
265 
266 		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
267 		    lkp->lk_exclusivecount == 0)
268 			panic("lockmgr: not holding exclusive lock");
269 		lkp->lk_sharecount += lkp->lk_exclusivecount;
270 		lkp->lk_exclusivecount = 0;
271 		lkp->lk_flags &= ~LK_HAVE_EXCL;
272 		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
273 #if defined(LOCKDEBUG)
274 		lkp->lk_unlock_file = file;
275 		lkp->lk_unlock_line = line;
276 #endif
277 		DONTHAVEIT(lkp);
278 		WAKEUP_WAITER(lkp);
279 		break;
280 
281 	case LK_EXCLUSIVE:
282 		if (WEHOLDIT(lkp, pid, cpu_id)) {
283 			/*
284 			 * Recursive lock.
285 			 */
286 			if ((extflags & LK_CANRECURSE) == 0) {
287 				if (extflags & LK_RECURSEFAIL) {
288 					error = EDEADLK;
289 					break;
290 				} else
291 					panic("lockmgr: locking against myself");
292 			}
293 			lkp->lk_exclusivecount++;
294 			break;
295 		}
296 		/*
297 		 * If we are just polling, check to see if we will sleep.
298 		 */
299 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
300 		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
301 		     lkp->lk_sharecount != 0)) {
302 			error = EBUSY;
303 			break;
304 		}
305 		/*
306 		 * Try to acquire the want_exclusive flag.
307 		 */
308 		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
309 		    (LK_HAVE_EXCL | LK_WANT_EXCL));
310 		if (error)
311 			break;
312 		lkp->lk_flags |= LK_WANT_EXCL;
313 		/*
314 		 * Wait for shared locks and upgrades to finish.
315 		 */
316 		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
317 		lkp->lk_flags &= ~LK_WANT_EXCL;
318 		if (error)
319 			break;
320 		lkp->lk_flags |= LK_HAVE_EXCL;
321 		SETHOLDER(lkp, pid, cpu_id);
322 #if defined(LOCKDEBUG)
323 		lkp->lk_lock_file = file;
324 		lkp->lk_lock_line = line;
325 #endif
326 		HAVEIT(lkp);
327 		if (lkp->lk_exclusivecount != 0)
328 			panic("lockmgr: non-zero exclusive count");
329 		lkp->lk_exclusivecount = 1;
330 		break;
331 
332 	case LK_RELEASE:
333 		if (lkp->lk_exclusivecount != 0) {
334 			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
335 				panic("lockmgr: pid %d, not exclusive lock "
336 				    "holder %d unlocking",
337 				    pid, lkp->lk_lockholder);
338 			}
339 			lkp->lk_exclusivecount--;
340 			if (lkp->lk_exclusivecount == 0) {
341 				lkp->lk_flags &= ~LK_HAVE_EXCL;
342 				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
343 #if defined(LOCKDEBUG)
344 				lkp->lk_unlock_file = file;
345 				lkp->lk_unlock_line = line;
346 #endif
347 				DONTHAVEIT(lkp);
348 			}
349 		} else if (lkp->lk_sharecount != 0) {
350 			lkp->lk_sharecount--;
351 		}
352 #ifdef DIAGNOSTIC
353 		else
354 			panic("lockmgr: release of unlocked lock!");
355 #endif
356 		WAKEUP_WAITER(lkp);
357 		break;
358 
359 	case LK_DRAIN:
360 		/*
361 		 * Check that we do not already hold the lock, as it can
362 		 * never drain if we do. Unfortunately, we have no way to
363 		 * check for holding a shared lock, but at least we can
364 		 * check for an exclusive one.
365 		 */
366 		if (WEHOLDIT(lkp, pid, cpu_id))
367 			panic("lockmgr: draining against myself");
368 		/*
369 		 * If we are just polling, check to see if we will sleep.
370 		 */
371 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
372 		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
373 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
374 			error = EBUSY;
375 			break;
376 		}
377 		ACQUIRE(lkp, error, extflags, 1,
378 		    ((lkp->lk_flags &
379 		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
380 		     lkp->lk_sharecount != 0 ||
381 		     lkp->lk_waitcount != 0));
382 		if (error)
383 			break;
384 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
385 		SETHOLDER(lkp, pid, cpu_id);
386 #if defined(LOCKDEBUG)
387 		lkp->lk_lock_file = file;
388 		lkp->lk_lock_line = line;
389 #endif
390 		HAVEIT(lkp);
391 		lkp->lk_exclusivecount = 1;
392 		break;
393 
394 	default:
395 		panic("lockmgr: unknown locktype request %d",
396 		    flags & LK_TYPE_MASK);
397 		/* NOTREACHED */
398 	}
399 	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
400 	    ((lkp->lk_flags &
401 	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
402 	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
403 		lkp->lk_flags &= ~LK_WAITDRAIN;
404 		wakeup((void *)&lkp->lk_flags);
405 	}
406 	return (error);
407 }
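
/*
 * Illustrative sketch (not part of the original file): typical lockmgr()
 * call sequences against a lock set up with lockinit().  The helper name
 * is hypothetical; the flag combinations are the ones handled by the
 * switch above.  Note the LK_DRAIN sequence: once the drain succeeds the
 * caller owns the lock exclusively with LK_DRAINING set, and the single
 * LK_RELEASE that follows decommissions the lock for good.
 *
 *	int
 *	example_lock_use(struct lock *lkp)
 *	{
 *		int error;
 *
 *		error = lockmgr(lkp, LK_SHARED, NULL);
 *		if (error == 0) {
 *			(read-only access to the protected data)
 *			lockmgr(lkp, LK_RELEASE, NULL);
 *		}
 *
 *		error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
 *		if (error == 0) {
 *			(modify the protected data)
 *			lockmgr(lkp, LK_RELEASE, NULL);
 *		}
 *
 *		error = lockmgr(lkp, LK_DRAIN, NULL);
 *		if (error == 0)
 *			lockmgr(lkp, LK_RELEASE, NULL);
 *		return (error);
 *	}
 */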
408 
409 #ifdef DIAGNOSTIC
410 /*
411  * Print out information about the state of a lock. Used by VOP_PRINT
412  * routines to display status about contained locks.
413  */
414 void
415 lockmgr_printinfo(__volatile struct lock *lkp)
416 {
417 
418 	if (lkp->lk_sharecount)
419 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
420 		    lkp->lk_sharecount);
421 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
422 		printf(" lock type %s: EXCL (count %d) by ",
423 		    lkp->lk_wmesg, lkp->lk_exclusivecount);
424 		printf("pid %d", lkp->lk_lockholder);
425 	} else
426 		printf(" not locked");
427 	if (lkp->lk_waitcount > 0)
428 		printf(" with %d pending", lkp->lk_waitcount);
429 }
430 #endif /* DIAGNOSTIC */
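
/*
 * Illustrative sketch (not part of the original file): a VOP_PRINT-style
 * routine usually prints its own identification and lets
 * lockmgr_printinfo() append the lock state to the same line.  The
 * structure and function names below are hypothetical.
 *
 *	void
 *	example_print(struct example_softc *sc)
 *	{
 *		printf("example object at %p", sc);
 *		lockmgr_printinfo(&sc->sc_lock);
 *		printf("\n");
 *	}
 */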
431 
432 #if defined(LOCKDEBUG)
433 TAILQ_HEAD(, simplelock) simplelock_list =
434     TAILQ_HEAD_INITIALIZER(simplelock_list);
435 
436 #if defined(MULTIPROCESSOR) /* { */
437 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
438 
439 #define	SLOCK_LIST_LOCK()						\
440 	__cpu_simple_lock(&simplelock_list_slock.lock_data)
441 
442 #define	SLOCK_LIST_UNLOCK()						\
443 	__cpu_simple_unlock(&simplelock_list_slock.lock_data)
444 
445 #define	SLOCK_COUNT(x)							\
446 	curcpu()->ci_simple_locks += (x)
447 #else
448 u_long simple_locks;
449 
450 #define	SLOCK_LIST_LOCK()	/* nothing */
451 
452 #define	SLOCK_LIST_UNLOCK()	/* nothing */
453 
454 #define	SLOCK_COUNT(x)		simple_locks += (x)
455 #endif /* MULTIPROCESSOR */ /* } */
456 
457 #ifdef MULTIPROCESSOR
458 #define SLOCK_MP()		lock_printf("on cpu %ld\n", 		\
459 				    (u_long) cpu_number())
460 #else
461 #define SLOCK_MP()		/* nothing */
462 #endif
463 
464 #define	SLOCK_WHERE(str, alp, id, l)					\
465 do {									\
466 	lock_printf("\n");						\
467 	lock_printf(str);						\
468 	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
469 	SLOCK_MP();							\
470 	if ((alp)->lock_file != NULL)					\
471 		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
472 		    (alp)->lock_line);					\
473 	if ((alp)->unlock_file != NULL)					\
474 		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
475 		    (alp)->unlock_line);				\
476 	SLOCK_TRACE()							\
477 	SLOCK_DEBUGGER();						\
478 } while (/*CONSTCOND*/0)
479 
480 /*
481  * Simple lock functions so that the debugger can see from whence
482  * they are being called.
483  */
484 void
485 simple_lock_init(struct simplelock *alp)
486 {
487 
488 #if defined(MULTIPROCESSOR) /* { */
489 	__cpu_simple_lock_init(&alp->lock_data);
490 #else
491 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
492 #endif /* } */
493 	alp->lock_file = NULL;
494 	alp->lock_line = 0;
495 	alp->unlock_file = NULL;
496 	alp->unlock_line = 0;
497 	alp->lock_holder = LK_NOCPU;
498 }
499 
500 void
501 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
502 {
503 	cpuid_t cpu_id = CPU_NUMBER();
504 	int s;
505 
506 	s = spllock();
507 
508 	/*
509 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
510 	 * don't take any action, and just fall into the normal spin case.
511 	 */
512 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
513 #if defined(MULTIPROCESSOR) /* { */
514 		if (alp->lock_holder == cpu_id) {
515 			SLOCK_WHERE("simple_lock: locking against myself\n",
516 			    alp, id, l);
517 			goto out;
518 		}
519 #else
520 		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
521 		goto out;
522 #endif /* MULTIPROCESSOR */ /* } */
523 	}
524 
525 #if defined(MULTIPROCESSOR) /* { */
526 	/* Acquire the lock before modifying any fields. */
527 	splx(s);
528 	__cpu_simple_lock(&alp->lock_data);
529 	s = spllock();
530 #else
531 	alp->lock_data = __SIMPLELOCK_LOCKED;
532 #endif /* } */
533 
534 	if (alp->lock_holder != LK_NOCPU) {
535 		SLOCK_WHERE("simple_lock: uninitialized lock\n",
536 		    alp, id, l);
537 	}
538 	alp->lock_file = id;
539 	alp->lock_line = l;
540 	alp->lock_holder = cpu_id;
541 
542 	SLOCK_LIST_LOCK();
543 	/* XXX Cast away volatile */
544 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
545 	SLOCK_LIST_UNLOCK();
546 
547 	SLOCK_COUNT(1);
548 
549  out:
550 	splx(s);
551 }
552 
553 int
554 _simple_lock_held(__volatile struct simplelock *alp)
555 {
556 	cpuid_t cpu_id = CPU_NUMBER();
557 	int s, locked = 0;
558 
559 	s = spllock();
560 
561 #if defined(MULTIPROCESSOR)
562 	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
563 		locked = (alp->lock_holder == cpu_id);
564 	else
565 		__cpu_simple_unlock(&alp->lock_data);
566 #else
567 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
568 		locked = 1;
569 		KASSERT(alp->lock_holder == cpu_id);
570 	}
571 #endif
572 
573 	splx(s);
574 
575 	return (locked);
576 }
577 
578 int
579 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
580 {
581 	cpuid_t cpu_id = CPU_NUMBER();
582 	int s, rv = 0;
583 
584 	s = spllock();
585 
586 	/*
587 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
588 	 * don't take any action.
589 	 */
590 #if defined(MULTIPROCESSOR) /* { */
591 	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
592 		if (alp->lock_holder == cpu_id)
593 			SLOCK_WHERE("simple_lock_try: locking against myself\n",
594 			    alp, id, l);
595 		goto out;
596 	}
597 #else
598 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
599 		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
600 		goto out;
601 	}
602 	alp->lock_data = __SIMPLELOCK_LOCKED;
603 #endif /* MULTIPROCESSOR */ /* } */
604 
605 	/*
606 	 * At this point, we have acquired the lock.
607 	 */
608 
609 	rv = 1;
610 
611 	alp->lock_file = id;
612 	alp->lock_line = l;
613 	alp->lock_holder = cpu_id;
614 
615 	SLOCK_LIST_LOCK();
616 	/* XXX Cast away volatile. */
617 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
618 	SLOCK_LIST_UNLOCK();
619 
620 	SLOCK_COUNT(1);
621 
622  out:
623 	splx(s);
624 	return (rv);
625 }
626 
627 void
628 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
629 {
630 	int s;
631 
632 	s = spllock();
633 
634 	/*
635 	 * MULTIPROCESSOR case: This is `safe' because we think we hold
636 	 * the lock, and if we don't, we don't take any action.
637 	 */
638 	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
639 		SLOCK_WHERE("simple_unlock: lock not held\n",
640 		    alp, id, l);
641 		goto out;
642 	}
643 
644 	SLOCK_LIST_LOCK();
645 	TAILQ_REMOVE(&simplelock_list, alp, list);
646 	SLOCK_LIST_UNLOCK();
647 
648 	SLOCK_COUNT(-1);
649 
650 	alp->list.tqe_next = NULL;	/* sanity */
651 	alp->list.tqe_prev = NULL;	/* sanity */
652 
653 	alp->unlock_file = id;
654 	alp->unlock_line = l;
655 
656 #if defined(MULTIPROCESSOR) /* { */
657 	alp->lock_holder = LK_NOCPU;
658 	/* Now that we've modified all fields, release the lock. */
659 	__cpu_simple_unlock(&alp->lock_data);
660 #else
661 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
662 	KASSERT(alp->lock_holder == CPU_NUMBER());
663 	alp->lock_holder = LK_NOCPU;
664 #endif /* } */
665 
666  out:
667 	splx(s);
668 }
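
/*
 * Illustrative sketch (not part of the original file): these functions
 * are normally reached through the simple_lock(), simple_lock_try() and
 * simple_unlock() wrappers, which on LOCKDEBUG kernels are assumed to
 * pass __FILE__ and __LINE__ so that SLOCK_WHERE() can report where a
 * lock was last taken and released.  The lock and function below are
 * hypothetical.
 *
 *	struct simplelock example_slock;
 *
 *	void
 *	example_slock_use(void)
 *	{
 *		simple_lock_init(&example_slock);
 *
 *		simple_lock(&example_slock);
 *		simple_unlock(&example_slock);
 *
 *		if (simple_lock_try(&example_slock))
 *			simple_unlock(&example_slock);
 *	}
 */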
669 
670 void
671 simple_lock_dump(void)
672 {
673 	struct simplelock *alp;
674 	int s;
675 
676 	s = spllock();
677 	SLOCK_LIST_LOCK();
678 	lock_printf("all simple locks:\n");
679 	TAILQ_FOREACH(alp, &simplelock_list, list) {
680 		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
681 		    alp->lock_file, alp->lock_line);
682 	}
683 	SLOCK_LIST_UNLOCK();
684 	splx(s);
685 }
686 
687 void
688 simple_lock_freecheck(void *start, void *end)
689 {
690 	struct simplelock *alp;
691 	int s;
692 
693 	s = spllock();
694 	SLOCK_LIST_LOCK();
695 	TAILQ_FOREACH(alp, &simplelock_list, list) {
696 		if ((void *)alp >= start && (void *)alp < end) {
697 			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
698 			    alp, alp->lock_holder, alp->lock_file,
699 			    alp->lock_line);
700 			SLOCK_DEBUGGER();
701 		}
702 	}
703 	SLOCK_LIST_UNLOCK();
704 	splx(s);
705 }
706 
707 /*
708  * We must be holding exactly one lock: the sched_lock.
709  */
710 
711 #ifdef notyet
712 void
713 simple_lock_switchcheck(void)
714 {
715 
716 	simple_lock_only_held(&sched_lock, "switching");
717 }
718 #endif
719 
720 void
721 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
722 {
723 	struct simplelock *alp;
724 	cpuid_t cpu_id = CPU_NUMBER();
725 	int s;
726 
727 	if (lp) {
728 		LOCK_ASSERT(simple_lock_held(lp));
729 	}
730 	s = spllock();
731 	SLOCK_LIST_LOCK();
732 	TAILQ_FOREACH(alp, &simplelock_list, list) {
733 		if (alp == lp)
734 			continue;
735 		if (alp->lock_holder == cpu_id)
736 			break;
737 	}
738 	SLOCK_LIST_UNLOCK();
739 	splx(s);
740 
741 	if (alp != NULL) {
742 		lock_printf("\n%s with held simple_lock %p "
743 		    "CPU %lu %s:%d\n",
744 		    where, alp, alp->lock_holder, alp->lock_file,
745 		    alp->lock_line);
746 		SLOCK_TRACE();
747 		SLOCK_DEBUGGER();
748 	}
749 }
750 #endif /* LOCKDEBUG */
751 
752 #if defined(MULTIPROCESSOR)
753 /*
754  * Functions for manipulating the kernel_lock.  We put them here
755  * so that they show up in profiles.
756  */
757 
758 struct __mp_lock kernel_lock;
759 
760 void
761 _kernel_lock_init(void)
762 {
763 	__mp_lock_init(&kernel_lock);
764 }
765 
766 /*
767  * Acquire/release the kernel lock.  Intended for use in the scheduler
768  * and the lower half of the kernel.
769  */
770 
771 void
772 _kernel_lock(void)
773 {
774 	SCHED_ASSERT_UNLOCKED();
775 	__mp_lock(&kernel_lock);
776 }
777 
778 void
779 _kernel_unlock(void)
780 {
781 	__mp_unlock(&kernel_lock);
782 }
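
/*
 * Illustrative sketch (not part of the original file): lower-half code
 * such as an interrupt handler brackets its work with these calls,
 * usually through the KERNEL_LOCK()/KERNEL_UNLOCK() wrappers that
 * <sys/lock.h> is assumed to provide on MULTIPROCESSOR kernels.  The
 * handler below is hypothetical.
 *
 *	int
 *	example_intr(void *arg)
 *	{
 *		_kernel_lock();
 *		(service the hypothetical device)
 *		_kernel_unlock();
 *		return (1);
 *	}
 */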
783 
784 /*
785  * Acquire/release the kernel_lock on behalf of a process.  Intended for
786  * use in the top half of the kernel.
787  */
788 void
789 _kernel_proc_lock(struct proc *p)
790 {
791 	SCHED_ASSERT_UNLOCKED();
792 	__mp_lock(&kernel_lock);
793 	atomic_setbits_int(&p->p_flag, P_BIGLOCK);
794 }
795 
796 void
797 _kernel_proc_unlock(struct proc *p)
798 {
799 	atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
800 	__mp_unlock(&kernel_lock);
801 }
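
/*
 * Illustrative sketch (not part of the original file): top-half code such
 * as the syscall and trap entry points takes the big lock on behalf of
 * the current process, so P_BIGLOCK is set for as long as it is held.
 * The usual spelling is through KERNEL_PROC_LOCK(p)/KERNEL_PROC_UNLOCK(p)
 * wrappers assumed to map onto the functions above; the function below
 * is hypothetical.
 *
 *	void
 *	example_syscall_body(struct proc *p)
 *	{
 *		_kernel_proc_lock(p);
 *		(run the syscall under the big lock)
 *		_kernel_proc_unlock(p);
 *	}
 */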
802 
803 #ifdef MP_LOCKDEBUG
804 /* CPU-dependent timing, needs this to be settable from ddb. */
805 int __mp_lock_spinout = 200000000;
806 #endif
807 
808 #endif /* MULTIPROCESSOR */
809