/*	$OpenBSD: kern_lock.c,v 1.33 2009/03/25 21:20:26 oga Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifndef spllock
#define spllock() splhigh()
#endif

#ifdef MULTIPROCESSOR
#define CPU_NUMBER() cpu_number()
#else
#define CPU_NUMBER() 0
#endif

void record_stacktrace(int *, int);
void playback_stacktrace(int *, int);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
do {									\
	for (error = 0; wanted; ) {					\
		if ((drain))						\
			(lkp)->lk_flags |= LK_WAITDRAIN;		\
		else							\
			(lkp)->lk_waitcount++;				\
		/* XXX Cast away volatile. */				\
		error = tsleep((drain) ?				\
		    (void *)&(lkp)->lk_flags : (void *)(lkp),		\
		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo);	\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		if (error)						\
			break;						\
	}								\
} while (0)
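
/*
 * Example (sketch only): lockmgr() below invokes ACQUIRE like this to
 * sleep until no exclusive holder or pending exclusive request
 * remains; "error" receives the tsleep() result:
 *
 *	ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
 *	    (LK_HAVE_EXCL | LK_WANT_EXCL));
 */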

#define	SETHOLDER(lkp, pid, cpu_id)					\
	(lkp)->lk_lockholder = (pid)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	((lkp)->lk_lockholder == (pid))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if ((lkp)->lk_waitcount) {					\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#define	HAVEIT(lkp)							\
do {									\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{

	bzero(lkp, sizeof(struct lock));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
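
/*
 * Example (sketch, not from this file): a subsystem would typically
 * embed a struct lock in its softc and initialize it once at attach
 * time.  "sc_lock", PLOCK and the "exlock" wmesg are illustrative
 * values only.
 *
 *	struct lock sc_lock;
 *
 *	lockinit(&sc_lock, PLOCK, "exlock", 0, 0);
 */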

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}
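
/*
 * Example (sketch): lockstatus() lets a caller distinguish the three
 * states without inspecting lk_flags directly; "sc_lock" continues
 * the hypothetical example above.
 *
 *	switch (lockstatus(&sc_lock)) {
 *	case LK_EXCLUSIVE:	locked exclusively
 *	case LK_SHARED:		locked shared
 *	default:		not locked
 *	}
 */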

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("lockmgr: process context required");
#endif
	/* Process context required. */
	pid = p->p_pid;
	cpu_id = CPU_NUMBER();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}

	/*
	 * Check if the caller is asking us to be schizophrenic.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;

		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking",
				    pid, lkp->lk_lockholder);
			}
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	return (error);
}
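
/*
 * Example (sketch): a typical read-side caller brackets its critical
 * section with a shared acquire and a release, checking the error
 * since the sleep can fail under LK_NOWAIT or a timeout.  "sc_lock"
 * is the hypothetical lock from above; NULL is fine for the interlock
 * argument, which lockmgr() above never dereferences.
 *
 *	if (lockmgr(&sc_lock, LK_SHARED, NULL) == 0) {
 *		... read the shared state ...
 *		lockmgr(&sc_lock, LK_RELEASE, NULL);
 *	}
 */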

#ifdef DIAGNOSTIC
/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
#endif /* DIAGNOSTIC */

#if defined(LOCKDEBUG)
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %lu\n", 		\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == CPU_NUMBER());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}
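
/*
 * Example (sketch): these functions are normally reached through the
 * simple_lock()/simple_unlock()/simple_lock_try() macros, which are
 * assumed to pass __FILE__ and __LINE__ for the "id" and "l"
 * arguments recorded above.
 *
 *	struct simplelock sl;
 *
 *	simple_lock_init(&sl);
 *	if (simple_lock_try(&sl)) {
 *		... fields protected by sl ...
 *		simple_unlock(&sl);
 *	}
 */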

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

#ifdef notyet
void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}
#endif

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct __mp_lock kernel_lock;

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct proc *p)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
	atomic_setbits_int(&p->p_flag, P_BIGLOCK);
}

void
_kernel_proc_unlock(struct proc *p)
{
	atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
	__mp_unlock(&kernel_lock);
}
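
/*
 * Example (sketch): top-half code brackets a region with the
 * per-process pair, which also maintains P_BIGLOCK; kernel-lock
 * wrapper macros such as KERNEL_PROC_LOCK() are assumed to map to
 * these functions on MULTIPROCESSOR kernels.
 *
 *	_kernel_proc_lock(curproc);
 *	... top half critical section ...
 *	_kernel_proc_unlock(curproc);
 */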

#ifdef MP_LOCKDEBUG
/* Spinout is CPU-speed dependent, so it needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif

#endif /* MULTIPROCESSOR */