/*	$NetBSD: kern_lock.c,v 1.62 2002/05/21 01:38:27 thorpej Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.62 2002/05/21 01:38:27 thorpej Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
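
/*
 * Illustrative sketch (not part of the original source; names are
 * hypothetical): a typical sleep-lock setup.  Waiters sleep at PRIBIO
 * with no timeout; passing LK_SPIN in the flags (cf. the spinlockinit()
 * macro) would instead create a spin lock, for which prio and timo are
 * ignored:
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PRIBIO, "foolck", 0, 0);
 */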

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}
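
/*
 * Illustrative sketch (hypothetical caller): since lockstatus() returns
 * LK_EXCLUSIVE, LK_SHARED, or 0, it is handy in diagnostic checks:
 *
 *	KASSERT(lockstatus(&foo_lock) == LK_EXCLUSIVE);
 */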

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
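
/*
 * As an illustrative sketch of the rule above (hypothetical driver
 * code, not part of the original source): a simple lock shared with a
 * block I/O interrupt handler must be taken at splbio() or higher in
 * thread context:
 *
 *	s = splbio();
 *	simple_lock(&sc->sc_slock);
 *	...brief critical section, no sleeping...
 *	simple_unlock(&sc->sc_slock);
 *	splx(s);
 */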

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.   It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
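
/*
 * Illustrative sketch of the interface (hypothetical caller, not part
 * of the original source): readers take and drop a shared lock, a
 * writer takes an exclusive one, and LK_NOWAIT turns a would-block
 * case into EBUSY:
 *
 *	(void) lockmgr(&foo_lock, LK_SHARED, NULL);
 *	...read-only access...
 *	(void) lockmgr(&foo_lock, LK_RELEASE, NULL);
 *
 *	if (lockmgr(&foo_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		...exclusive access...
 *		(void) lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	}
 */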
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock is
		 * always released.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			     lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock, acquire N locks on behalf of the current
 * CPU in a single operation.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds "
		    "lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("spinlock_acquire_count: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
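
/*
 * Illustrative sketch (hypothetical, simplified from what a scheduler
 * might do): the two routines above are intended to be used as a pair
 * around a context switch, preserving the recursion count:
 *
 *	count = spinlock_release_all(&some_spinlock);
 *	...context switch to another process...
 *	spinlock_acquire_count(&some_spinlock, count);
 */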

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
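
/*
 * Based on the format strings above, the output looks like this
 * (illustrative values):
 *
 *	 lock type vnlock: EXCL (count 1) by pid 123 with 2 pending
 */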

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}
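
/*
 * Illustrative usage sketch (hypothetical caller): under LOCKDEBUG,
 * the simple_lock() and simple_unlock() macros from <sys/lock.h>
 * expand to the debug functions below, passing __FILE__ and __LINE__:
 *
 *	struct simplelock foo_slock;
 *
 *	simple_lock_init(&foo_slock);
 *	simple_lock(&foo_slock);
 *	...short critical section, no sleeping...
 *	simple_unlock(&foo_slock);
 */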

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}
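
/*
 * A common use of the predicate above is in assertions, as done
 * elsewhere in this file (sketch; foo_slock is hypothetical):
 *
 *	LOCK_ASSERT(simple_lock_held(&foo_slock));
 */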

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct lock kernel_lock;

void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}

void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct proc *p)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	p->p_flag |= P_BIGLOCK;
}

void
_kernel_proc_unlock(struct proc *p)
{

	p->p_flag &= ~P_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
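
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the top half normally wraps work such as syscall dispatch in the
 * KERNEL_PROC_LOCK()/KERNEL_PROC_UNLOCK() macros from <sys/lock.h>,
 * which expand to the two functions above:
 *
 *	KERNEL_PROC_LOCK(p);
 *	error = (*callp->sy_call)(p, args, rval);
 *	KERNEL_PROC_UNLOCK(p);
 */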
#endif /* MULTIPROCESSOR */