xref: /dflybsd-src/sys/kern/kern_lock.c (revision 4badba3841ae9f4d60211d1c5ed006e17b38c299)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (C) 1997
5  *	John S. Dyson.  All rights reserved.
6  * Copyright (C) 2013
7  *	Matthew Dillon, All rights reserved.
8  *
9  * This code contains ideas from software contributed to Berkeley by
10  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11  * System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include "opt_lint.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/sysctl.h>
46 #include <sys/spinlock.h>
47 #include <sys/thread2.h>
48 #include <sys/spinlock2.h>
49 
50 static void undo_upreq(struct lock *lkp);
51 
52 /*
53  * Locking primitives implementation.
54  * Locks provide shared/exclusive synchronization.
55  */
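
/*
 * Quick reference for the lk_count encoding used throughout this file
 * (the LKC_* definitions themselves live in <sys/lock.h>; only the
 * roles they play here are summarized):
 *
 *	LKC_MASK	- hold count field: number of shared holders, or
 *			  the exclusive recursion count.
 *	LKC_EXCL	- lock is held exclusively; lk_lockholder is the
 *			  owning thread.
 *	LKC_EXREQ	- one or more threads are waiting for an exclusive
 *			  lock.
 *	LKC_SHREQ	- one or more threads are waiting for a shared
 *			  lock.
 *	LKC_UPREQ	- a shared holder has requested an upgrade to
 *			  exclusive and is waiting for it.
 *	LKC_UPGRANT	- a pending upgrade has been granted; the
 *			  requester must complete the transition.
 */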
56 
57 #ifdef DEBUG_LOCKS
58 #define COUNT(td, x) (td)->td_locks += (x)
59 #else
60 #define COUNT(td, x)
61 #endif
62 
63 #define LOCK_WAIT_TIME 100
64 #define LOCK_SAMPLE_WAIT 7
65 
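/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * "example_softc" and the surrounding driver code are hypothetical):
 *
 *	struct example_softc {
 *		struct lock	sc_lock;
 *	};
 *
 *	lockinit(&sc->sc_lock, "exlock", 0, 0);		(once, before use)
 *
 *	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);		(blocking exclusive)
 *	... modify protected state ...
 *	lockmgr(&sc->sc_lock, LK_RELEASE);
 *
 *	if (lockmgr(&sc->sc_lock, LK_SHARED | LK_NOWAIT) == 0) {
 *		... read protected state ...		(non-blocking shared)
 *		lockmgr(&sc->sc_lock, LK_RELEASE);
 *	}
 */
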
66 /*
67  * Set, change, or release a lock.
68  *
69  */
70 int
71 #ifndef	DEBUG_LOCKS
72 lockmgr(struct lock *lkp, u_int flags)
73 #else
74 debuglockmgr(struct lock *lkp, u_int flags,
75 	     const char *name, const char *file, int line)
76 #endif
77 {
78 	thread_t td;
79 	thread_t otd;
80 	int error;
81 	int extflags;
82 	int count;
83 	int pflags;
84 	int wflags;
85 	int timo;
86 #ifdef DEBUG_LOCKS
87 	int i;
88 #endif
89 
90 	error = 0;
91 
92 	if (mycpu->gd_intr_nesting_level &&
93 	    (flags & LK_NOWAIT) == 0 &&
94 	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
95 	    panic_cpu_gd != mycpu
96 	) {
97 
98 #ifndef DEBUG_LOCKS
99 		panic("lockmgr %s from %p: called from interrupt, ipi, "
100 		      "or hard code section",
101 		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
102 #else
103 		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
104 		      "or hard code section",
105 		      lkp->lk_wmesg, file, line);
106 #endif
107 	}
108 
109 #ifdef DEBUG_LOCKS
110 	if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
111 		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
112 		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
113 	}
114 #endif
115 
116 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
117 	td = curthread;
118 
119 again:
120 	count = lkp->lk_count;
121 	cpu_ccfence();
122 
123 	switch (flags & LK_TYPE_MASK) {
124 	case LK_SHARED:
125 		/*
126 		 * Shared lock critical path case
127 		 */
128 		if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
129 			if (atomic_cmpset_int(&lkp->lk_count,
130 					      count, count + 1)) {
131 				COUNT(td, 1);
132 				break;
133 			}
134 			goto again;
135 		}
136 
137 		/*
138 		 * If the caller already holds the lock exclusively then
139 		 * we silently obtain another count on the exclusive lock.
140 		 *
141 		 * WARNING!  The old FreeBSD behavior was to downgrade,
142 		 *	     but this creates a problem when recursions
143 		 *	     return to the caller and the caller expects
144 		 *	     its original exclusive lock to remain exclusively
145 		 *	     locked.
146 		 */
147 		if (lkp->lk_lockholder == td) {
148 			KKASSERT(count & LKC_EXCL);
149 			if ((extflags & LK_CANRECURSE) == 0) {
150 				if (extflags & LK_NOWAIT) {
151 					error = EBUSY;
152 					break;
153 				}
154 				panic("lockmgr: locking against myself");
155 			}
156 			atomic_add_int(&lkp->lk_count, 1);
157 			COUNT(td, 1);
158 			break;
159 		}
160 
161 		/*
162 		 * Slow path
163 		 */
164 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
165 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
166 		wflags = (td->td_flags & TDF_DEADLKTREAT) ?
167 				LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);
168 
169 		/*
170 		 * Block while the lock is held exclusively or, unless this
171 		 * thread is flagged TDF_DEADLKTREAT, also while other threads
172 		 * are trying to obtain an exclusive lock or upgrade to one.
173 		 */
174 		if (count & wflags) {
175 			if (extflags & LK_NOWAIT) {
176 				error = EBUSY;
177 				break;
178 			}
179 			tsleep_interlock(lkp, pflags);
180 			if (!atomic_cmpset_int(&lkp->lk_count, count,
181 					      count | LKC_SHREQ)) {
182 				goto again;
183 			}
184 			error = tsleep(lkp, pflags | PINTERLOCKED,
185 				       lkp->lk_wmesg, timo);
186 			if (error)
187 				break;
188 			if (extflags & LK_SLEEPFAIL) {
189 				error = ENOLCK;
190 				break;
191 			}
192 			goto again;
193 		}
194 
195 		/*
196 		 * Otherwise we can bump the count
197 		 */
198 		if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
199 			COUNT(td, 1);
200 			break;
201 		}
202 		goto again;
203 
204 	case LK_EXCLUSIVE:
205 		/*
206 		 * Exclusive lock critical path.
207 		 */
208 		if (count == 0) {
209 			if (atomic_cmpset_int(&lkp->lk_count, count,
210 					      LKC_EXCL | (count + 1))) {
211 				lkp->lk_lockholder = td;
212 				COUNT(td, 1);
213 				break;
214 			}
215 			goto again;
216 		}
217 
218 		/*
219 		 * Recursive lock if we already hold it exclusively.
220 		 */
221 		if (lkp->lk_lockholder == td) {
222 			KKASSERT(count & LKC_EXCL);
223 			if ((extflags & LK_CANRECURSE) == 0) {
224 				if (extflags & LK_NOWAIT) {
225 					error = EBUSY;
226 					break;
227 				}
228 				panic("lockmgr: locking against myself");
229 			}
230 			atomic_add_int(&lkp->lk_count, 1);
231 			COUNT(td, 1);
232 			break;
233 		}
234 
235 		/*
236 		 * We will block, handle LK_NOWAIT
237 		 */
238 		if (extflags & LK_NOWAIT) {
239 			error = EBUSY;
240 			break;
241 		}
242 
243 		/*
244 		 * Wait until we can obtain the exclusive lock.  EXREQ is
245 		 * automatically cleared when all current holders release
246 		 * so if we abort the operation we can safely leave it set.
247 		 * There might be other exclusive requesters.
248 		 */
249 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
250 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
251 
252 		tsleep_interlock(lkp, pflags);
253 		if (!atomic_cmpset_int(&lkp->lk_count, count,
254 				       count | LKC_EXREQ)) {
255 			goto again;
256 		}
257 		error = tsleep(lkp, pflags | PINTERLOCKED,
258 			       lkp->lk_wmesg, timo);
259 		if (error)
260 			break;
261 		if (extflags & LK_SLEEPFAIL) {
262 			error = ENOLCK;
263 			break;
264 		}
265 		goto again;
266 
267 	case LK_DOWNGRADE:
268 		/*
269 		 * Downgrade an exclusive lock into a shared lock.  Only a
270 		 * non-recursive exclusive lock (count of 1) may be downgraded.
271 		 *
272 		 * This function always succeeds.
273 		 */
274 		if (lkp->lk_lockholder != td ||
275 		    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
276 			panic("lockmgr: not holding exclusive lock");
277 		}
278 
279 #ifdef DEBUG_LOCKS
280 		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
281 			if (td->td_lockmgr_stack[i] == lkp &&
282 			    td->td_lockmgr_stack_id[i] > 0
283 			) {
284 				td->td_lockmgr_stack_id[i]--;
285 				break;
286 			}
287 		}
288 #endif
289 		/*
290 		 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
291 		 */
292 		otd = lkp->lk_lockholder;
293 		lkp->lk_lockholder = NULL;
294 		if (atomic_cmpset_int(&lkp->lk_count, count,
295 				      count & ~(LKC_EXCL|LKC_SHREQ))) {
296 			if (count & LKC_SHREQ)
297 				wakeup(lkp);
298 			break;
299 		}
300 		lkp->lk_lockholder = otd;
301 		goto again;
302 
303 	case LK_EXCLUPGRADE:
304 		/*
305 		 * Upgrade from a single shared lock to an exclusive lock.
306 		 *
307 		 * If another process is ahead of us to get an upgrade,
308 		 * then we want to fail rather than have an intervening
309 		 * exclusive access.  The shared lock is released on
310 		 * failure.
311 		 */
312 		if (count & LKC_UPREQ) {
313 			flags = LK_RELEASE;
314 			error = EBUSY;
315 			goto again;
316 		}
317 		/* fall through into normal upgrade */
318 
319 	case LK_UPGRADE:
320 		/*
321 		 * Upgrade a shared lock to an exclusive one.  This can cause
322 		 * the lock to be temporarily released and stolen by other
323 		 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
324 		 * this case, or use LK_EXCLUPGRADE.
325 		 *
326 		 * If we return an error (even NOWAIT), the current lock will
327 		 * be released.
328 		 *
329 		 * Start with the critical path.
330 		 */
331 		if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
332 			if (atomic_cmpset_int(&lkp->lk_count, count,
333 					      count | LKC_EXCL)) {
334 				lkp->lk_lockholder = td;
335 				break;
336 			}
337 			goto again;
338 		}
339 
340 		/*
341 		 * If we already hold the lock exclusively this operation
342 		 * succeeds and is a NOP.
343 		 */
344 		if (count & LKC_EXCL) {
345 			if (lkp->lk_lockholder == td)
346 				break;
347 			panic("lockmgr: upgrade unowned lock");
348 		}
349 		if ((count & LKC_MASK) == 0)
350 			panic("lockmgr: upgrade unowned lock");
351 
352 		/*
353 		 * We cannot upgrade without blocking at this point.
354 		 */
355 		if (extflags & LK_NOWAIT) {
356 			flags = LK_RELEASE;
357 			error = EBUSY;
358 			goto again;
359 		}
360 
361 		/*
362 		 * Release the shared lock and request the upgrade.
363 		 */
364 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
365 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
366 		tsleep_interlock(lkp, pflags);
367 		wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;
368 
369 		if (atomic_cmpset_int(&lkp->lk_count, count,
370 				      (count - 1) | wflags)) {
371 			COUNT(td, -1);
372 			error = tsleep(lkp, pflags | PINTERLOCKED,
373 				       lkp->lk_wmesg, timo);
374 			if (error)
375 				break;
376 			if (extflags & LK_SLEEPFAIL) {
377 				error = ENOLCK;
378 				break;
379 			}
380 
381 			/*
382 			 * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
383 			 * depending on whether we were able to acquire the
384 			 * LKC_UPREQ bit.
385 			 */
386 			if (count & LKC_UPREQ)
387 				flags = LK_EXCLUSIVE;	/* someone else */
388 			else
389 				flags = LK_WAITUPGRADE;	/* we own the bit */
390 		}
391 		goto again;
392 
393 	case LK_WAITUPGRADE:
394 		/*
395 		 * We own the LKC_UPREQ bit, wait until we are granted the
396 		 * exclusive lock (LKC_UPGRANT is set).
397 		 *
398 		 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
399 		 * we have to undo the upgrade request and clean up any lock
400 		 * that might have been granted via a race.
401 		 */
402 		if (count & LKC_UPGRANT) {
403 			if (atomic_cmpset_int(&lkp->lk_count, count,
404 					      count & ~LKC_UPGRANT)) {
405 				lkp->lk_lockholder = td;
406 				KKASSERT(count & LKC_EXCL);
407 				break;
408 			}
409 			/* retry */
410 		} else {
411 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
412 			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
413 			tsleep_interlock(lkp, pflags);
414 			if (atomic_cmpset_int(&lkp->lk_count, count, count)) {
415 				error = tsleep(lkp, pflags | PINTERLOCKED,
416 					       lkp->lk_wmesg, timo);
417 				if (error) {
418 					undo_upreq(lkp);
419 					break;
420 				}
421 				if (extflags & LK_SLEEPFAIL) {
422 					error = ENOLCK;
423 					undo_upreq(lkp);
424 					break;
425 				}
426 			}
427 			/* retry */
428 		}
429 		goto again;
430 
431 	case LK_RELEASE:
432 		/*
433 		 * Release the currently held lock.  If releasing the current
434 		 * lock as part of an error return, error will ALREADY be
435 		 * non-zero.
436 		 *
437 		 * When releasing the last lock we automatically transition
438 		 * LKC_UPREQ to LKC_EXCL|1.
439 		 *
440 		 * WARNING! We cannot detect when there are multiple exclusive
441 		 *	    requests pending.  We clear EXREQ unconditionally
442 		 *	    on the 1->0 transition so it is possible for
443 		 *	    shared requests to race the next exclusive
444 		 *	    request.
445 		 *
446 		 * Always succeeds.
447 		 */
448 		if ((count & LKC_MASK) == 0)
449 			panic("lockmgr: LK_RELEASE: no lock held");
450 
451 		if (count & LKC_EXCL) {
452 			if (lkp->lk_lockholder != LK_KERNTHREAD &&
453 			    lkp->lk_lockholder != td) {
454 				panic("lockmgr: pid %d, not exclusive "
455 				      "lock holder thr %p/%p unlocking",
456 				    (td->td_proc ? td->td_proc->p_pid : -1),
457 				    td, lkp->lk_lockholder);
458 			}
459 			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
460 				/*
461 				 * Last exclusive count is being released
462 				 */
463 				otd = lkp->lk_lockholder;
464 				lkp->lk_lockholder = NULL;
465 				if (!atomic_cmpset_int(&lkp->lk_count, count,
466 					      (count - 1) &
467 					   ~(LKC_EXCL|LKC_EXREQ|LKC_SHREQ))) {
468 					lkp->lk_lockholder = otd;
469 					goto again;
470 				}
471 				if (count & (LKC_EXREQ|LKC_SHREQ))
472 					wakeup(lkp);
473 				/* success */
474 			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
475 				   (LKC_UPREQ | 1)) {
476 				/*
477 				 * Last exclusive count is being released but
478 				 * an upgrade request is present, automatically
479 				 * grant an exclusive state to the owner of
480 				 * the upgrade request.
481 				 */
482 				otd = lkp->lk_lockholder;
483 				lkp->lk_lockholder = NULL;
484 				if (!atomic_cmpset_int(&lkp->lk_count, count,
485 						(count & ~LKC_UPREQ) |
486 						LKC_UPGRANT)) {
487 					lkp->lk_lockholder = otd;
					goto again;
488 				}
489 				wakeup(lkp);
490 				/* success */
491 			} else {
492 				otd = lkp->lk_lockholder;
493 				if (!atomic_cmpset_int(&lkp->lk_count, count,
494 						       count - 1)) {
495 					goto again;
496 				}
497 				/* success */
498 			}
499 			/* success */
500 			if (otd != LK_KERNTHREAD)
501 				COUNT(td, -1);
502 		} else {
503 			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
504 				/*
505 				 * Last shared count is being released.
506 				 */
507 				if (!atomic_cmpset_int(&lkp->lk_count, count,
508 					      (count - 1) &
509 					       ~(LKC_EXREQ|LKC_SHREQ))) {
510 					goto again;
511 				}
512 				if (count & (LKC_EXREQ|LKC_SHREQ))
513 					wakeup(lkp);
514 				/* success */
515 			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
516 				   (LKC_UPREQ | 1)) {
517 				/*
518 				 * Last shared count is being released but
519 				 * an upgrade request is present, automatically
520 				 * grant an exclusive state to the owner of
521 				 * the upgrade request.
522 				 */
523 				if (!atomic_cmpset_int(&lkp->lk_count, count,
524 					      (count & ~LKC_UPREQ) |
525 					      LKC_EXCL | LKC_UPGRANT)) {
526 					goto again;
527 				}
528 				wakeup(lkp);
529 			} else {
530 				if (!atomic_cmpset_int(&lkp->lk_count, count,
531 						       count - 1)) {
532 					goto again;
533 				}
534 			}
535 			/* success */
536 			COUNT(td, -1);
537 		}
538 		break;
539 
540 	default:
541 		panic("lockmgr: unknown locktype request %d",
542 		    flags & LK_TYPE_MASK);
543 		/* NOTREACHED */
544 	}
545 	return (error);
546 }
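
/*
 * Usage sketch for the upgrade paths above (hypothetical caller).  Both
 * LK_UPGRADE and LK_EXCLUPGRADE give up the shared hold when they fail,
 * so the caller must not assume it still owns the lock after an error
 * and must re-validate anything it read under the shared lock:
 *
 *	lockmgr(&sc->sc_lock, LK_SHARED);
 *	...
 *	if (lockmgr(&sc->sc_lock, LK_EXCLUPGRADE) != 0) {
 *		(the shared count has already been released on failure)
 *		lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
 *		(re-check the state examined under the shared lock)
 *	}
 *	...
 *	lockmgr(&sc->sc_lock, LK_RELEASE);
 */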
547 
548 /*
549  * Undo an upgrade request after a failed LK_WAITUPGRADE wait
550  */
551 static
552 void
553 undo_upreq(struct lock *lkp)
554 {
555 	int count;
556 
557 	for (;;) {
558 		count = lkp->lk_count;
559 		cpu_ccfence();
560 		if (count & LKC_UPGRANT) {
561 			/*
562 			 * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
563 			 * another thread might own UPREQ.  Clear UPGRANT
564 			 * and release the granted lock.
565 			 */
566 			if (atomic_cmpset_int(&lkp->lk_count, count,
567 					      count & ~LKC_UPGRANT)) {
568 				lockmgr(lkp, LK_RELEASE);
569 				break;
570 			}
571 		} else if (count & LKC_EXCL) {
572 			/*
573 			 * Clear the UPREQ we still own.  Nobody to wakeup
574 			 * here because there is an existing exclusive
575 			 * holder.
576 			 */
577 			KKASSERT(count & LKC_UPREQ);
578 			KKASSERT((count & LKC_MASK) > 0);
579 			if (atomic_cmpset_int(&lkp->lk_count, count,
580 					      count & ~LKC_UPREQ)) {
581 				wakeup(lkp);
582 				break;
583 			}
584 		} else if (count & LKC_EXREQ) {
585 			/*
586 			 * Clear the UPREQ we still own.  We cannot wakeup any
587 			 * shared waiters because there is an exclusive
588 			 * request pending.
589 			 */
590 			KKASSERT(count & LKC_UPREQ);
591 			KKASSERT((count & LKC_MASK) > 0);
592 			if (atomic_cmpset_int(&lkp->lk_count, count,
593 					      count & ~LKC_UPREQ)) {
594 				break;
595 			}
596 		} else {
597 			/*
598 			 * Clear the UPREQ we still own.  Wakeup any shared
599 			 * waiters.
600 			 */
601 			KKASSERT(count & LKC_UPREQ);
602 			KKASSERT((count & LKC_MASK) > 0);
603 			if (atomic_cmpset_int(&lkp->lk_count, count,
604 					      count &
605 					      ~(LKC_UPREQ | LKC_SHREQ))) {
606 				if (count & LKC_SHREQ)
607 					wakeup(lkp);
608 				break;
609 			}
610 		}
611 		/* retry */
612 	}
613 }
614 
615 void
616 lockmgr_kernproc(struct lock *lp)
617 {
618 	struct thread *td __debugvar = curthread;
619 
620 	if (lp->lk_lockholder != LK_KERNTHREAD) {
621 		KASSERT(lp->lk_lockholder == td,
622 		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
623 		lp->lk_lockholder = LK_KERNTHREAD;
624 		COUNT(td, -1);
625 	}
626 }
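
/*
 * Hand-off sketch for lockmgr_kernproc() (hypothetical caller): the
 * acquiring thread disassociates itself from the lock so that whatever
 * context finishes the work may release it.
 *
 *	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
 *	lockmgr_kernproc(&sc->sc_lock);		(owner becomes LK_KERNTHREAD)
 *	... pass the object to another context, which eventually does ...
 *	lockmgr(&sc->sc_lock, LK_RELEASE);
 */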
627 
628 /*
629  * Initialize a lock; required before use.
630  */
631 void
632 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
633 {
634 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
635 	lkp->lk_count = 0;
636 	lkp->lk_wmesg = wmesg;
637 	lkp->lk_timo = timo;
638 	lkp->lk_lockholder = LK_NOTHREAD;
639 }
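
/*
 * Initialization sketch (hypothetical caller): "wmesg" is reported by
 * tsleep() while waiting, "timo" is only consulted by requests that
 * also pass LK_TIMELOCK, and "flags" is masked with LK_EXTFLG_MASK.
 *
 *	lockinit(&sc->sc_lock, "sclock", hz / 10, LK_CANRECURSE);
 *	...
 *	error = lockmgr(&sc->sc_lock, LK_EXCLUSIVE | LK_TIMELOCK);
 *	(a timeout surfaces as the tsleep() error, typically EWOULDBLOCK)
 */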
640 
641 /*
642  * Reinitialize a lock that is being reused for a different purpose, but
643  * which may have pending (blocked) threads sitting on it.  The caller
644  * must already hold the interlock.
645  */
646 void
647 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
648 {
649 	lkp->lk_wmesg = wmesg;
650 	lkp->lk_timo = timo;
651 }
652 
653 /*
654  * De-initialize a lock.  The structure must no longer be used by anyone.
655  */
656 void
657 lockuninit(struct lock *lkp)
658 {
659 	KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
660 }
661 
662 /*
663  * Determine the status of a lock.
664  */
665 int
666 lockstatus(struct lock *lkp, struct thread *td)
667 {
668 	int lock_type = 0;
669 	int count;
670 
671 	count = lkp->lk_count;
672 	cpu_ccfence();
673 
674 	if (count & LKC_EXCL) {
675 		if (td == NULL || lkp->lk_lockholder == td)
676 			lock_type = LK_EXCLUSIVE;
677 		else
678 			lock_type = LK_EXCLOTHER;
679 	} else if (count & LKC_MASK) {
680 		lock_type = LK_SHARED;
681 	}
682 	return (lock_type);
683 }
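
/*
 * Assertion sketch (hypothetical caller): verify that the current
 * thread is the exclusive owner before touching state that requires
 * it.  LK_EXCLOTHER means some other thread holds the lock exclusively.
 *
 *	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);
 */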
684 
685 /*
686  * Return non-zero if the caller owns the lock shared or exclusive.
687  * We can only guess re: shared locks.
688  */
689 int
690 lockowned(struct lock *lkp)
691 {
692 	thread_t td = curthread;
693 	int count;
694 
695 	count = lkp->lk_count;
696 	cpu_ccfence();
697 
698 	if (count & LKC_EXCL)
699 		return(lkp->lk_lockholder == td);
700 	else
701 		return((count & LKC_MASK) != 0);
702 }
703 
704 /*
705  * Determine the number of holders of a lock.
706  *
707  * The non-blocking version can usually be used for assertions.
708  */
709 int
710 lockcount(struct lock *lkp)
711 {
712 	return(lkp->lk_count & LKC_MASK);
713 }
714 
715 int
716 lockcountnb(struct lock *lkp)
717 {
718 	return(lkp->lk_count & LKC_MASK);
719 }
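
/*
 * Teardown sketch (hypothetical caller): assert that the lock is no
 * longer held before de-initializing it.
 *
 *	KKASSERT(lockcountnb(&sc->sc_lock) == 0);
 *	lockuninit(&sc->sc_lock);
 */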
720 
721 /*
722  * Print out information about state of a lock. Used by VOP_PRINT
723  * routines to display status about contained locks.
724  */
725 void
726 lockmgr_printinfo(struct lock *lkp)
727 {
728 	struct thread *td = lkp->lk_lockholder;
729 	struct proc *p;
730 	int count;
731 
732 	count = lkp->lk_count;
733 	cpu_ccfence();
734 
735 	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
736 		p = td->td_proc;
737 	else
738 		p = NULL;
739 
740 	if (count & LKC_EXCL) {
741 		kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
742 		    lkp->lk_wmesg, count, td,
743 		    p ? p->p_pid : -99);
744 	} else if (count & LKC_MASK) {
745 		kprintf(" lock type %s: SHARED (count %08x)",
746 		    lkp->lk_wmesg, count);
747 	} else {
748 		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
749 	}
750 	if (count & (LKC_EXREQ|LKC_SHREQ))
751 		kprintf(" with waiters\n");
752 	else
753 		kprintf("\n");
754 }
755 
756 void
757 lock_sysinit(struct lock_args *arg)
758 {
759 	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
760 }
761