Lines matching defs:lk — FreeBSD lockmgr implementation (sys/kern/kern_lock.c); each entry is the file line number followed by the matching source line.
75 #define _lockmgr_assert(lk, what, file, line)
82 #define STACK_PRINT(lk)
83 #define STACK_SAVE(lk)
84 #define STACK_ZERO(lk)
86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
87 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
88 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
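The two STACK_* groups above (lines 82–84 empty, 86–88 live) are the no-op and stack(9)-backed variants of the same macros. A sketch of the surrounding conditional, which this match list elides; the DEBUG_LOCKS guard is an assumption based on how the kernel typically gates lk_stack recording:

    #ifndef DEBUG_LOCKS
    #define STACK_PRINT(lk)
    #define STACK_SAVE(lk)
    #define STACK_ZERO(lk)
    #else
    /* Record/print the stack of the last acquirer in lk->lk_stack. */
    #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
    #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
    #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
    #endif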
91 #define LOCK_LOG2(lk, string, arg1, arg2) \
92 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
95 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
144 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
183 static __always_inline bool lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
185 static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
200 lockmgr_note_shared_acquire(struct lock *lk, int contested,
204 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
206 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
207 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
210 STACK_SAVE(lk);
214 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
217 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
218 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
224 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
228 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
230 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
231 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
234 STACK_SAVE(lk);
238 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
241 if (!lockmgr_disowned(lk)) {
242 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
245 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
250 lockmgr_xholder(const struct lock *lk)
254 x = lockmgr_read_value(lk);
265 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
277 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
283 if (lk->lk_exslpfail < USHRT_MAX)
284 lk->lk_exslpfail++;
287 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
290 sleepq_set_timeout(&lk->lock_object, timo);
296 error = sleepq_timedwait_sig(&lk->lock_object, pri);
298 error = sleepq_timedwait(&lk->lock_object, pri);
300 error = sleepq_wait_sig(&lk->lock_object, pri);
302 sleepq_wait(&lk->lock_object, pri);
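Lines 290–302 are sleeplk() choosing among the four sleepqueue wait variants; the choice is a 2x2 dispatch on whether a timeout (timo) and signal catching (catch, line 287) were requested. Condensed from the lines above:

    /* Sketch of sleeplk()'s wait dispatch; setup and teardown elided. */
    if (timo && catch)
            error = sleepq_timedwait_sig(&lk->lock_object, pri);
    else if (timo)
            error = sleepq_timedwait(&lk->lock_object, pri);
    else if (catch)
            error = sleepq_wait_sig(&lk->lock_object, pri);
    else
            sleepq_wait(&lk->lock_object, pri);  /* no timeout, not interruptible */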
311 wakeupshlk(struct lock *lk, const char *file, int line)
318 x = lockmgr_read_value(lk);
319 if (lockmgr_sunlock_try(lk, &x))
326 sleepq_lock(&lk->lock_object);
327 orig_x = lockmgr_read_value(lk);
345 realexslp = sleepq_sleepcnt(&lk->lock_object,
348 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
349 lk->lk_exslpfail = 0;
353 lk->lk_exslpfail = 0;
354 LOCK_LOG2(lk,
356 __func__, lk);
357 LOCK_LOG2(lk,
359 __func__, lk);
360 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
371 lk->lk_exslpfail = 0;
375 if (lockmgr_sunlock_try(lk, &orig_x)) {
376 sleepq_release(&lk->lock_object);
381 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
385 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
386 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
388 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
389 sleepq_release(&lk->lock_object);
393 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
427 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
432 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
434 &lk->lk_lock));
453 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
454 lk->lk_lock = LK_UNLOCKED;
455 lk->lk_recurse = 0;
456 lk->lk_exslpfail = 0;
457 lk->lk_timo = timo;
458 lk->lk_pri = pri;
459 STACK_ZERO(lk);
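lockinit() records the defaults (wmesg, pri, timo) that __lockmgr_args() later falls back on when a caller passes LK_WMESG_DEFAULT and friends (lines 1309–1311). A minimal kernel-context sketch of the public lockmgr(9) API; my_lk and the function names are illustrative:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/lockmgr.h>

    static struct lock my_lk;       /* hypothetical consumer lock */

    static void
    my_init(void)
    {
            /* PVFS sleep priority, no timeout, no flags. */
            lockinit(&my_lk, PVFS, "mylk", 0, 0);
    }

    static void
    my_use(void)
    {
            lockmgr(&my_lk, LK_EXCLUSIVE, NULL);    /* may sleep */
            /* ... protected work ... */
            lockmgr(&my_lk, LK_RELEASE, NULL);
    }

    static void
    my_fini(void)
    {
            /* lockdestroy() asserts unlocked, not recursed (lines 502-504). */
            lockdestroy(&my_lk);
    }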
467 lockallowshare(struct lock *lk)
470 lockmgr_assert(lk, KA_XLOCKED);
471 lk->lock_object.lo_flags &= ~LK_NOSHARE;
475 lockdisableshare(struct lock *lk)
478 lockmgr_assert(lk, KA_XLOCKED);
479 lk->lock_object.lo_flags |= LK_NOSHARE;
483 lockallowrecurse(struct lock *lk)
486 lockmgr_assert(lk, KA_XLOCKED);
487 lk->lock_object.lo_flags |= LO_RECURSABLE;
491 lockdisablerecurse(struct lock *lk)
494 lockmgr_assert(lk, KA_XLOCKED);
495 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
499 lockdestroy(struct lock *lk)
502 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
503 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
504 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
505 lock_destroy(&lk->lock_object);
509 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
520 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
529 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
534 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
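lockmgr_slock_try() and lockmgr_sunlock_try() are the lock-free fast paths: fcmpset, like C11's compare-exchange, rewrites the caller's snapshot *xp on failure, so the loop never re-reads the lock word explicitly. A self-contained userland analogue of the release loop; SHARE_ONE, word, and the function name are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHARE_ONE ((uintptr_t)2)     /* shared-holder count, above bit 0 */

    static _Atomic uintptr_t word = SHARE_ONE;   /* one shared holder */

    static bool
    sunlock_try(uintptr_t *xp)
    {
            while (*xp >= SHARE_ONE) {
                    /* On failure *xp is refreshed with the current word,
                     * mirroring atomic_fcmpset_rel_ptr() above. */
                    if (atomic_compare_exchange_weak_explicit(&word, xp,
                        *xp - SHARE_ONE, memory_order_release,
                        memory_order_relaxed))
                            return (true);
            }
            return (false);         /* caller takes the hard path */
    }

    int
    main(void)
    {
            uintptr_t x = atomic_load(&word);
            printf("released: %d, word now: %ju\n", sunlock_try(&x),
                (uintmax_t)atomic_load(&word));
            return (0);
    }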
545 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
567 x = lockmgr_read_value(lk);
577 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
600 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
602 x = lockmgr_read_value(lk);
611 LOCK_LOG2(lk,
613 __func__, lk);
619 if (lockmgr_slock_try(lk, &x, flags, false))
622 lock_profile_obtain_lock_failed(&lk->lock_object, false,
626 if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
639 LOCK_LOG2(lk, "%s: %p fails the try operation",
640 __func__, lk);
649 sleepq_lock(&lk->lock_object);
650 x = lockmgr_read_value(lk);
658 sleepq_release(&lk->lock_object);
667 if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
671 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
672 __func__, lk);
676 iwmesg = lk->lock_object.lo_name;
677 ipri = lk->lk_pri;
678 itimo = lk->lk_timo;
691 sleep_time -= lockstat_nsecs(&lk->lock_object);
693 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
696 sleep_time += lockstat_nsecs(&lk->lock_object);
700 LOCK_LOG3(lk,
702 __func__, lk, error);
705 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
706 __func__, lk);
707 x = lockmgr_read_value(lk);
712 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
717 lockmgr_note_shared_acquire(lk, contested, waittime,
720 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
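The shared slow path above follows the classic sleepqueue protocol: lock the sleepqueue chain (line 649), re-read the lock word (650), publish intent by setting the shared-waiters flag (667), sleep through sleeplk() (693), then reload and retry (707). The waiters flag is what obliges a releasing thread to walk the wakeup path instead of the fast cmpset. A hedged stdatomic rendering of that one step, with an illustrative flag bit:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SHARED_WAITERS ((uintptr_t)1)   /* illustrative bit assignment */

    /* Publish that a thread is about to sleep; on failure *xp holds the
     * fresh lock word and the caller revalidates before sleeping. */
    static bool
    set_shared_waiters(_Atomic uintptr_t *lock, uintptr_t *xp)
    {
            return (atomic_compare_exchange_weak_explicit(lock, xp,
                *xp | SHARED_WAITERS, memory_order_acquire,
                memory_order_relaxed));
    }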
731 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
752 x = lockmgr_read_value(lk);
762 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
786 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
794 if (lockmgr_xlocked(lk)) {
796 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
802 LOCK_LOG2(lk,
804 __func__, lk);
812 STACK_PRINT(lk);
814 "@ %s:%d\n", __func__, lk, file, line);
816 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
817 lk->lk_recurse++;
818 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
819 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
820 lk->lk_recurse, file, line);
821 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
833 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
838 lock_profile_obtain_lock_failed(&lk->lock_object, false,
842 if (lockmgr_xlock_adaptive(&lda, lk, &x))
854 LOCK_LOG2(lk, "%s: %p fails the try operation",
855 __func__, lk);
864 sleepq_lock(&lk->lock_object);
865 x = lockmgr_read_value(lk);
873 sleepq_release(&lk->lock_object);
889 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
891 sleepq_release(&lk->lock_object);
892 LOCK_LOG2(lk,
894 __func__, lk);
905 if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
909 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
910 __func__, lk);
914 iwmesg = lk->lock_object.lo_name;
915 ipri = lk->lk_pri;
916 itimo = lk->lk_timo;
929 sleep_time -= lockstat_nsecs(&lk->lock_object);
931 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
934 sleep_time += lockstat_nsecs(&lk->lock_object);
938 LOCK_LOG3(lk,
940 __func__, lk, error);
943 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
944 __func__, lk);
945 x = lockmgr_read_value(lk);
950 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
955 lockmgr_note_exclusive_acquire(lk, contested, waittime,
958 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
969 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
981 _lockmgr_assert(lk, KA_SLOCKED, file, line);
984 v = lockmgr_read_value(lk);
988 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
989 __func__, lk);
993 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
995 lockmgr_note_shared_release(lk, file, line);
1009 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1010 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1012 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1014 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1021 error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
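lockmgr_upgrade() has two outcomes visible above: a single cmpset from sole-sharer straight to owner (line 1009), or, when that cannot work, dropping the shared hold (993) and contending for exclusive through lockmgr_xlock_hard() (1021), meaning the lock may be released in between and callers must revalidate. A hedged usage sketch against the documented lockmgr(9) flags, reusing the illustrative my_lk:

    /* LK_TRYUPGRADE fails with an error rather than dropping the
     * shared hold; fall back to release-and-relock explicitly. */
    if (lockmgr(&my_lk, LK_TRYUPGRADE, NULL) != 0) {
            lockmgr(&my_lk, LK_RELEASE, NULL);
            lockmgr(&my_lk, LK_EXCLUSIVE, NULL);
            /* State may have changed while unlocked: revalidate. */
    }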
1029 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1045 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1047 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1049 x = lockmgr_read_value(lk);
1050 if (lockmgr_slock_try(lk, &x, flags, true)) {
1051 lockmgr_note_shared_acquire(lk, 0, 0,
1055 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1061 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1065 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1066 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1067 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1071 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1077 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1088 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1094 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1098 wakeupshlk(lk, file, line);
1104 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1129 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1130 lk->lk_recurse--;
1131 if (lk->lk_recurse == 0)
1132 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1136 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1139 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1142 sleepq_lock(&lk->lock_object);
1143 x = lockmgr_read_value(lk);
1162 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1164 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1165 lk->lk_exslpfail = 0;
1169 lk->lk_exslpfail = 0;
1170 LOCK_LOG2(lk,
1172 __func__, lk);
1173 LOCK_LOG2(lk,
1175 __func__, lk);
1176 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
1187 lk->lk_exslpfail = 0;
1191 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1192 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1194 atomic_store_rel_ptr(&lk->lk_lock, v);
1195 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1196 sleepq_release(&lk->lock_object);
1215 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1221 MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1224 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1226 x = lockmgr_read_value(lk);
1227 if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1228 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1232 return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1236 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1244 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1247 if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1248 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1253 return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1257 lockmgr_unlock(struct lock *lk)
1266 _lockmgr_assert(lk, KA_LOCKED, file, line);
1267 x = lockmgr_read_value(lk);
1269 lockmgr_note_shared_release(lk, file, line);
1270 if (lockmgr_sunlock_try(lk, &x)) {
1271 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1273 return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1277 lockmgr_note_exclusive_release(lk, file, line);
1278 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1279 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1281 return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
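lockmgr_unlock() branches on the lock word itself: LK_SHARE selects the shared release, otherwise the word should hold the owner's tid and a single cmpset back to LK_UNLOCKED finishes the uncontended case (line 1278); anything else (waiter bits, recursion) goes to lockmgr_xunlock_hard(). The fast exclusive release as a userland analogue, with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define UNLOCKED ((uintptr_t)0)

    /* Succeeds only if the word is exactly our tid, i.e. no waiter
     * bits are set; otherwise the caller must wake sleepers. */
    static bool
    xunlock_fast(_Atomic uintptr_t *lock, uintptr_t tid)
    {
            uintptr_t expected = tid;
            return (atomic_compare_exchange_strong_explicit(lock, &expected,
                UNLOCKED, memory_order_release, memory_order_relaxed));
    }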
1288 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1309 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1310 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1311 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1329 lk->lock_object.lo_name, file, line));
1333 if (lk->lock_object.lo_flags & LK_NOSHARE) {
1341 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1351 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1355 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1358 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1361 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1362 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1367 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1379 x = lockmgr_read_value(lk);
1382 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1387 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1388 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1391 _lockmgr_assert(lk, KA_LOCKED, file, line);
1392 x = lockmgr_read_value(lk);
1395 lockmgr_note_shared_release(lk, file, line);
1396 return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1398 lockmgr_note_exclusive_release(lk, file, line);
1399 return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1404 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1412 if (lockmgr_xlocked(lk)) {
1420 if (lk->lk_lock == LK_UNLOCKED &&
1421 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1427 lock_profile_obtain_lock_failed(&lk->lock_object, false,
1435 LOCK_LOG2(lk, "%s: %p fails the try operation",
1436 __func__, lk);
1445 sleepq_lock(&lk->lock_object);
1446 x = lockmgr_read_value(lk);
1453 sleepq_release(&lk->lock_object);
1487 lk->lk_exslpfail = 0;
1493 sleepq_sleepcnt(&lk->lock_object,
1495 if (lk->lk_exslpfail >= realexslp) {
1496 lk->lk_exslpfail = 0;
1500 LOCK_LOG2(lk,
1502 __func__, lk);
1503 LOCK_LOG2(lk,
1505 __func__, lk);
1507 &lk->lock_object,
1512 lk->lk_exslpfail = 0;
1514 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1515 sleepq_release(&lk->lock_object);
1518 LOCK_LOG3(lk,
1520 __func__, lk, queue == SQ_SHARED_QUEUE ?
1522 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
1532 for (v = lk->lk_lock;
1534 v = lk->lk_lock)
1544 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1546 sleepq_release(&lk->lock_object);
1549 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1550 __func__, lk);
1563 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1565 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1567 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1568 __func__, lk);
1572 lock_profile_obtain_lock_success(&lk->lock_object,
1574 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1575 lk->lk_recurse, file, line);
1576 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1579 STACK_SAVE(lk);
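The LK_DRAIN block above (roughly lines 1404–1579) grants the lock exclusively only once no other holders or waiters remain, which is why teardown paths drain before destroying. A hedged sketch of documented lockmgr(9) usage, again with the illustrative my_lk:

    lockmgr(&my_lk, LK_DRAIN, NULL);        /* wait out holders and waiters */
    /* ... dismantle the protected object ... */
    lockmgr(&my_lk, LK_RELEASE, NULL);      /* drain leaves us as owner */
    lockdestroy(&my_lk);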
1595 _lockmgr_disown(struct lock *lk, const char *file, int line)
1603 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1608 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1615 if (LK_HOLDER(lk->lk_lock) != tid)
1617 lock_profile_release_lock(&lk->lock_object, false);
1618 LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1619 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1620 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1622 STACK_SAVE(lk);
1628 x = lockmgr_read_value(lk);
1631 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1639 lockmgr_printinfo(const struct lock *lk)
1644 if (lk->lk_lock == LK_UNLOCKED)
1645 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1646 else if (lk->lk_lock & LK_SHARE)
1648 lk->lock_object.lo_name,
1649 (uintmax_t)LK_SHARERS(lk->lk_lock));
1651 td = lockmgr_xholder(lk);
1654 lk->lock_object.lo_name);
1657 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1662 x = lk->lk_lock;
1670 STACK_PRINT(lk);
1674 lockstatus(const struct lock *lk)
1680 x = lockmgr_read_value(lk);
1704 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1726 if (slocked || (lk->lk_lock & LK_SHARE)) {
1727 witness_assert(&lk->lock_object, what, file, line);
1731 if (lk->lk_lock == LK_UNLOCKED ||
1732 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1733 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1735 lk->lock_object.lo_name, slocked ? "share" : "",
1738 if ((lk->lk_lock & LK_SHARE) == 0) {
1739 if (lockmgr_recursed(lk)) {
1742 lk->lock_object.lo_name, file,
1746 lk->lock_object.lo_name, file, line);
1752 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1754 lk->lock_object.lo_name, file, line);
1755 if (lockmgr_recursed(lk)) {
1758 lk->lock_object.lo_name, file, line);
1761 lk->lock_object.lo_name, file, line);
1764 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1766 lk->lock_object.lo_name, file, line);
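_lockmgr_assert() backs the lockmgr_assert() checks sprinkled through this file (lines 470–494) and compiles to nothing without INVARIANTS (line 75). Typical consumer usage, with the same illustrative my_lk:

    lockmgr_assert(&my_lk, KA_LOCKED);      /* held shared or exclusive */
    lockmgr_assert(&my_lk, KA_SLOCKED);     /* held shared */
    lockmgr_assert(&my_lk, KA_XLOCKED | KA_NOTRECURSED);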
1779 const struct lock *lk;
1781 lk = td->td_wchan;
1783 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1785 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1786 if (lk->lk_lock & LK_SHARE)
1788 (uintmax_t)LK_SHARERS(lk->lk_lock));
1791 *ownerp = lockmgr_xholder(lk);
1800 const struct lock *lk;
1802 lk = (const struct lock *)lock;
1805 if (lk->lk_lock == LK_UNLOCKED)
1807 else if (lk->lk_lock & LK_SHARE)
1808 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1810 td = lockmgr_xholder(lk);
1817 if (lockmgr_recursed(lk))
1818 db_printf(" recursed: %d\n", lk->lk_recurse);
1821 switch (lk->lk_lock & LK_ALL_WAITERS) {
1835 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)