Lines matching full:sx in FreeBSD's sys/kern/kern_sx.c, the shared/exclusive (sx) lock implementation. Each match is prefixed with its line number in that file.
38 * so should not be relied upon in combination with sx locks.
55 #include <sys/sx.h>
112 #define sx_recursed(sx) ((sx)->sx_recurse != 0)
125 .lc_name = "sx",
139 #define _sx_assert(sx, what, file, line)
146 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
178 sx_assert((const struct sx *)lock, what);
184 struct sx *sx;
186 sx = (struct sx *)lock;
188 sx_slock(sx);
190 sx_xlock(sx);
196 struct sx *sx;
198 sx = (struct sx *)lock;
199 sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
200 if (sx_xlocked(sx)) {
201 sx_xunlock(sx);
204 sx_sunlock(sx);
213 const struct sx *sx;
216 sx = (const struct sx *)lock;
217 x = sx->sx_lock;
233 sx_init_flags(struct sx *sx, const char *description, int opts)
239 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
241 &sx->sx_lock));
257 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
258 sx->sx_lock = SX_LOCK_UNLOCKED;
259 sx->sx_recurse = 0;
263 sx_destroy(struct sx *sx)
266 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
267 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
268 sx->sx_lock = SX_LOCK_DESTROYED;
269 lock_destroy(&sx->lock_object);
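
The matches above cover the lock's lifecycle: sx_init_flags() seeds sx_lock with SX_LOCK_UNLOCKED and clears sx_recurse, while sx_destroy() poisons the word with SX_LOCK_DESTROYED so any later use trips the KASSERTs seen throughout this file. A minimal consumer sketch; the "foo" names are hypothetical, not anything in kern_sx.c:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_sx;

static void
foo_setup(void)
{
	/* SX_RECURSE sets LO_RECURSABLE so the owner may re-enter. */
	sx_init_flags(&foo_sx, "foo sx", SX_RECURSE);
}

static void
foo_teardown(void)
{
	/* Must be unlocked and not recursed, per the KASSERTs above. */
	sx_destroy(&foo_sx);
}
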
273 sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
281 ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
282 curthread, sx->lock_object.lo_name, file, line));
284 x = sx->sx_lock;
287 ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
290 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
291 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
292 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
294 sx, 0, 0, file, line, LOCKSTAT_READER);
301 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
306 sx_try_slock_(struct sx *sx, const char *file, int line)
309 return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
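
sx_try_slock_int() bumps the sharer count with a single atomic_fcmpset_acq_ptr() and never sleeps, so it is usable where blocking is not. A hedged usage sketch (foo_sx as in the earlier example):

if (sx_try_slock(&foo_sx)) {
	/* read-only access to the protected state */
	sx_sunlock(&foo_sx);
} else {
	/* lock not available without sleeping; defer the work */
}
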
313 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
320 ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
321 curthread, sx->lock_object.lo_name, file, line));
322 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
323 ("sx_xlock() of destroyed sx @ %s:%d", file, line));
324 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
328 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
329 error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
331 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
334 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
336 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
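
The _sx_xlock() fast path is one fcmpset of the unowned lock word to curthread's tid; everything else falls through to _sx_xlock_hard(). The opts/error plumbing exists for interruptible acquisitions, reachable via sx_xlock_sig(9). A sketch of that pattern:

int error;

error = sx_xlock_sig(&foo_sx);
if (error != 0)
	return (error);	/* EINTR or ERESTART: interrupted by a signal */
/* ... modify protected state ... */
sx_xunlock(&foo_sx);
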
344 sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
357 ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
358 curthread, sx->lock_object.lo_name, file, line));
359 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
360 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
366 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
370 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
371 sx->sx_recurse++;
372 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
379 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
381 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
385 sx, 0, 0, file, line, LOCKSTAT_WRITER);
393 sx_try_xlock_(struct sx *sx, const char *file, int line)
396 return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
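
Note the recursion branch above: when the caller already owns the lock and LO_RECURSABLE is set, sx_try_xlock() succeeds by bumping sx_recurse and setting SX_LOCK_RECURSED rather than failing. Each successful acquire still needs its own unlock:

sx_xlock(&foo_sx);
if (sx_try_xlock(&foo_sx)) {	/* recurses; requires SX_RECURSE */
	/* ... */
	sx_xunlock(&foo_sx);	/* drops one recursion level */
}
sx_xunlock(&foo_sx);		/* releases the lock for real */
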
400 _sx_xunlock(struct sx *sx, const char *file, int line)
403 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
404 ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
405 _sx_assert(sx, SA_XLOCKED, file, line);
406 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
407 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
410 _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
412 __sx_xunlock(sx, curthread, file, line);
423 sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
432 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
433 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
434 _sx_assert(sx, SA_SLOCKED, file, line);
442 x = SX_READ_VALUE(sx);
447 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
453 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
456 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
458 LOCKSTAT_RECORD0(sx__upgrade, sx);
464 sx_try_upgrade_(struct sx *sx, const char *file, int line)
467 return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
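
sx_try_upgrade() can only succeed when the caller is the sole sharer, so callers must be prepared for failure. The conventional fallback re-acquires exclusively after dropping the shared lock, revalidating any state read under it:

sx_slock(&foo_sx);
/* ... discover that an exclusive lock is needed ... */
if (!sx_try_upgrade(&foo_sx)) {
	sx_sunlock(&foo_sx);
	sx_xlock(&foo_sx);
	/* state may have changed while the lock was dropped */
}
sx_xunlock(&foo_sx);
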
474 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
481 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
482 ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
483 _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
485 if (sx_recursed(sx))
489 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
502 x = sx->sx_lock;
504 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
512 sleepq_lock(&sx->lock_object);
518 x = sx->sx_lock;
519 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
522 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
524 sleepq_release(&sx->lock_object);
528 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
529 LOCKSTAT_RECORD0(sx__downgrade, sx);
533 sx_downgrade_(struct sx *sx, const char *file, int line)
536 sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
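
sx_downgrade_int() trades the exclusive hold for a single sharer reference and, as the sleepq calls above show, wakes any queued readers so they can join. Typical use finishes an update and then keeps reading without excluding other readers:

sx_xlock(&foo_sx);
/* ... perform the update ... */
sx_downgrade(&foo_sx);	/* now one of possibly many sharers */
/* ... continue with read-only access ... */
sx_sunlock(&foo_sx);
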
560 * accessible from at least sx.h.
563 _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
597 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
602 all_time -= lockstat_nsecs(&sx->lock_object);
615 x = SX_READ_VALUE(sx);
619 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
620 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
621 sx->lock_object.lo_name, file, line));
622 sx->sx_recurse++;
623 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
624 if (LOCK_LOG_TEST(&sx->lock_object, 0))
625 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
629 if (LOCK_LOG_TEST(&sx->lock_object, 0))
631 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
642 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
649 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
653 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
665 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
681 if (LOCK_LOG_TEST(&sx->lock_object, 0))
683 __func__, sx, owner);
686 sx->lock_object.lo_name);
689 x = SX_READ_VALUE(sx);
705 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
716 sx->lock_object.lo_name);
720 x = SX_READ_VALUE(sx);
739 sleepq_lock(&sx->lock_object);
740 x = SX_READ_VALUE(sx);
748 sleepq_release(&sx->lock_object);
764 sleepq_release(&sx->lock_object);
770 sleepq_release(&sx->lock_object);
789 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
791 sleepq_release(&sx->lock_object);
793 __func__, sx);
809 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
823 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
827 if (LOCK_LOG_TEST(&sx->lock_object, 0))
829 __func__, sx);
840 if (LOCK_LOG_TEST(&sx->lock_object, 0))
842 __func__, sx);
845 sleep_time -= lockstat_nsecs(&sx->lock_object);
847 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
855 THREAD_CONTENTION_DONE(&sx->lock_object);
857 sleepq_wait(&sx->lock_object, 0);
859 error = sleepq_wait_sig(&sx->lock_object, 0);
860 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
862 sleep_time += lockstat_nsecs(&sx->lock_object);
866 if (LOCK_LOG_TEST(&sx->lock_object, 0))
869 __func__, sx);
872 if (LOCK_LOG_TEST(&sx->lock_object, 0))
874 __func__, sx);
875 x = SX_READ_VALUE(sx);
877 THREAD_CONTENTION_DONE(&sx->lock_object);
890 all_time += lockstat_nsecs(&sx->lock_object);
892 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
896 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
902 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
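
The repeated atomic_fcmpset_acq_ptr() calls above are the heart of the slow path: each failed compare-exchange leaves the observed lock word in x, which the code inspects to choose between spinning on a running owner and sleeping on the sleepqueue. A userland analogy of that fcmpset loop in C11 atomics (the shape of the pattern, not the kernel's code):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t lock_word;	/* 0 == unlocked, else owner tid */

static void
xlock_shape(uintptr_t tid)
{
	uintptr_t x = 0;

	while (!atomic_compare_exchange_weak_explicit(&lock_word, &x, tid,
	    memory_order_acquire, memory_order_relaxed)) {
		/* x now holds what was observed; the kernel spins here
		 * while the owner is on CPU, otherwise queues and sleeps. */
		x = 0;
	}
}
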
911 * accessible from at least sx.h.
914 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
925 x = SX_READ_VALUE(sx);
931 if ((--sx->sx_recurse) == 0)
932 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
933 if (LOCK_LOG_TEST(&sx->lock_object, 0))
934 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
938 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
940 atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
943 if (LOCK_LOG_TEST(&sx->lock_object, 0))
944 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
946 sleepq_lock(&sx->lock_object);
947 x = SX_READ_VALUE(sx);
962 sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
966 atomic_store_rel_ptr(&sx->sx_lock, setx);
969 if (LOCK_LOG_TEST(&sx->lock_object, 0))
971 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
974 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
975 sleepq_release(&sx->lock_object);
991 __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
1002 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
1004 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1006 __func__, sx, (void *)*xp,
1016 _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
1044 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1047 all_time -= lockstat_nsecs(&sx->lock_object);
1067 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
1074 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
1081 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1099 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1102 __func__, sx, owner);
1105 "lockname:\"%s\"", sx->lock_object.lo_name);
1108 x = SX_READ_VALUE(sx);
1119 x = SX_READ_VALUE(sx);
1125 sx->lock_object.lo_name);
1129 x = SX_READ_VALUE(sx);
1153 sleepq_lock(&sx->lock_object);
1154 x = SX_READ_VALUE(sx);
1158 sleepq_release(&sx->lock_object);
1171 sleepq_release(&sx->lock_object);
1172 x = SX_READ_VALUE(sx);
1184 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
1187 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1189 __func__, sx);
1196 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1198 __func__, sx);
1201 sleep_time -= lockstat_nsecs(&sx->lock_object);
1203 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
1211 THREAD_CONTENTION_DONE(&sx->lock_object);
1213 sleepq_wait(&sx->lock_object, 0);
1215 error = sleepq_wait_sig(&sx->lock_object, 0);
1216 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
1218 sleep_time += lockstat_nsecs(&sx->lock_object);
1222 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1225 __func__, sx);
1228 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1230 __func__, sx);
1231 x = SX_READ_VALUE(sx);
1233 THREAD_CONTENTION_DONE(&sx->lock_object);
1239 all_time += lockstat_nsecs(&sx->lock_object);
1241 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
1245 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1251 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
1259 _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
1267 ("sx_slock() by idle thread %p on sx %s @ %s:%d",
1268 curthread, sx->lock_object.lo_name, file, line));
1269 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1270 ("sx_slock() of destroyed sx @ %s:%d", file, line));
1271 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1275 x = SX_READ_VALUE(sx);
1277 !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
1278 error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
1280 lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
1283 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1284 WITNESS_LOCK(&sx->lock_object, 0, file, line);
1291 _sx_slock(struct sx *sx, int opts, const char *file, int line)
1294 return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
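
_sx_slock_int() mirrors the exclusive fast path: a single __sx_slock_try() increments the sharer count, _sx_slock_hard() handles contention, and opts again carries interruptibility. A sketch via sx_slock_sig(9):

int error;

error = sx_slock_sig(&foo_sx);
if (error != 0)
	return (error);
/* ... read protected state ... */
sx_sunlock(&foo_sx);
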
1298 _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
1303 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1305 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1308 __func__, sx, (void *)*xp,
1321 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
1329 if (_sx_sunlock_try(sx, td, &x))
1332 sleepq_lock(&sx->lock_object);
1333 x = SX_READ_VALUE(sx);
1335 if (_sx_sunlock_try(sx, td, &x))
1351 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1353 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1355 "exclusive queue", __func__, sx);
1356 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
1360 sleepq_release(&sx->lock_object);
1362 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
1366 _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
1371 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1372 ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
1373 _sx_assert(sx, SA_SLOCKED, file, line);
1374 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1375 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1378 x = SX_READ_VALUE(sx);
1380 !_sx_sunlock_try(sx, td, &x)))
1381 _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
1383 lock_profile_release_lock(&sx->lock_object, false);
1389 _sx_sunlock(struct sx *sx, const char *file, int line)
1392 _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
1406 _sx_assert(const struct sx *sx, int what, const char *file, int line)
1426 witness_assert(&sx->lock_object, what, file, line);
1433 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1434 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1435 sx_xholder(sx) != curthread)))
1437 sx->lock_object.lo_name, slocked ? "share " : "",
1440 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1441 if (sx_recursed(sx)) {
1444 sx->lock_object.lo_name, file,
1448 sx->lock_object.lo_name, file, line);
1455 if (sx_xholder(sx) != curthread)
1457 sx->lock_object.lo_name, file, line);
1458 if (sx_recursed(sx)) {
1461 sx->lock_object.lo_name, file, line);
1464 sx->lock_object.lo_name, file, line);
1468 witness_assert(&sx->lock_object, what, file, line);
1475 if (sx_xholder(sx) == curthread)
1477 sx->lock_object.lo_name, file, line);
1481 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
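
_sx_assert() backs the sx_assert(9) macro, which (under INVARIANTS) lets functions encode their locking contracts; the SA_* cases above map directly onto it. For instance, hypothetical helpers might assert:

static void
foo_modify(void)
{
	sx_assert(&foo_sx, SA_XLOCKED);	/* caller holds it exclusively */
	/* ... */
}

static void
foo_peek(void)
{
	sx_assert(&foo_sx, SA_LOCKED);	/* shared or exclusive is fine */
	/* ... */
}
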
1492 const struct sx *sx;
1494 sx = (const struct sx *)lock;
1497 if (sx->sx_lock == SX_LOCK_UNLOCKED)
1499 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1502 } else if (sx->sx_lock & SX_LOCK_SHARED)
1503 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1505 td = sx_xholder(sx);
1508 if (sx_recursed(sx))
1509 db_printf(" recursed: %d\n", sx->sx_recurse);
1513 switch(sx->sx_lock &
1531 * blocked on an sx lock. If so, output some details and return true.
1537 const struct sx *sx;
1540 * Check to see if this thread is blocked on an sx lock.
1544 sx = td->td_wchan;
1545 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1546 sx->lock_object.lo_name != td->td_wmesg)
1549 /* We think we have an sx lock, so output some details. */
1550 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1551 *ownerp = sx_xholder(sx);
1552 if (sx->sx_lock & SX_LOCK_SHARED)
1554 (uintmax_t)SX_SHARERS(sx->sx_lock));
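
The final two routines serve the in-kernel debugger: db_show_sx() formats an sx lock for ddb(4)'s "show lock" command, and sx_chain() lets "show sleepchain" recognize a thread blocked on an sx lock and report its owner. From the debugger prompt (addresses are placeholders):

db> show lock <address of the struct sx>
db> show sleepchain <tid of the blocked thread>
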