Lines matching +full:non +full:- +full:exclusive in FreeBSD's shared/exclusive (sx) lock implementation (sys/kern/kern_sx.c). Only matching source lines are shown; the number leading each line is its position in the full file, so jumps in the numbering mark elided code.

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
33 * Shared/exclusive locks. This implementation attempts to ensure
102 while (_giantcnt--) \
109 * Returns true if an exclusive lock is recursed. It assumes
110 * curthread currently has an exclusive lock.
112 #define sx_recursed(sx) ((sx)->sx_recurse != 0)
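
The macro is only valid while curthread holds the exclusive lock. To illustrate that contract, a hypothetical file-local helper (sx_unlock_all is an illustration, not part of the file) that fully releases a possibly-recursed xlock:

/*
 * Hypothetical helper: release a possibly-recursed exclusive lock
 * completely.  sx_recursed() may only be consulted by the owner,
 * which sx_assert() verifies under INVARIANTS.
 */
static void
sx_unlock_all(struct sx *sx)
{
	sx_assert(sx, SA_XLOCKED);
	while (sx_recursed(sx))
		sx_xunlock(sx);		/* drop one recursion level */
	sx_xunlock(sx);			/* final release */
}
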
217 x = sx->sx_lock;
229 sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
239 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
241 &sx->sx_lock));
257 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
258 sx->sx_lock = SX_LOCK_UNLOCKED;
259 sx->sx_recurse = 0;
266 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
267 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
268 sx->sx_lock = SX_LOCK_DESTROYED;
269 lock_destroy(&sx->lock_object);
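
A minimal lifecycle sketch from the caller's side (foo_sx and the name "foo sx" are placeholders):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_sx;

static void
foo_init(void)
{
	/* SX_RECURSE allows the exclusive owner to re-acquire the lock. */
	sx_init_flags(&foo_sx, "foo sx", SX_RECURSE);
}

static void
foo_fini(void)
{
	/*
	 * The lock must be unheld here; sx_destroy() poisons it to
	 * SX_LOCK_DESTROYED so any later use trips the KASSERTs above.
	 */
	sx_destroy(&foo_sx);
}
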
282 curthread, sx->lock_object.lo_name, file, line));
284 x = sx->sx_lock;
290 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
291 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
292 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
296 curthread->td_sx_slocks++;
301 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
321 curthread, sx->lock_object.lo_name, file, line));
322 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
324 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
328 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
334 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
336 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
358 curthread, sx->lock_object.lo_name, file, line));
359 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
366 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
370 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
371 sx->sx_recurse++;
372 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
379 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
381 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
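
Both try paths return nonzero on success and never sleep; a caller-side sketch (modify_state() is a placeholder):

	if (sx_try_xlock(&foo_sx)) {
		/* Acquired exclusively without blocking. */
		modify_state();
		sx_xunlock(&foo_sx);
	} else {
		/* Contended: defer the work, or block via sx_xlock(). */
	}
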
403 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
406 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
407 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
418 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
432 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
437 * Try to switch from one shared lock to an exclusive lock. We need
439 * we will wake up the exclusive waiters when we drop the lock.
447 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
453 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
455 curthread->td_sx_slocks--;
456 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
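
Because the upgrade fails rather than sleeps whenever other sharers exist, callers usually fall back to reacquiring and revalidating; the common idiom, as a sketch:

	sx_slock(&foo_sx);
	/* ... read shared state ... */
	if (!sx_try_upgrade(&foo_sx)) {
		/*
		 * Another sharer blocked the upgrade: drop the lock,
		 * take the xlock outright, and revalidate whatever was
		 * read, since it may have changed in the window.
		 */
		sx_sunlock(&foo_sx);
		sx_xlock(&foo_sx);
	}
	/* Exclusive either way from here. */
	sx_xunlock(&foo_sx);
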
471 * Downgrade an unrecursed exclusive lock into a single shared lock.
481 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
489 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
492 * Try to switch from an exclusive lock with no shared waiters
494 * exclusive waiters, we don't need to lock the sleep queue so
502 x = sx->sx_lock;
504 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
512 sleepq_lock(&sx->lock_object);
518 x = sx->sx_lock;
519 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
522 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
524 sleepq_release(&sx->lock_object);
527 curthread->td_sx_slocks++;
528 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
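
The downgrade keeps the caller's hold continuous, so no revalidation is needed on the shared side. A sketch (update_state() and read_state() are placeholders; the xlock must be unrecursed):

	sx_xlock(&foo_sx);
	update_state();			/* write phase, exclusive */
	sx_downgrade(&foo_sx);		/* become one of the sharers */
	read_state();			/* read phase, shared */
	sx_sunlock(&foo_sx);
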
549 (*extra_work)--;
557 * This function represents the so-called 'hard case' for sx_xlock
597 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
602 all_time -= lockstat_nsecs(&sx->lock_object);
617 /* If we already hold an exclusive lock, then recurse. */
619 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
620 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
621 sx->lock_object.lo_name, file, line));
622 sx->sx_recurse++;
623 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
624 if (LOCK_LOG_TEST(&sx->lock_object, 0))
629 if (LOCK_LOG_TEST(&sx->lock_object, 0))
631 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
642 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
649 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
653 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
665 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
681 if (LOCK_LOG_TEST(&sx->lock_object, 0))
686 sx->lock_object.lo_name);
705 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
709 extra_work--;
716 sx->lock_object.lo_name);
739 sleepq_lock(&sx->lock_object);
748 sleepq_release(&sx->lock_object);
764 sleepq_release(&sx->lock_object);
770 sleepq_release(&sx->lock_object);
777 * If an exclusive lock was released with both shared
778 * and exclusive waiters and a shared waiter hasn't
783 * as there are other exclusive waiters still. If we
789 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
791 sleepq_release(&sx->lock_object);
809 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
823 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
827 if (LOCK_LOG_TEST(&sx->lock_object, 0))
836 * Since we have been unable to acquire the exclusive
837 * lock and the exclusive waiters flag is set, we have
840 if (LOCK_LOG_TEST(&sx->lock_object, 0))
845 sleep_time -= lockstat_nsecs(&sx->lock_object);
847 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
855 THREAD_CONTENTION_DONE(&sx->lock_object);
857 sleepq_wait(&sx->lock_object, 0);
859 error = sleepq_wait_sig(&sx->lock_object, 0);
860 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
862 sleep_time += lockstat_nsecs(&sx->lock_object);
866 if (LOCK_LOG_TEST(&sx->lock_object, 0))
872 if (LOCK_LOG_TEST(&sx->lock_object, 0))
877 THREAD_CONTENTION_DONE(&sx->lock_object);
890 all_time += lockstat_nsecs(&sx->lock_object);
896 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
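
The acquire entry points share one shape: a single fcmpset fast path, with this hard function absorbing every failure. A simplified sketch of that split (the real inline lives in sys/sx.h; the argument list is simplified, and debug kernels also pass file/line to the hard case):

static __inline int
sx_xlock_sketch(struct sx *sx, int opts)
{
	uintptr_t tid = (uintptr_t)curthread;
	uintptr_t v = SX_LOCK_UNLOCKED;
	int error = 0;

	/* Uncontended: one atomic swap from unlocked to owned. */
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid))
		error = _sx_xlock_hard(sx, v, opts);	/* spin, then sleep */
	return (error);
}
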
908 * This function represents the so-called 'hard case' for sx_xunlock
931 if ((--sx->sx_recurse) == 0)
932 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
933 if (LOCK_LOG_TEST(&sx->lock_object, 0))
940 atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
943 if (LOCK_LOG_TEST(&sx->lock_object, 0))
946 sleepq_lock(&sx->lock_object);
954 * state of the exclusive waiters flag.
956 * starvation for the threads sleeping on the exclusive queue by giving
962 sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
966 atomic_store_rel_ptr(&sx->sx_lock, setx);
969 if (LOCK_LOG_TEST(&sx->lock_object, 0))
972 "exclusive");
974 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
975 sleepq_release(&sx->lock_object);
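
Release mirrors acquire: one release cmpset when the lock is unrecursed and waiter-free, with this hard function handling everything else. A sketch (again simplified; the real inline is in sys/sx.h):

static __inline void
sx_xunlock_sketch(struct sx *sx)
{
	uintptr_t tid = (uintptr_t)curthread;

	/* Uncontended, unrecursed: swap the owner tid back to unlocked. */
	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		_sx_xunlock_hard(sx, tid);	/* recursion and wakeups */
}
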
985 if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
996 * If no other thread has an exclusive lock then try to bump up
1002 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
1004 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1005 CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p",
1008 td->td_sx_slocks++;
1047 all_time -= lockstat_nsecs(&sx->lock_object);
1067 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
1074 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
1078 * shared locks once there is an exclusive waiter.
1099 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1105 "lockname:\"%s\"", sx->lock_object.lo_name);
1125 sx->lock_object.lo_name);
1150 * Some other thread already has an exclusive lock, so
1153 sleepq_lock(&sx->lock_object);
1158 sleepq_release(&sx->lock_object);
1171 sleepq_release(&sx->lock_object);
1184 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
1187 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1196 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1201 sleep_time -= lockstat_nsecs(&sx->lock_object);
1203 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
1211 THREAD_CONTENTION_DONE(&sx->lock_object);
1213 sleepq_wait(&sx->lock_object, 0);
1215 error = sleepq_wait_sig(&sx->lock_object, 0);
1216 THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
1218 sleep_time += lockstat_nsecs(&sx->lock_object);
1222 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1228 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1233 THREAD_CONTENTION_DONE(&sx->lock_object);
1239 all_time += lockstat_nsecs(&sx->lock_object);
1245 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1268 curthread, sx->lock_object.lo_name, file, line));
1269 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1271 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1280 lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
1283 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1284 WITNESS_LOCK(&sx->lock_object, 0, file, line);
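
Shared acquisition from the caller's side, as a minimal sketch (foo_value is a placeholder for state protected by foo_sx):

static int foo_value;

static int
foo_read(void)
{
	int v;

	sx_slock(&foo_sx);
	/* Readers may hold the lock concurrently with each other. */
	v = foo_value;
	sx_sunlock(&foo_sx);
	return (v);
}
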
1303 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1304 *xp - SX_ONE_SHARER)) {
1305 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1307 "%s: %p succeeded %p -> %p",
1309 (void *)(*xp - SX_ONE_SHARER));
1310 td->td_sx_slocks--;
1332 sleepq_lock(&sx->lock_object);
1340 * Just wake up all the exclusive waiters.
1351 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1353 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1355 "exclusive queue", __func__, sx);
1356 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
1357 td->td_sx_slocks--;
1360 sleepq_release(&sx->lock_object);
1371 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1374 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1375 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1383 lock_profile_release_lock(&sx->lock_object, false);
1401 * In the non-WITNESS case, sx_assert() can only detect that at least
1426 witness_assert(&sx->lock_object, what, file, line);
1429 * If some other thread has an exclusive lock or we
1433 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1434 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1437 sx->lock_object.lo_name, slocked ? "share " : "",
1440 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1444 sx->lock_object.lo_name, file,
1448 sx->lock_object.lo_name, file, line);
1457 sx->lock_object.lo_name, file, line);
1461 sx->lock_object.lo_name, file, line);
1464 sx->lock_object.lo_name, file, line);
1468 witness_assert(&sx->lock_object, what, file, line);
1477 sx->lock_object.lo_name, file, line);
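
Callers encode their locking contracts with these assertions; SA_SLOCKED, SA_XLOCKED, SA_LOCKED, and SA_UNLOCKED are the public "what" values from sx(9). A sketch:

static void
foo_modify(void)
{
	/*
	 * The caller must already own the xlock.  Under INVARIANTS a
	 * violation panics; in non-debug kernels this compiles away.
	 */
	sx_assert(&foo_sx, SA_XLOCKED);
	foo_value++;
}
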
1497 if (sx->sx_lock == SX_LOCK_UNLOCKED)
1499 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1502 } else if (sx->sx_lock & SX_LOCK_SHARED)
1503 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1507 td->td_tid, td->td_proc->p_pid, td->td_name);
1509 db_printf(" recursed: %d\n", sx->sx_recurse);
1513 switch (sx->sx_lock &
1519 db_printf("exclusive\n");
1522 db_printf("exclusive and shared\n");
1532 * If the lock has an exclusive owner, return that in *ownerp.
1544 sx = td->td_wchan;
1545 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1546 sx->lock_object.lo_name != td->td_wmesg)
1550 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1552 if (sx->sx_lock & SX_LOCK_SHARED)
1554 (uintmax_t)SX_SHARERS(sx->sx_lock));