//===-- sanitizer_mutex.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_mutex.h"

#include "sanitizer_common.h"

namespace __sanitizer {

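// Slow path of StaticSpinMutex::Lock(): spin with CPU pause hints for the
// first iterations, then fall back to yielding the thread to the scheduler.
// The relaxed load before the acquire exchange avoids bouncing the lock's
// cache line between CPUs while the mutex is held (test-and-test-and-set).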
void StaticSpinMutex::LockSlow() {
  for (int i = 0;; i++) {
    if (i < 100)
      proc_yield(1);
    else
      internal_sched_yield();
    if (atomic_load(&state_, memory_order_relaxed) == 0 &&
        atomic_exchange(&state_, 1, memory_order_acquire) == 0)
      return;
  }
}

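// Blocks until the semaphore counter is non-zero and decrements it. When the
// counter is zero, the thread parks on a futex keyed on state_ rather than
// spinning; the acquire compare-exchange pairs with the release increment in
// Post().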
void Semaphore::Wait() {
  u32 count = atomic_load(&state_, memory_order_relaxed);
  for (;;) {
    if (count == 0) {
      FutexWait(&state_, 0);
      count = atomic_load(&state_, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(&state_, &count, count - 1,
                                     memory_order_acquire))
      break;
  }
}

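// Increments the semaphore counter and wakes up to `count` parked threads.
// The release increment makes writes performed before Post() visible to
// threads returning from Wait().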
void Semaphore::Post(u32 count) {
  CHECK_NE(count, 0);
  atomic_fetch_add(&state_, count, memory_order_release);
  FutexWake(&state_, count);
}

#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table; it effectively disables deadlock detection.
// Each tool can override the table to define its own mutex hierarchy and
// enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be
// locked under what mutex types). The table is checked to be acyclic, and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex
// instances, because doing it on instances would significantly complicate the
// implementation, worsen performance and memory overhead, and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type
// recursively).
static constexpr int kMutexTypeMax = 20;
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
static StaticSpinMutex mutex_meta_mtx;
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with the MutexMulti mark (multiple mutexes of such a type may
// be held at the same time).
static bool mutex_multi[kMutexTypeMax];
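
// As a sketch of how a tool could override the table (the Foo/Bar types are
// hypothetical; see the tool sources for the real tables):
//
//   MutexMeta mutex_meta[kMutexTypeMax] = {
//       {MutexInvalid, "Invalid", {}},
//       {MutexTypeFoo, "Foo", {MutexTypeBar}},  // Bar can be locked under Foo
//       {MutexTypeBar, "Bar", {MutexLeaf}},     // nothing is locked under Bar
//   };
//
// Entry t must satisfy mutex_meta[t].type == t, and each can_lock list is
// terminated by MutexInvalid.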

void DebugMutexInit() {
  // Build adjacency matrix.
  bool leaf[kMutexTypeMax];
  internal_memset(&leaf, 0, sizeof(leaf));
  int cnt[kMutexTypeMax];
  internal_memset(&cnt, 0, sizeof(cnt));
  for (int t = 0; t < kMutexTypeMax; t++) {
    mutex_type_count = t;
    if (!mutex_meta[t].name)
      break;
    CHECK_EQ(t, mutex_meta[t].type);
    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
      MutexType z = mutex_meta[t].can_lock[j];
      if (z == MutexInvalid)
        break;
      if (z == MutexLeaf) {
        CHECK(!leaf[t]);
        leaf[t] = true;
        continue;
      }
      if (z == MutexMulti) {
        mutex_multi[t] = true;
        continue;
      }
      CHECK_LT(z, kMutexTypeMax);
      CHECK(!mutex_can_lock[t][z]);
      mutex_can_lock[t][z] = true;
      cnt[t]++;
    }
  }
  // If this fails, the mutex_meta array is not properly terminated.
  CHECK_LT(mutex_type_count, kMutexTypeMax);
  // Add leaf mutexes: a leaf can be locked under any other non-leaf mutex,
  // but must not declare any can_lock edges of its own.
  for (int t = 0; t < mutex_type_count; t++) {
    if (!leaf[t])
      continue;
    CHECK_EQ(cnt[t], 0);
    for (int z = 0; z < mutex_type_count; z++) {
      if (z == MutexInvalid || t == z || leaf[z])
        continue;
      CHECK(!mutex_can_lock[z][t]);
      mutex_can_lock[z][t] = true;
    }
  }
  // Build the transitive closure and check that the graph is acyclic.
  u32 trans[kMutexTypeMax];
  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
                "kMutexTypeMax does not fit into u32, switch to u64");
  internal_memset(&trans, 0, sizeof(trans));
  // trans[i] is a bitmask of the types directly lockable under type i.
  for (int i = 0; i < mutex_type_count; i++) {
    for (int j = 0; j < mutex_type_count; j++)
      if (mutex_can_lock[i][j])
        trans[i] |= 1 << j;
  }
  // Propagate reachability (a bitset form of Warshall's transitive-closure
  // algorithm): afterwards trans[i] has bit j set iff type j is reachable
  // from type i.
  for (int k = 0; k < mutex_type_count; k++) {
    for (int i = 0; i < mutex_type_count; i++) {
      if (trans[i] & (1 << k))
        trans[i] |= trans[k];
    }
  }
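  // Hypothetical example: with edges Foo->Bar and Bar->Baz in the table, the
  // closure also sets the Baz bit in trans[Foo]; if some entry additionally
  // allowed Baz->Foo, every type on the cycle would end up with its own bit
  // set, and the check below would fire.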
  for (int i = 0; i < mutex_type_count; i++) {
    if (trans[i] & (1 << i)) {
      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
      Die();
    }
  }
}

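// Per-thread deadlock detector state: for each mutex type it records whether
// a mutex of that type is currently held (seq != 0), the acquisition PC, and
// a recursion count. Acquisitions are stamped with increasing sequence
// numbers so that Lock() can find the most recently acquired held type and
// check the new lock against the static hierarchy.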
struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;
    uptr pc;
    int recursion;
  };
  int initialized;
  u64 sequence;
  LockDesc locked[kMutexTypeMax];

  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n",
             SanitizerToolName, mutex_meta[type].name,
             mutex_meta[max_idx].name);
      PrintMutexPC(locked[max_idx].pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }

  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;
    locked[type].seq = 0;
    locked[type].pc = 0;
  }

  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }

  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};
// This variable is used by the __tls_get_addr interceptor, so cannot use the
// global-dynamic TLS model, as that would result in crashes.
__attribute__((tls_model("initial-exec"))) static THREADLOCAL
    InternalDeadlockDetector deadlock_detector;

void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }

void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }

void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
#endif

}  // namespace __sanitizer