//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>

// With some build setups, this might still not be defined.
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif

namespace scudo {

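// A TSD (Thread Specific Data) holds the per-thread state of the allocator:
// the thread-local allocation Cache and the QuarantineCache, both guarded by a
// HybridMutex. The struct is aligned on SCUDO_CACHE_LINE_SIZE to limit false
// sharing when several TSDs are laid out next to each other, as in the shared
// TSD registry (tsd_shared.h).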
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
  using ThisT = TSD<Allocator>;
  u8 DestructorIterations = 0;

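  // Binds this TSD to an allocator instance: initializes the thread-local
  // cache and arms DestructorIterations, which the thread-teardown logic uses
  // to bound how many times the pthread key destructor may re-run.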
  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(DestructorIterations, 0U);
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    Instance->initCache(&Cache);
    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
  }

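  // tryLock() attempts to grab the TSD without blocking. On failure, the first
  // caller to observe contention stores a timestamp (getMonotonicTime(),
  // scaled down on 32-bit so it fits in a uptr) into Precedence; the shared
  // TSD registry (tsd_shared.h) consults these precedences when choosing which
  // contended TSD to fall back to. Locking the TSD resets Precedence to 0.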
  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(
          &Precedence,
          static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

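  // Returns the contents of the local Cache and QuarantineCache to the
  // allocator instance, typically at thread teardown (driven by the TSD
  // registry).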
  void commitBack(Allocator *Instance) { Instance->commitBack(this); }

  // As noted in the comments attached to `getCache()`, the TSD doesn't always
  // need to be locked when it is accessed. For those cases, callers pass
  // BypassCheck to skip the assertion until every path acquires the TSDs with
  // the lock held.
  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
    if (SCUDO_DEBUG && !BypassCheck)
      Mutex.assertHeld();
  }

  // Ideally, we would assert that every operation on Cache/QuarantineCache is
  // performed with `Mutex` acquired. However, the current way TSDs are
  // accessed does not cooperate well with the thread-safety analysis because
  // of pointer aliasing, so for now the assertion only lives on the getters of
  // Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` here, but
  // acquiring a TSD doesn't always require holding the lock. Add this
  // assertion once the lock is always acquired.
  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
    return QuarantineCache;
  }

private:
  HybridMutex Mutex;
  atomic_uptr Precedence = {};

  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};

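// A rough usage sketch, for illustration only; real lookup and locking are
// handled by the TSD registries (tsd_exclusive.h / tsd_shared.h), and
// `getLockedTSD()` below is a placeholder, not an actual registry API:
//
//   TSD<Allocator> *TSD = getLockedTSD(); // Assume this returns a locked TSD.
//   TSD->getCache();  // OK: `Mutex` is held, so REQUIRES(Mutex) is satisfied.
//   TSD->unlock();    // Release once done with the caches.
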
} // namespace scudo

#endif // SCUDO_TSD_H_