//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

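// A depot node is a fixed-size record in the hash table: the 64-bit hash of
// the trace (used for equality checks), the link to the next node in the
// bucket chain, and an id into the shared StackStore that owns the frames.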
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  StackStore::Id store_id;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
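  // 64-bit MurMur2 hash over every frame PC and the tag, seeded with the
  // size of the trace in bytes.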
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};

static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep mutable data out of the frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + useCounts.MemoryUsage();
}

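// Pack the StackStore using the compression mode selected by the
// compress_stack_depot flag (its absolute value) and, at verbosity >= 1,
// report how much memory the pass released and how long it took.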
static void CompressStackStore() {
  u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
      Abs(common_flags()->compress_stack_depot)));
  if (!diff)
    return;
  if (Verbosity() >= 1) {
    u64 finish = MonotonicNanoTime();
    uptr total_before = theDepot.GetStats().allocated + diff;
    VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
            SanitizerToolName, diff >> 10, total_before >> 10,
            (finish - start) / 1000000);
  }
}

namespace {

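// Background worker that owns the periodic compression: it is started lazily
// by the first NewWorkNotify(), sleeps on a semaphore, and runs
// CompressStackStore() each time it is woken, until run_ is cleared.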
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,
    Stopped,
  };

  void Run();

  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;

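// If compression is enabled, make sure the worker thread exists and wake it.
// When the flag is negative (the testing/debugging mode mentioned below) or
// the thread could not be started, fall through and compress synchronously on
// the caller's thread.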
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  CompressStackStore();
}

void CompressThread::Run() {
  VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
  while (WaitForWork()) CompressStackStore();
  VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
}

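// Clear run_ and post the semaphore so WaitForWork() returns false, then join
// the worker outside of mutex_.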
void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}

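// Like Stop(), but leaves mutex_ held so the caller can pair it with
// Unlock(); the state goes back to NotStarted so the thread can be restarted
// later.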
void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);

  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow restarting after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}

void CompressThread::Unlock() { mutex_.Unlock(); }

}  // namespace

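// Record the trace in the shared StackStore; if Store() signals pending
// packing work (pack != 0), wake the compression thread.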
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  uptr pack = 0;
  store_id = stackStore.Store(args, &pack);
  if (LIKELY(!pack))
    return;
  compress_thread.NewWorkNotify();
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}

StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

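// Public entry points. A typical round trip looks roughly like the sketch
// below (illustrative only; the variable names are not from this file):
//   u32 id = StackDepotPut(StackTrace(pcs, size));
//   StackTrace trace = StackDepotGet(id);  // same frames as stored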
u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

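// Lock everything the depot depends on (used, e.g., around fork): the depot
// table first, then the compression thread (whose mutex stays held), then the
// store. StackDepotUnlockAll releases them in the reverse order.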
void StackDepotLockAll() {
  theDepot.LockAll();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}

void StackDepotUnlockAll() {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

void StackDepotStopBackgroundThread() { compress_thread.Stop(); }

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

} // namespace __sanitizer