//===-- sanitizer_stack_store.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_STACK_STORE_H
#define SANITIZER_STACK_STORE_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

// Append-only storage for stack traces. Frames are appended into a fixed set
// of lazily-mapped blocks; a trace is addressed by a compact 32-bit Id.
// Blocks that will receive no further writes can be compressed ("packed") to
// reclaim memory, and are transparently unpacked on first load.
class StackStore {
  // Number of uptr frame slots per block.
  static constexpr uptr kBlockSizeFrames = 0x100000;
  // Number of blocks; kBlockCount * kBlockSizeFrames covers the full Id space.
  static constexpr uptr kBlockCount = 0x1000;
  // Byte size of one block's frame storage.
  static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);

 public:
  // Compression algorithm used by Pack().
  enum class Compression : u8 {
    None = 0,
    Delta,
    LZW,
  };

  constexpr StackStore() = default;

  using Id = u32;  // Enough for 2^32 * sizeof(uptr) bytes of traces.
  // The block grid must address exactly the 2^32 frame offsets an Id can name.
  static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
                "");

  // Appends `trace` and returns its Id. `pack` is incremented by the number of
  // blocks completed (made eligible for packing) by this call.
  Id Store(const StackTrace &trace,
           uptr *pack /* number of blocks completed by this call */);
  // Retrieves the trace stored under `id` (unpacking its block if needed).
  StackTrace Load(Id id);
  // Total bytes of memory currently mapped by this store.
  uptr Allocated() const;

  // Packs all blocks which don't expect any more writes. A block is packed at
  // most once. As soon as a trace from a packed block is requested, the block
  // is unpacked and stays unpacked after that.
  // Returns the number of released bytes.
  uptr Pack(Compression type);

  // Lock/unlock every block's mutex, e.g. around fork. Must be paired.
  void LockAll();
  void UnlockAll();

  // Releases all mapped memory. For tests only.
  void TestOnlyUnmap();

 private:
  friend class StackStoreTest;
  // Index of the block containing frame `frame_idx`.
  static constexpr uptr GetBlockIdx(uptr frame_idx) {
    return frame_idx / kBlockSizeFrames;
  }

  // Offset of frame `frame_idx` within its block.
  static constexpr uptr GetInBlockIdx(uptr frame_idx) {
    return frame_idx % kBlockSizeFrames;
  }

  // Converts a public Id back into a global frame offset.
  static constexpr uptr IdToOffset(Id id) {
    CHECK_NE(id, 0);
    return id - 1;  // Avoid zero as id.
  }

  // Converts a global frame offset into a public Id.
  // NOTE: the parameter is an offset despite its `Id` type/name.
  static constexpr uptr OffsetToId(Id id) {
    // This maps UINT32_MAX to 0, which would be retrieved as an empty stack.
    // But this is not a problem as we will not be able to store anything after
    // that anyway.
    return id + 1;  // Avoid zero as id.
  }

  // Reserves `count` contiguous frame slots, returning their address; `idx`
  // receives the starting frame offset and `pack` is incremented per block
  // completed by this allocation.
  uptr *Alloc(uptr count, uptr *idx, uptr *pack);

  // Mapping primitives; Map updates allocated_, Unmap reverses it.
  void *Map(uptr size, const char *mem_type);
  void Unmap(void *addr, uptr size);

  // Total number of allocated frames.
  atomic_uintptr_t total_frames_ = {};

  // Tracks total allocated memory in bytes.
  atomic_uintptr_t allocated_ = {};

  // Each block holds storage for exactly kBlockSizeFrames frames.
  class BlockInfo {
    // Pointer to the block's frame storage (0 until created; may point to
    // packed data while in the Packed state).
    atomic_uintptr_t data_;
    // Counter to track store progress to know when we can Pack() the block.
    atomic_uint32_t stored_;
    // Protects alloc of new blocks.
    mutable StaticSpinMutex mtx_;

    // Lifecycle: Storing -> Packed -> Unpacked (never packed again).
    enum class State : u8 {
      Storing = 0,
      Packed,
      Unpacked,
    };
    State state SANITIZER_GUARDED_BY(mtx_);

    // Maps fresh storage for this block.
    uptr *Create(StackStore *store);

   public:
    uptr *Get() const;
    uptr *GetOrCreate(StackStore *store);
    // Returns uncompressed frame storage, decompressing first if Packed.
    uptr *GetOrUnpack(StackStore *store);
    // Compresses the block; returns bytes released (0 if not packable).
    uptr Pack(Compression type, StackStore *store);
    void TestOnlyUnmap(StackStore *store);
    // Records that `n` more frames were stored; true when the block is full
    // and thus eligible for packing.
    bool Stored(uptr n);
    bool IsPacked() const;
    void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
    void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
  };

  BlockInfo blocks_[kBlockCount] = {};
};

}  // namespace __sanitizer

#endif  // SANITIZER_STACK_STORE_H