//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STACK_DEPOT_H_
#define SCUDO_STACK_DEPOT_H_

#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {

// Incremental 32-bit MurmurHash2 over a sequence of 32-bit values; used to
// hash stack traces.
class MurMur2HashBuilder {
  static const u32 M = 0x5bd1e995;
  static const u32 Seed = 0x9747b28c;
  static const u32 R = 24;
  u32 H;

public:
  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
  void add(u32 K) {
    K *= M;
    K ^= K >> R;
    K *= M;
    H *= M;
    H ^= K;
  }
  u32 get() {
    u32 X = H;
    X ^= X >> 13;
    X *= M;
    X ^= X >> 15;
    return X;
  }
};

class StackDepot {
  HybridMutex RingEndMu;
  u32 RingEnd = 0;

  // This data structure stores a stack trace for each allocation and
  // deallocation when stack trace recording is enabled. A trace may be looked
  // up using a hash of the stack trace. The lower bits of the hash are an
  // index into the Tab array, which stores an index into the Ring array where
  // the stack traces are stored. As the name implies, Ring is a ring buffer,
  // so a stack trace may wrap around to the start of the array.
  //
  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
  // and stack trace markers in the case where instruction pointers are 4-byte
  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
  // size of the stack trace in bits 33-63.
  //
  // The insert() function is potentially racy in its accesses to the Tab and
  // Ring arrays, but find() is resilient to races in the sense that, barring
  // hash collisions, it will either return the correct stack trace or no stack
  // trace at all, even if two instances of insert() raced with one another.
  // This is achieved by re-checking the hash of the stack trace before
  // returning the trace.
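  //
  // For illustration, a 3-frame trace whose hash is 0xABCD1234 would be
  // prefixed by the marker word
  //   (u64(3) << 33) | (u64(0xABCD1234) << 1) | 1,
  // which can be unpacked as (the field names here are purely illustrative):
  //   Size     = Entry >> 33;          // bits 33-63
  //   Hash     = u32(Entry >> 1);      // bits 1-32
  //   IsMarker = (Entry & 1) != 0;     // bit 0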

#ifdef SCUDO_FUZZ
  // Use smaller table sizes for fuzzing in order to reduce input size.
  static const uptr TabBits = 4;
#else
  static const uptr TabBits = 16;
#endif
  static const uptr TabSize = 1 << TabBits;
  static const uptr TabMask = TabSize - 1;
  atomic_u32 Tab[TabSize] = {};

#ifdef SCUDO_FUZZ
  static const uptr RingBits = 4;
#else
  static const uptr RingBits = 19;
#endif
  static const uptr RingSize = 1 << RingBits;
  static const uptr RingMask = RingSize - 1;
  atomic_u64 Ring[RingSize] = {};

public:
  // Insert the stack trace [Begin, End) into the stack depot, and return its
  // hash.
  u32 insert(uptr *Begin, uptr *End) {
    MurMur2HashBuilder B;
    for (uptr *I = Begin; I != End; ++I)
      B.add(u32(*I) >> 2);
    u32 Hash = B.get();

    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
    if (Entry == Id)
      return Hash;

    ScopedLock Lock(RingEndMu);
    RingPos = RingEnd;
    atomic_store_relaxed(&Tab[Pos], RingPos);
    atomic_store_relaxed(&Ring[RingPos], Id);
    for (uptr *I = Begin; I != End; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      atomic_store_relaxed(&Ring[RingPos], *I);
    }
    RingEnd = (RingPos + 1) & RingMask;
    return Hash;
  }
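
  // Layout sketch (illustrative): after insert() stores a two-frame trace
  // starting at ring index P, the ring holds
  //   Ring[P]                  = marker word (tag/hash/size)
  //   Ring[(P + 1) & RingMask] = first frame address
  //   Ring[(P + 2) & RingMask] = second frame address
  // and RingEnd advances to (P + 3) & RingMask.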

  // Look up a stack trace by hash. Returns true if successful. The trace may
  // be accessed via operator[] passing indexes between *RingPosPtr and
  // *RingPosPtr + *SizePtr.
  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    if (RingPos >= RingSize)
      return false;
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
    if ((Entry & 0x1ffffffff) != HashWithTagBit)
      return false;
    u32 Size = u32(Entry >> 33);
    if (Size >= RingSize)
      return false;
    *RingPosPtr = (RingPos + 1) & RingMask;
    *SizePtr = Size;
    MurMur2HashBuilder B;
    for (uptr I = 0; I != Size; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
    }
    return B.get() == Hash;
  }

  u64 operator[](uptr RingPos) const {
    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
  }
};

} // namespace scudo

#endif // SCUDO_STACK_DEPOT_H_
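
// Usage sketch (illustrative; "Depot", "Frames" and "processFrame" are
// hypothetical names, not part of this header):
//
//   scudo::StackDepot Depot;
//   uptr Frames[2] = {/* instruction pointers */};
//   u32 Hash = Depot.insert(Frames, Frames + 2);
//   uptr RingPos, Size;
//   if (Depot.find(Hash, &RingPos, &Size)) {
//     for (uptr I = 0; I != Size; ++I)
//       processFrame(Depot[RingPos + I]);
//   }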