//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "allocator_config_wrapper.h"
#include "atomic_helpers.h"
#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "stack_depot.h"
#include "string_utils.h"
#include "tsd.h"

#include "scudo/interface.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS

extern "C" inline void EmptyCallback() {}

#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
// This function is not part of the NDK so it does not appear in any public
// header files. We only declare/use it when targeting the platform.
extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
                                                     size_t num_entries);
#endif

namespace scudo {

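// The combined allocator front-end: it ties together the Primary (size-class
// based) allocator, the Secondary (large allocations) allocator, the
// quarantine, the TSD registry and, when enabled, the embedded GWP-ASan
// instance, behind a single malloc/free style interface.
//
// A minimal usage sketch (`MyConfig` is a hypothetical configuration type that
// must provide the Primary/Secondary/TSDRegistry parameters consumed by
// BaseConfig and the related config wrappers):
//
//   static scudo::Allocator<MyConfig> Instance;
//   void *P = Instance.allocate(128, scudo::Chunk::Origin::Malloc);
//   Instance.deallocate(P, scudo::Chunk::Origin::Malloc);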
template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using AllocatorConfig = BaseConfig<Config>;
  using PrimaryT =
      typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
  using SecondaryT =
      typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Config, PostInitCallback> ThisT;
  typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;

  void callPostInitCallback() {
    pthread_once(&PostInitNonce, PostInitCallback);
  }

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
      Cache.deallocate(Header.ClassId, BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      // Reset tag to 0 as this chunk may have been previously used for a tagged
      // user allocation.
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
              Allocator.Primary.Options.load())))
        storeTags(reinterpret_cast<uptr>(Ptr),
                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));

      return Ptr;
    }

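    // Returns a quarantine batch chunk to the backend, after validating that
    // its header still describes an allocated batch of the expected class ID
    // and size.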
    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

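  // One-time initialization of the allocator: runs the sanity checks, selects
  // the checksum implementation, seeds the header cookie, mirrors the relevant
  // flags into Primary.Options, and initializes the Stats, Primary, Secondary
  // and Quarantine sub-components.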
  void init() {
    // Make sure that the page size is initialized if it's not a constant.
    CHECK_NE(getPageSizeCached(), 0U);

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    if (getFlags()->may_return_null)
      Primary.Options.set(OptionBit::MayReturnNull);
    if (getFlags()->zero_contents)
      Primary.Options.setFillContentsMode(ZeroFill);
    else if (getFlags()->pattern_fill_contents)
      Primary.Options.setFillContentsMode(PatternOrZeroFill);
    if (getFlags()->dealloc_type_mismatch)
      Primary.Options.set(OptionBit::DeallocTypeMismatch);
    if (getFlags()->delete_size_mismatch)
      Primary.Options.set(OptionBit::DeleteSizeMismatch);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);

    QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);

    Stats.init();
    // TODO(chiahungduan): Given that we support setting the default value in
    // the PrimaryConfig and CacheConfig, consider deprecating the use of the
    // `release_to_os_interval_ms` flag.
    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
    Primary.init(ReleaseToOsIntervalMs);
    Secondary.init(&Stats, ReleaseToOsIntervalMs);
    Quarantine.init(
        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
  }

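  // Pause and resume access to the allocation ring buffer and its stack depot:
  // disableRingBuffer() acquires the RingBufferInitLock and disables the
  // depot, enableRingBuffer() re-enables the depot and releases the lock.
  // They are called from disable()/enable() below.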
  void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->enable();
    RingBufferInitLock.unlock();
  }

  void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    RingBufferInitLock.lock();
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->disable();
  }

  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  // be functional, best called from PostInitCallback.
  void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
    // handler.
    Opt.InstallForkHandlers = false;
    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
    GuardedAlloc.init(Opt);

    if (Opt.InstallSignalHandlers)
      gwp_asan::segv_handler::installSignalHandlers(
          &GuardedAlloc, Printf,
          gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction(),
          Opt.Recoverable);

    GuardedAllocSlotSize =
        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
                            GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
  }

#ifdef GWP_ASAN_HOOKS
  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
    return GuardedAlloc.getMetadataRegion();
  }

  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
    return GuardedAlloc.getAllocatorState();
  }
#endif // GWP_ASAN_HOOKS

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void unmapTestOnly() {
    unmapRingBuffer();
    TSDRegistry.unmapTestOnly(this);
    Primary.unmapTestOnly();
    Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
      gwp_asan::segv_handler::uninstallSignalHandlers();
    GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  QuarantineT *getQuarantine() { return &Quarantine; }

  // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drain(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().destroy(&Stats);
  }

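  // Drain a TSD's quarantine cache and its local allocation cache back to the
  // global quarantine and the Primary without destroying the cache;
  // drainCaches() does the same for every registered TSD.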
  void drainCache(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
                               QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().drain();
  }
  void drainCaches() { TSDRegistry.drainCaches(this); }

  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    auto UntaggedPtr = untagPointer(Ptr);
    if (UntaggedPtr != Ptr)
      return UntaggedPtr;
    // Secondary, or pointer allocated while memory tagging is unsupported or
    // disabled. The tag mismatch is okay in the latter case because tags will
    // not be checked.
    return addHeaderTag(Ptr);
  }

  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    return addFixedTag(Ptr, 2);
  }

  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  }

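  // Collects the current stack trace via android_unsafe_frame_pointer_chase
  // when it is available, inserts it into the given stack depot and returns
  // the resulting hash; returns 0 when frame pointer chasing is unsupported.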
  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
    // Discard collectStackTrace() frame and allocator function frame.
    constexpr uptr DiscardFrames = 2;
    uptr Stack[MaxTraceSize + DiscardFrames];
    uptr Size =
        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
    return 0;
#endif
  }

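  // When odd/even tags are enabled, the mask returned below alternates between
  // 0x5555 (covering the even tag values) and 0xAAAA (the odd ones) for
  // adjacent blocks of the same size class, so that neighbouring chunks end up
  // with tags of opposite parity.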
  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
                                         uptr ClassId) {
    if (!Options.get(OptionBit::UseOddEvenTags))
      return 0;

    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
    // that bit will have the pattern 010101... for consecutive blocks, which we
    // can use to determine which tag mask to use.
    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  }

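  // Main allocation entry point. After the optional GWP-ASan sampling, the
  // request is rounded up and served either from the Primary (starting at the
  // size class that fits NeededSize and falling back to larger classes on
  // failure) or from the Secondary, then the chunk header is set up, with or
  // without memory tagging depending on the current options.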
  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        Stats.lock();
        Stats.add(StatAllocated, GuardedAllocSlotSize);
        Stats.sub(StatFree, GuardedAllocSlotSize);
        Stats.unlock();
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

    const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                          : TSDRegistry.getDisableMemInit()
                                              ? NoFill
                                              : Options.getFillContentsMode();

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUp(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());

    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block = nullptr;
    uptr ClassId = 0;
    uptr SecondaryBlockEnd = 0;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Block = TSD->getCache().allocate(ClassId);
      // If the allocation failed, retry in each successively larger class until
      // it fits. If it fails to fit in the largest class, fall back to the
      // Secondary.
      if (UNLIKELY(!Block)) {
        while (ClassId < SizeClassMap::LargestClassId && !Block)
          Block = TSD->getCache().allocate(++ClassId);
        if (!Block)
          ClassId = 0;
      }
    }
    if (UNLIKELY(ClassId == 0)) {
      Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
                                 FillContents);
    }

    if (UNLIKELY(!Block)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      printStats();
      reportOutOfMemory(NeededSize);
    }

    const uptr UserPtr = roundUp(
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
    const uptr SizeOrUnusedBytes =
        ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
      return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
                       FillContents);
    }

    return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
                                      SizeOrUnusedBytes, FillContents);
  }

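  // Main deallocation entry point: hands GWP-ASan pointers back to GWP-ASan,
  // validates the pointer alignment and the chunk header, enforces the
  // dealloc-type and delete-size mismatch checks when they are enabled, and
  // finally quarantines or frees the chunk.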
  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    if (UNLIKELY(!Ptr))
      return;

    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    void *TaggedPtr = Ptr;
    Ptr = getHeaderTaggedPointer(Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);

    const Options Options = Primary.Options.load();
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
            Origin != Chunk::Origin::Malloc)
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.OriginOrWasZeroed, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  }

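  // Reallocation grows or shrinks the chunk in place when the new size still
  // fits in the backing block (updating the header, fill bytes and memory tags
  // as needed); otherwise it allocates a new chunk, copies the old contents
  // and releases the old chunk.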
  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
    }

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    void *OldTaggedPtr = OldPtr;
    OldPtr = getHeaderTaggedPointer(OldPtr);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, OldPtr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  Header.OriginOrWasZeroed,
                                  Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = Header.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = Header.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
                            Header.SizeOrUnusedBytes);
    }
    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
        // If we have reduced the size, set the extra bytes to the fill value
        // so that we are ready to grow it again in the future.
        if (NewSize < OldSize) {
          const FillContentsMode FillContents =
              TSDRegistry.getDisableMemInit() ? NoFill
                                              : Options.getFillContentsMode();
          if (FillContents != NoFill) {
            memset(reinterpret_cast<char *>(OldTaggedPtr) + NewSize,
                   FillContents == ZeroFill ? 0 : PatternFillByte,
                   OldSize - NewSize);
          }
        }

        Header.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd -
                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::storeHeader(Cookie, OldPtr, &Header);
        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
          if (ClassId) {
            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                              NewSize, untagPointer(BlockEnd));
            storePrimaryAllocationStackMaybe(Options, OldPtr);
          } else {
            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
          }
        }
        return OldTaggedPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (LIKELY(NewPtr)) {
      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  //                windows of time when an allocation could still succeed after
  //                this function finishes. We will revisit that later.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.disable();
#endif
    TSDRegistry.disable();
    Stats.disable();
    Quarantine.disable();
    Primary.disable();
    Secondary.disable();
    disableRingBuffer();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
    enableRingBuffer();
    Secondary.enable();
    Primary.enable();
    Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.enable();
#endif
  }

  // The function returns the number of bytes required to store the statistics,
  // which might be larger than the number of bytes provided. Note that the
  // statistics buffer is not necessarily constant between calls to this
  // function. This can be called with a null buffer or zero size for buffer
  // sizing purposes.
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str;
    const uptr Length = getStats(&Str) + 1;
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }

  void printStats() {
    ScopedString Str;
    getStats(&Str);
    Str.output();
  }

  void printFragmentationInfo() {
    ScopedString Str;
    Primary.getFragmentationInfo(&Str);
    // Secondary allocator dumps the fragmentation data in getStats().
    Str.output();
  }

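  // Releases unused memory held by the Primary and Secondary back to the OS;
  // a ForceAll release first drains every per-thread cache back to the
  // Primary.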
  void releaseToOS(ReleaseToOS ReleaseType) {
    initThreadMaybe();
    if (ReleaseType == ReleaseToOS::ForceAll)
      drainCaches();
    Primary.releaseToOS(ReleaseType);
    Secondary.releaseToOS();
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    if (archSupportsMemoryTagging())
      Base = untagPointer(Base);
    const uptr From = Base;
    const uptr To = Base + Size;
    bool MayHaveTaggedPrimary =
        allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging();
    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                   Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (MayHaveTaggedPrimary) {
        // A chunk header can either have a zero tag (tagged primary) or the
        // header tag (secondary, or untagged primary). We don't know which so
        // try both.
        ScopedDisableMemoryTagChecks x;
        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      } else {
        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      }
      if (Header.State == Chunk::State::Allocated) {
        uptr TaggedChunk = Chunk;
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          TaggedChunk = untagPointer(TaggedChunk);
        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
          TaggedChunk = loadTag(Chunk);
        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                 Arg);
      }
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
#endif
  }

  bool canReturnNull() {
    initThreadMaybe();
    return Primary.Options.load().get(OptionBit::MayReturnNull);
  }

  bool setOption(Option O, sptr Value) {
    initThreadMaybe();
    if (O == Option::MemtagTuning) {
      // Enabling odd/even tags involves a tradeoff between use-after-free
      // detection and buffer overflow detection. Odd/even tags make it more
      // likely for buffer overflows to be detected by increasing the size of
      // the guaranteed "red zone" around the allocation, but on the other hand
      // use-after-free is less likely to be detected because the tag space for
      // any particular chunk is cut in half. Therefore we use this tuning
      // setting to control whether odd/even tags are enabled.
      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
        Primary.Options.set(OptionBit::UseOddEvenTags);
      else if (Value == M_MEMTAG_TUNING_UAF)
        Primary.Options.clear(OptionBit::UseOddEvenTags);
      return true;
    } else {
      // We leave it to the various sub-components to decide whether or not they
      // want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption calls were to return false.
      const bool PrimaryResult = Primary.setOption(O, Value);
      const bool SecondaryResult = Secondary.setOption(O, Value);
      const bool RegistryResult = TSDRegistry.setOption(O, Value);
      return PrimaryResult && SecondaryResult && RegistryResult;
    }
    return false;
  }

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!Ptr))
      return 0;

    return getAllocSize(Ptr);
  }

  uptr getAllocSize(const void *Ptr) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    // Getting the alloc size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));

    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
    // If the allocation is not owned, the tags could be wrong.
    ScopedDisableMemoryTagChecks x(
        useMemoryTagging<AllocatorConfig>(Primary.Options.load()));
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

  bool useMemoryTaggingTestOnly() const {
    return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
  }
  void disableMemoryTagging() {
    // If we haven't been initialized yet, we need to initialize now in order to
    // prevent a future call to initThreadMaybe() from enabling memory tagging
    // based on feature detection. But don't call initThreadMaybe() because it
    // may end up calling the allocator (via pthread_atfork, via the post-init
    // callback), which may cause mappings to be created with memory tagging
    // enabled.
    TSDRegistry.initOnceMaybe(this);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
      Secondary.disableMemoryTagging();
      Primary.Options.clear(OptionBit::UseMemoryTagging);
    }
  }

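  // Enables or disables recording of allocation and deallocation stack traces.
  // This requires a positive allocation_ring_buffer_size flag; when tracking
  // is turned on, the ring buffer is lazily initialized if needed.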
  void setTrackAllocationStacks(bool Track) {
    initThreadMaybe();
    if (getFlags()->allocation_ring_buffer_size <= 0) {
      DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
      return;
    }

    if (Track) {
      initRingBufferMaybe();
      Primary.Options.set(OptionBit::TrackAllocationStacks);
    } else
      Primary.Options.clear(OptionBit::TrackAllocationStacks);
  }

  void setFillContents(FillContentsMode FillContents) {
    initThreadMaybe();
    Primary.Options.setFillContentsMode(FillContents);
  }

  void setAddLargeAllocationSlack(bool AddSlack) {
    initThreadMaybe();
    if (AddSlack)
      Primary.Options.set(OptionBit::AddLargeAllocationSlack);
    else
      Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
  }

  const char *getStackDepotAddress() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
  }

  uptr getStackDepotSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? RB->StackDepotSize : 0;
  }

  const char *getRegionInfoArrayAddress() const {
    return Primary.getRegionInfoArrayAddress();
  }

  static uptr getRegionInfoArraySize() {
    return PrimaryT::getRegionInfoArraySize();
  }

  const char *getRingBufferAddress() {
    initThreadMaybe();
    return reinterpret_cast<char *>(getRingBuffer());
  }

  uptr getRingBufferSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB && RB->RingBufferElements
               ? ringBufferSizeInBytes(RB->RingBufferElements)
               : 0;
  }

  static const uptr MaxTraceSize = 64;

  static void collectTraceMaybe(const StackDepot *Depot,
                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
    uptr RingPos, Size;
    if (!Depot->find(Hash, &RingPos, &Size))
      return;
    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
  }

  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                           uintptr_t FaultAddr, const char *DepotPtr,
                           size_t DepotSize, const char *RegionInfoPtr,
                           const char *RingBufferPtr, size_t RingBufferSize,
                           const char *Memory, const char *MemoryTags,
                           uintptr_t MemoryAddr, size_t MemorySize) {
8973da01663SFlorian Mayer     // N.B. we need to support corrupted data in any of the buffers here. We get
8983da01663SFlorian Mayer     // this information from an external process (the crashing process) that
8993da01663SFlorian Mayer     // should not be able to crash the crash dumper (crash_dump on Android).
9003da01663SFlorian Mayer     // See also the get_error_info_fuzzer.
9011f55fa0bSPeter Collingbourne     *ErrorInfo = {};
9022dc9ec47SChiaHungDuan     if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
9031f55fa0bSPeter Collingbourne         MemoryAddr + MemorySize < MemoryAddr)
9041f55fa0bSPeter Collingbourne       return;
9051f55fa0bSPeter Collingbourne 
9063da01663SFlorian Mayer     const StackDepot *Depot = nullptr;
9073da01663SFlorian Mayer     if (DepotPtr) {
9083da01663SFlorian Mayer       // Check for a corrupted StackDepot. First we need to check whether we
9093da01663SFlorian Mayer       // can read the metadata, then whether the metadata matches the size.
9103da01663SFlorian Mayer       if (DepotSize < sizeof(*Depot))
9113da01663SFlorian Mayer         return;
9123da01663SFlorian Mayer       Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
9133da01663SFlorian Mayer       if (!Depot->isValid(DepotSize))
9143da01663SFlorian Mayer         return;
9153da01663SFlorian Mayer     }
9163da01663SFlorian Mayer 
91721d50019SPeter Collingbourne     size_t NextErrorReport = 0;
9189567131dSPeter Collingbourne 
9199567131dSPeter Collingbourne     // Check for OOB in the current block and the two surrounding blocks. Beyond
9209567131dSPeter Collingbourne     // that, UAF is more likely.
9211f55fa0bSPeter Collingbourne     if (extractTag(FaultAddr) != 0)
9221f55fa0bSPeter Collingbourne       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
9231f55fa0bSPeter Collingbourne                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
9249567131dSPeter Collingbourne                          MemorySize, 0, 2);
9259567131dSPeter Collingbourne 
9269567131dSPeter Collingbourne     // Check the ring buffer. For primary allocations this will only find UAF;
9279567131dSPeter Collingbourne     // for secondary allocations we can find either UAF or OOB.
9281f55fa0bSPeter Collingbourne     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
929a5bdc4a4SFlorian Mayer                            RingBufferPtr, RingBufferSize);
9309567131dSPeter Collingbourne 
9319567131dSPeter Collingbourne     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
9329567131dSPeter Collingbourne     // Beyond that we are likely to hit false positives.
9339567131dSPeter Collingbourne     if (extractTag(FaultAddr) != 0)
9349567131dSPeter Collingbourne       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
9359567131dSPeter Collingbourne                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
9369567131dSPeter Collingbourne                          MemorySize, 2, 16);
93721d50019SPeter Collingbourne   }
93821d50019SPeter Collingbourne 
939e4eadf17SKostya Kortchinsky private:
940e4eadf17SKostya Kortchinsky   typedef typename PrimaryT::SizeClassMap SizeClassMap;
941e4eadf17SKostya Kortchinsky 
942e4eadf17SKostya Kortchinsky   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
943e4eadf17SKostya Kortchinsky   static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
944e4eadf17SKostya Kortchinsky   static const uptr MinAlignment = 1UL << MinAlignmentLog;
945e4eadf17SKostya Kortchinsky   static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
946e4eadf17SKostya Kortchinsky   static const uptr MaxAllowedMallocSize =
947e4eadf17SKostya Kortchinsky       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
948e4eadf17SKostya Kortchinsky 
9495595249eSKostya Kortchinsky   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
9505595249eSKostya Kortchinsky                 "Minimal alignment must at least cover a chunk header.");
9512dc9ec47SChiaHungDuan   static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
952c299d198SPeter Collingbourne                     MinAlignment >= archMemoryTagGranuleSize(),
953c299d198SPeter Collingbourne                 "");
9545595249eSKostya Kortchinsky 
955e4eadf17SKostya Kortchinsky   static const u32 BlockMarker = 0x44554353U;
956e4eadf17SKostya Kortchinsky 
95721d50019SPeter Collingbourne   // These are indexes into an "array" of 32-bit values that store information
95821d50019SPeter Collingbourne   // inline with a chunk that is relevant to diagnosing memory tag faults, where
9591f55fa0bSPeter Collingbourne   // 0 corresponds to the address of the user memory. This means that only
9601f55fa0bSPeter Collingbourne   // negative indexes may be used. The smallest index that may be used is -2,
9611f55fa0bSPeter Collingbourne   // which corresponds to 8 bytes before the user memory, because the chunk
9621f55fa0bSPeter Collingbourne   // header size is 8 bytes and in allocators that support memory tagging the
9631f55fa0bSPeter Collingbourne   // minimum alignment is at least the tag granule size (16 on aarch64).
96421d50019SPeter Collingbourne   static const sptr MemTagAllocationTraceIndex = -2;
96521d50019SPeter Collingbourne   static const sptr MemTagAllocationTidIndex = -1;
96621d50019SPeter Collingbourne 
967d56ef852SVitaly Buka   u32 Cookie = 0;
968d56ef852SVitaly Buka   u32 QuarantineMaxChunkSize = 0;
969e4eadf17SKostya Kortchinsky 
970e851aeb0SPeter Collingbourne   GlobalStats Stats;
971e851aeb0SPeter Collingbourne   PrimaryT Primary;
972e851aeb0SPeter Collingbourne   SecondaryT Secondary;
973e851aeb0SPeter Collingbourne   QuarantineT Quarantine;
974e851aeb0SPeter Collingbourne   TSDRegistryT TSDRegistry;
9758936608eSMitch Phillips   pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
976e851aeb0SPeter Collingbourne 
977596d0614SEvgenii Stepanov #ifdef GWP_ASAN_HOOKS
978596d0614SEvgenii Stepanov   gwp_asan::GuardedPoolAllocator GuardedAlloc;
979e78b64dfSMitch Phillips   uptr GuardedAllocSlotSize = 0;
980596d0614SEvgenii Stepanov #endif // GWP_ASAN_HOOKS
981596d0614SEvgenii Stepanov 
9821f55fa0bSPeter Collingbourne   struct AllocationRingBuffer {
9831f55fa0bSPeter Collingbourne     struct Entry {
9841f55fa0bSPeter Collingbourne       atomic_uptr Ptr;
9851f55fa0bSPeter Collingbourne       atomic_uptr AllocationSize;
9861f55fa0bSPeter Collingbourne       atomic_u32 AllocationTrace;
9871f55fa0bSPeter Collingbourne       atomic_u32 AllocationTid;
9881f55fa0bSPeter Collingbourne       atomic_u32 DeallocationTrace;
9891f55fa0bSPeter Collingbourne       atomic_u32 DeallocationTid;
9901f55fa0bSPeter Collingbourne     };
9916dd6d487SFlorian Mayer     StackDepot *Depot = nullptr;
9926dd6d487SFlorian Mayer     uptr StackDepotSize = 0;
9936dd6d487SFlorian Mayer     MemMapT RawRingBufferMap;
9946dd6d487SFlorian Mayer     MemMapT RawStackDepotMap;
9956dd6d487SFlorian Mayer     u32 RingBufferElements = 0;
9961f55fa0bSPeter Collingbourne     atomic_uptr Pos;
9972426cc77SFlorian Mayer     // An array of RingBufferElements (at least one) elements of type Entry
9982426cc77SFlorian Mayer     // immediately follows this struct.
9991f55fa0bSPeter Collingbourne   };
1000337a2007SFlorian Mayer   static_assert(sizeof(AllocationRingBuffer) %
1001337a2007SFlorian Mayer                         alignof(typename AllocationRingBuffer::Entry) ==
1002337a2007SFlorian Mayer                     0,
1003337a2007SFlorian Mayer                 "invalid alignment");
1004337a2007SFlorian Mayer 
10050dbd804aSChristopher Ferris   // Lock to initialize the RingBuffer
10060dbd804aSChristopher Ferris   HybridMutex RingBufferInitLock;
10070dbd804aSChristopher Ferris 
10082426cc77SFlorian Mayer   // Pointer to the memory-mapped area starting with the AllocationRingBuffer
10092426cc77SFlorian Mayer   // struct, immediately followed by RingBufferElements elements of type Entry.
10106dd6d487SFlorian Mayer   atomic_uptr RingBufferAddress = {};
10116dd6d487SFlorian Mayer 
10126dd6d487SFlorian Mayer   AllocationRingBuffer *getRingBuffer() {
10136dd6d487SFlorian Mayer     return reinterpret_cast<AllocationRingBuffer *>(
10146dd6d487SFlorian Mayer         atomic_load(&RingBufferAddress, memory_order_acquire));
10156dd6d487SFlorian Mayer   }
10161f55fa0bSPeter Collingbourne 
1017e4eadf17SKostya Kortchinsky   // The following might get optimized out by the compiler.
1018e4eadf17SKostya Kortchinsky   NOINLINE void performSanityChecks() {
1019e4eadf17SKostya Kortchinsky     // Verify that the header offset field can hold the maximum offset. In the
1020e4eadf17SKostya Kortchinsky     // case of the Secondary allocator, it takes care of alignment and the
1021e4eadf17SKostya Kortchinsky     // offset will always be small. In the case of the Primary, the worst case
1022e4eadf17SKostya Kortchinsky     // scenario happens in the last size class, when the backend allocation
1023e4eadf17SKostya Kortchinsky     // would already be aligned on the requested alignment, which would happen
1024e4eadf17SKostya Kortchinsky     // to be the maximum alignment that would fit in that size class. As a
1025e4eadf17SKostya Kortchinsky     // result, the maximum offset will be at most the maximum alignment for the
1026e4eadf17SKostya Kortchinsky     // last size class minus the header size, in multiples of MinAlignment.
1027e4eadf17SKostya Kortchinsky     Chunk::UnpackedHeader Header = {};
1028e4eadf17SKostya Kortchinsky     const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1029e4eadf17SKostya Kortchinsky                                          SizeClassMap::MaxSize - MinAlignment);
1030e4eadf17SKostya Kortchinsky     const uptr MaxOffset =
1031e4eadf17SKostya Kortchinsky         (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1032e4eadf17SKostya Kortchinsky     Header.Offset = MaxOffset & Chunk::OffsetMask;
1033e4eadf17SKostya Kortchinsky     if (UNLIKELY(Header.Offset != MaxOffset))
1034e4eadf17SKostya Kortchinsky       reportSanityCheckError("offset");
1035e4eadf17SKostya Kortchinsky 
1036e4eadf17SKostya Kortchinsky     // Verify that we can fit the maximum size or amount of unused bytes in the
1037e4eadf17SKostya Kortchinsky     // header. Given that the Secondary fits the allocation to a page, the worst
1038e4eadf17SKostya Kortchinsky     // case scenario happens in the Primary. It will depend on the second to
1039e4eadf17SKostya Kortchinsky     // last and last class sizes, as well as the dynamic base for the Primary.
1040e4eadf17SKostya Kortchinsky     // The following is an over-approximation that works for our needs.
1041e4eadf17SKostya Kortchinsky     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1042419f1a41SKostya Kortchinsky     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1043e4eadf17SKostya Kortchinsky     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1044e4eadf17SKostya Kortchinsky       reportSanityCheckError("size (or unused bytes)");
1045e4eadf17SKostya Kortchinsky 
1046e4eadf17SKostya Kortchinsky     const uptr LargestClassId = SizeClassMap::LargestClassId;
1047e4eadf17SKostya Kortchinsky     Header.ClassId = LargestClassId;
1048e4eadf17SKostya Kortchinsky     if (UNLIKELY(Header.ClassId != LargestClassId))
1049e4eadf17SKostya Kortchinsky       reportSanityCheckError("class ID");
1050e4eadf17SKostya Kortchinsky   }
1051e4eadf17SKostya Kortchinsky 
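  // Computes the start of the backing block for a chunk: the chunk pointer
  // minus the header size, minus the alignment offset recorded in the header
  // (stored in multiples of MinAlignment).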
10526fd6cfdfSPeter Collingbourne   static inline void *getBlockBegin(const void *Ptr,
10538f18a4c9SKostya Kortchinsky                                     Chunk::UnpackedHeader *Header) {
1054419f1a41SKostya Kortchinsky     return reinterpret_cast<void *>(
1055419f1a41SKostya Kortchinsky         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1056419f1a41SKostya Kortchinsky         (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
10578f18a4c9SKostya Kortchinsky   }
10588f18a4c9SKostya Kortchinsky 
1059e4eadf17SKostya Kortchinsky   // Return the size of a chunk as requested during its allocation.
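  // For Primary-backed chunks the header stores the requested size directly;
  // for Secondary-backed chunks (ClassId == 0) it stores the number of unused
  // bytes at the end of the block, so the size is derived from the block end.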
10606fd6cfdfSPeter Collingbourne   inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1061e4eadf17SKostya Kortchinsky     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1062419f1a41SKostya Kortchinsky     if (LIKELY(Header->ClassId))
1063e4eadf17SKostya Kortchinsky       return SizeOrUnusedBytes;
10642dc9ec47SChiaHungDuan     if (allocatorSupportsMemoryTagging<AllocatorConfig>())
10653f71ce85SPeter Collingbourne       Ptr = untagPointer(const_cast<void *>(Ptr));
10668f18a4c9SKostya Kortchinsky     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1067e4eadf17SKostya Kortchinsky            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1068e4eadf17SKostya Kortchinsky   }
1069e4eadf17SKostya Kortchinsky 
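  // Turns a freshly allocated block into a chunk when memory tagging is not
  // used: optionally fills the contents (Primary blocks only), records the
  // alignment offset for over-aligned allocations, and stores the checksummed
  // header.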
1070772b1b0cSChiaHungDuan   ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
1071772b1b0cSChiaHungDuan                                 void *Block, const uptr UserPtr,
1072772b1b0cSChiaHungDuan                                 const uptr SizeOrUnusedBytes,
1073772b1b0cSChiaHungDuan                                 const FillContentsMode FillContents) {
1074b17d4455SAndrei Homescu     // Compute the default pointer before adding the header tag
1075b17d4455SAndrei Homescu     const uptr DefaultAlignedPtr =
1076b17d4455SAndrei Homescu         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1077b17d4455SAndrei Homescu 
1078772b1b0cSChiaHungDuan     Block = addHeaderTag(Block);
1079772b1b0cSChiaHungDuan     // Only do the content fill when the block is from the primary allocator,
1080772b1b0cSChiaHungDuan     // because the secondary allocator has already filled the content.
1081772b1b0cSChiaHungDuan     if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
1082772b1b0cSChiaHungDuan       // This condition is not necessarily unlikely, but since memset is
1083772b1b0cSChiaHungDuan       // costly, we might as well mark it as such.
1084772b1b0cSChiaHungDuan       memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
1085772b1b0cSChiaHungDuan              PrimaryT::getSizeByClassId(ClassId));
1086772b1b0cSChiaHungDuan     }
1087772b1b0cSChiaHungDuan 
1088772b1b0cSChiaHungDuan     Chunk::UnpackedHeader Header = {};
1089772b1b0cSChiaHungDuan 
1090772b1b0cSChiaHungDuan     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1091772b1b0cSChiaHungDuan       const uptr Offset = UserPtr - DefaultAlignedPtr;
1092772b1b0cSChiaHungDuan       DCHECK_GE(Offset, 2 * sizeof(u32));
1093772b1b0cSChiaHungDuan       // The BlockMarker has no security purpose, but is specifically meant for
1094772b1b0cSChiaHungDuan       // the chunk iteration function that can be used in debugging situations.
1095772b1b0cSChiaHungDuan       // It is the only situation where we have to locate the start of a chunk
1096772b1b0cSChiaHungDuan       // based on its block address.
1097772b1b0cSChiaHungDuan       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1098772b1b0cSChiaHungDuan       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
1099772b1b0cSChiaHungDuan       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
1100772b1b0cSChiaHungDuan     }
1101772b1b0cSChiaHungDuan 
1102772b1b0cSChiaHungDuan     Header.ClassId = ClassId & Chunk::ClassIdMask;
1103772b1b0cSChiaHungDuan     Header.State = Chunk::State::Allocated;
1104772b1b0cSChiaHungDuan     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
1105772b1b0cSChiaHungDuan     Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
1106772b1b0cSChiaHungDuan     Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
1107772b1b0cSChiaHungDuan                        &Header);
1108772b1b0cSChiaHungDuan 
1109772b1b0cSChiaHungDuan     return reinterpret_cast<void *>(UserPtr);
1110772b1b0cSChiaHungDuan   }
1111772b1b0cSChiaHungDuan 
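  // Same as initChunk(), but for the memory tagging path: Primary chunks are
  // (re)tagged and zeroed as needed, Secondary chunks only have the region
  // holding the header tagged, and allocation stacks are recorded when
  // tracking is enabled.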
1112772b1b0cSChiaHungDuan   NOINLINE void *
1113772b1b0cSChiaHungDuan   initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
1114772b1b0cSChiaHungDuan                              void *Block, const uptr UserPtr, const uptr Size,
1115772b1b0cSChiaHungDuan                              const uptr SizeOrUnusedBytes,
1116772b1b0cSChiaHungDuan                              const FillContentsMode FillContents) {
1117772b1b0cSChiaHungDuan     const Options Options = Primary.Options.load();
1118772b1b0cSChiaHungDuan     DCHECK(useMemoryTagging<AllocatorConfig>(Options));
1119772b1b0cSChiaHungDuan 
1120b17d4455SAndrei Homescu     // Compute the default pointer before adding the header tag
1121b17d4455SAndrei Homescu     const uptr DefaultAlignedPtr =
1122b17d4455SAndrei Homescu         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1123b17d4455SAndrei Homescu 
1124772b1b0cSChiaHungDuan     void *Ptr = reinterpret_cast<void *>(UserPtr);
1125772b1b0cSChiaHungDuan     void *TaggedPtr = Ptr;
1126772b1b0cSChiaHungDuan 
1127772b1b0cSChiaHungDuan     if (LIKELY(ClassId)) {
1128772b1b0cSChiaHungDuan       // Init the primary chunk.
1129772b1b0cSChiaHungDuan       //
1130772b1b0cSChiaHungDuan       // We only need to zero or tag the contents for Primary backed
1131772b1b0cSChiaHungDuan       // allocations. We only set tags for primary allocations in order to avoid
1132772b1b0cSChiaHungDuan       // faulting potentially large numbers of pages for large secondary
1133772b1b0cSChiaHungDuan       // allocations. We assume that guard pages are enough to protect these
1134772b1b0cSChiaHungDuan       // allocations.
1135772b1b0cSChiaHungDuan       //
1136772b1b0cSChiaHungDuan       // FIXME: When the kernel provides a way to set the background tag of a
1137772b1b0cSChiaHungDuan       // mapping, we should be able to tag secondary allocations as well.
1138772b1b0cSChiaHungDuan       //
1139772b1b0cSChiaHungDuan       // When memory tagging is enabled, zeroing the contents is done as part of
1140772b1b0cSChiaHungDuan       // setting the tag.
1141772b1b0cSChiaHungDuan 
1142772b1b0cSChiaHungDuan       Chunk::UnpackedHeader Header;
1143772b1b0cSChiaHungDuan       const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
1144772b1b0cSChiaHungDuan       const uptr BlockUptr = reinterpret_cast<uptr>(Block);
1145772b1b0cSChiaHungDuan       const uptr BlockEnd = BlockUptr + BlockSize;
1146772b1b0cSChiaHungDuan       // If possible, try to reuse the UAF tag that was set by deallocate().
1147772b1b0cSChiaHungDuan       // For simplicity, only reuse tags if we have the same start address as
1148772b1b0cSChiaHungDuan       // the previous allocation. This handles the majority of cases since
1149772b1b0cSChiaHungDuan       // most allocations will not be more aligned than the minimum alignment.
1150772b1b0cSChiaHungDuan       //
1151772b1b0cSChiaHungDuan       // We need to handle situations involving reclaimed chunks, and retag
1152772b1b0cSChiaHungDuan       // the reclaimed portions if necessary. In the case where the chunk is
1153772b1b0cSChiaHungDuan       // fully reclaimed, the chunk's header will be zero, which will trigger
1154772b1b0cSChiaHungDuan       // the code path for new mappings and invalid chunks that prepares the
1155772b1b0cSChiaHungDuan       // chunk from scratch. There are three possibilities for partial
1156772b1b0cSChiaHungDuan       // reclaiming:
1157772b1b0cSChiaHungDuan       //
1158772b1b0cSChiaHungDuan       // (1) Header was reclaimed, data was partially reclaimed.
1159772b1b0cSChiaHungDuan       // (2) Header was not reclaimed, all data was reclaimed (e.g. because
1160772b1b0cSChiaHungDuan       //     data started on a page boundary).
1161772b1b0cSChiaHungDuan       // (3) Header was not reclaimed, data was partially reclaimed.
1162772b1b0cSChiaHungDuan       //
1163772b1b0cSChiaHungDuan       // Case (1) will be handled in the same way as for full reclaiming,
1164772b1b0cSChiaHungDuan       // since the header will be zero.
1165772b1b0cSChiaHungDuan       //
1166772b1b0cSChiaHungDuan       // We can detect case (2) by loading the tag from the start
1167772b1b0cSChiaHungDuan       // of the chunk. If it is zero, it means that either all data was
1168772b1b0cSChiaHungDuan       // reclaimed (since we never use zero as the chunk tag), or that the
1169772b1b0cSChiaHungDuan       // previous allocation was of size zero. Either way, we need to prepare
1170772b1b0cSChiaHungDuan       // a new chunk from scratch.
1171772b1b0cSChiaHungDuan       //
1172772b1b0cSChiaHungDuan       // We can detect case (3) by moving to the next page (if covered by the
1173772b1b0cSChiaHungDuan       // chunk) and loading the tag of its first granule. If it is zero, it
1174772b1b0cSChiaHungDuan       // means that all following pages may need to be retagged. On the other
1175772b1b0cSChiaHungDuan       // hand, if it is nonzero, we can assume that all following pages are
1176772b1b0cSChiaHungDuan       // still tagged, according to the logic that if any of the pages
1177772b1b0cSChiaHungDuan       // following the next page were reclaimed, the next page would have been
1178772b1b0cSChiaHungDuan       // reclaimed as well.
1179772b1b0cSChiaHungDuan       uptr TaggedUserPtr;
1180772b1b0cSChiaHungDuan       uptr PrevUserPtr;
1181772b1b0cSChiaHungDuan       if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
1182772b1b0cSChiaHungDuan           PrevUserPtr == UserPtr &&
1183772b1b0cSChiaHungDuan           (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
1184772b1b0cSChiaHungDuan         uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
1185772b1b0cSChiaHungDuan         const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
1186772b1b0cSChiaHungDuan         if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
1187772b1b0cSChiaHungDuan           PrevEnd = NextPage;
1188772b1b0cSChiaHungDuan         TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
1189772b1b0cSChiaHungDuan         resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
1190772b1b0cSChiaHungDuan         if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
1191772b1b0cSChiaHungDuan           // If an allocation needs to be zeroed (i.e. calloc) we can normally
1192772b1b0cSChiaHungDuan           // avoid zeroing the memory now since we can rely on memory having
1193772b1b0cSChiaHungDuan           // been zeroed on free, as this is normally done while setting the
1194772b1b0cSChiaHungDuan           // UAF tag. But if tagging was disabled per-thread when the memory
1195772b1b0cSChiaHungDuan           // was freed, it would not have been retagged and thus zeroed, and
1196772b1b0cSChiaHungDuan           // therefore it needs to be zeroed now.
1197772b1b0cSChiaHungDuan           memset(TaggedPtr, 0,
1198772b1b0cSChiaHungDuan                  Min(Size, roundUp(PrevEnd - TaggedUserPtr,
1199772b1b0cSChiaHungDuan                                    archMemoryTagGranuleSize())));
1200772b1b0cSChiaHungDuan         } else if (Size) {
1201772b1b0cSChiaHungDuan           // Clear any stack metadata that may have previously been stored in
1202772b1b0cSChiaHungDuan           // the chunk data.
1203772b1b0cSChiaHungDuan           memset(TaggedPtr, 0, archMemoryTagGranuleSize());
1204772b1b0cSChiaHungDuan         }
1205772b1b0cSChiaHungDuan       } else {
1206772b1b0cSChiaHungDuan         const uptr OddEvenMask =
1207772b1b0cSChiaHungDuan             computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
1208772b1b0cSChiaHungDuan         TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
1209772b1b0cSChiaHungDuan       }
1210772b1b0cSChiaHungDuan       storePrimaryAllocationStackMaybe(Options, Ptr);
1211772b1b0cSChiaHungDuan     } else {
1212772b1b0cSChiaHungDuan       // Init the secondary chunk.
1213772b1b0cSChiaHungDuan 
1214772b1b0cSChiaHungDuan       Block = addHeaderTag(Block);
1215772b1b0cSChiaHungDuan       Ptr = addHeaderTag(Ptr);
1216772b1b0cSChiaHungDuan       storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
1217772b1b0cSChiaHungDuan       storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
1218772b1b0cSChiaHungDuan     }
1219772b1b0cSChiaHungDuan 
1220772b1b0cSChiaHungDuan     Chunk::UnpackedHeader Header = {};
1221772b1b0cSChiaHungDuan 
1222772b1b0cSChiaHungDuan     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1223772b1b0cSChiaHungDuan       const uptr Offset = UserPtr - DefaultAlignedPtr;
1224772b1b0cSChiaHungDuan       DCHECK_GE(Offset, 2 * sizeof(u32));
1225772b1b0cSChiaHungDuan       // The BlockMarker has no security purpose, but is specifically meant for
1226772b1b0cSChiaHungDuan       // the chunk iteration function that can be used in debugging situations.
1227772b1b0cSChiaHungDuan       // It is the only situation where we have to locate the start of a chunk
1228772b1b0cSChiaHungDuan       // based on its block address.
1229772b1b0cSChiaHungDuan       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1230772b1b0cSChiaHungDuan       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
1231772b1b0cSChiaHungDuan       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
1232772b1b0cSChiaHungDuan     }
1233772b1b0cSChiaHungDuan 
1234772b1b0cSChiaHungDuan     Header.ClassId = ClassId & Chunk::ClassIdMask;
1235772b1b0cSChiaHungDuan     Header.State = Chunk::State::Allocated;
1236772b1b0cSChiaHungDuan     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
1237772b1b0cSChiaHungDuan     Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
1238772b1b0cSChiaHungDuan     Chunk::storeHeader(Cookie, Ptr, &Header);
1239772b1b0cSChiaHungDuan 
1240772b1b0cSChiaHungDuan     return TaggedPtr;
1241772b1b0cSChiaHungDuan   }
1242772b1b0cSChiaHungDuan 
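  // Transitions a chunk to Available or Quarantined, then either returns its
  // block to the Primary/Secondary backend or puts the chunk into the
  // quarantine.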
1243867f2d9eSChristopher Ferris   void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
12446a4c3959SChia-hung Duan                                    Chunk::UnpackedHeader *Header,
12456a4c3959SChia-hung Duan                                    uptr Size) NO_THREAD_SAFETY_ANALYSIS {
1246e4fa0b30SPeter Collingbourne     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
12480a5576ecSPeter Collingbourne     // If the quarantine is disabled, or the actual size of a chunk is 0 or
12490a5576ecSPeter Collingbourne     // larger than the maximum allowed, we return the chunk directly to the
12500a5576ecSPeter Collingbourne     // backend. This purposefully underflows for Size == 0.
12500a5576ecSPeter Collingbourne     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
12510a5576ecSPeter Collingbourne                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
125254ddd076SChiaHungDuan                                   !Header->ClassId;
1253f2819ee6SPeter Collingbourne     if (BypassQuarantine)
125454ddd076SChiaHungDuan       Header->State = Chunk::State::Available;
1255f2819ee6SPeter Collingbourne     else
125654ddd076SChiaHungDuan       Header->State = Chunk::State::Quarantined;
1257772b1b0cSChiaHungDuan 
1258*ed0fd137SChristopher Ferris     if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options)))
1259772b1b0cSChiaHungDuan       Header->OriginOrWasZeroed = 0U;
1260*ed0fd137SChristopher Ferris     else {
1261772b1b0cSChiaHungDuan       Header->OriginOrWasZeroed =
1262772b1b0cSChiaHungDuan           Header->ClassId && !TSDRegistry.getDisableMemInit();
1263772b1b0cSChiaHungDuan     }
1264772b1b0cSChiaHungDuan 
126554ddd076SChiaHungDuan     Chunk::storeHeader(Cookie, Ptr, Header);
12660a5576ecSPeter Collingbourne 
1267e4eadf17SKostya Kortchinsky     if (BypassQuarantine) {
1268*ed0fd137SChristopher Ferris       void *BlockBegin;
1269*ed0fd137SChristopher Ferris       if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
1270*ed0fd137SChristopher Ferris         // Must do this after storeHeader because loadHeader uses a tagged ptr.
1271*ed0fd137SChristopher Ferris         if (allocatorSupportsMemoryTagging<AllocatorConfig>())
1272*ed0fd137SChristopher Ferris           Ptr = untagPointer(Ptr);
1273*ed0fd137SChristopher Ferris         BlockBegin = getBlockBegin(Ptr, Header);
1274*ed0fd137SChristopher Ferris       } else {
1275*ed0fd137SChristopher Ferris         BlockBegin = retagBlock(Options, TaggedPtr, Ptr, Header, Size, true);
1276*ed0fd137SChristopher Ferris       }
1277*ed0fd137SChristopher Ferris 
127854ddd076SChiaHungDuan       const uptr ClassId = Header->ClassId;
1279419f1a41SKostya Kortchinsky       if (LIKELY(ClassId)) {
12808ce036d5SChiaHungDuan         bool CacheDrained;
12818ce036d5SChiaHungDuan         {
12828ce036d5SChiaHungDuan           typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
12838ce036d5SChiaHungDuan           CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
12848ce036d5SChiaHungDuan         }
1285bce8c9e3SChia-hung Duan         // When we have drained some blocks back to the Primary from TSD, that
1286bce8c9e3SChia-hung Duan         // implies that we may have the chance to release some pages as well.
1287bce8c9e3SChia-hung Duan         // Note that in order not to block other threads' access to the TSD,
1288bce8c9e3SChia-hung Duan         // release the TSD first, then try the page release.
1289bce8c9e3SChia-hung Duan         if (CacheDrained)
1290bce8c9e3SChia-hung Duan           Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
1291e4eadf17SKostya Kortchinsky       } else {
12923f71ce85SPeter Collingbourne         Secondary.deallocate(Options, BlockBegin);
1293e4eadf17SKostya Kortchinsky       }
1294e4eadf17SKostya Kortchinsky     } else {
1295*ed0fd137SChristopher Ferris       if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
1296*ed0fd137SChristopher Ferris         retagBlock(Options, TaggedPtr, Ptr, Header, Size, false);
12978ce036d5SChiaHungDuan       typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
1298ae1bd3adSChia-hung Duan       Quarantine.put(&TSD->getQuarantineCache(),
1299ae1bd3adSChia-hung Duan                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
1300e4eadf17SKostya Kortchinsky     }
1301e4eadf17SKostya Kortchinsky   }
1302e4eadf17SKostya Kortchinsky 
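  // Memory tagging path of deallocation: records the deallocation stack if
  // tracking is enabled, re-tags Primary-backed chunks (unless memory init is
  // disabled) so that an immediate use-after-free faults, and returns the
  // untagged block begin.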
1303772b1b0cSChiaHungDuan   NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
1304772b1b0cSChiaHungDuan                             Chunk::UnpackedHeader *Header, const uptr Size,
1305772b1b0cSChiaHungDuan                             bool BypassQuarantine) {
1306772b1b0cSChiaHungDuan     DCHECK(useMemoryTagging<AllocatorConfig>(Options));
1307772b1b0cSChiaHungDuan 
1308772b1b0cSChiaHungDuan     const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1309772b1b0cSChiaHungDuan     storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1310772b1b0cSChiaHungDuan     if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
1311772b1b0cSChiaHungDuan       uptr TaggedBegin, TaggedEnd;
1312772b1b0cSChiaHungDuan       const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1313772b1b0cSChiaHungDuan           Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
1314772b1b0cSChiaHungDuan           Header->ClassId);
1315772b1b0cSChiaHungDuan       // Exclude the previous tag so that immediate use after free is
1316772b1b0cSChiaHungDuan       // detected 100% of the time.
1317772b1b0cSChiaHungDuan       setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1318772b1b0cSChiaHungDuan                    &TaggedEnd);
1319772b1b0cSChiaHungDuan     }
1320772b1b0cSChiaHungDuan 
1321772b1b0cSChiaHungDuan     Ptr = untagPointer(Ptr);
1322772b1b0cSChiaHungDuan     void *BlockBegin = getBlockBegin(Ptr, Header);
1323772b1b0cSChiaHungDuan     if (BypassQuarantine && !Header->ClassId) {
1324772b1b0cSChiaHungDuan       storeTags(reinterpret_cast<uptr>(BlockBegin),
1325772b1b0cSChiaHungDuan                 reinterpret_cast<uptr>(Ptr));
1326772b1b0cSChiaHungDuan     }
1327772b1b0cSChiaHungDuan 
1328772b1b0cSChiaHungDuan     return BlockBegin;
1329772b1b0cSChiaHungDuan   }
1330772b1b0cSChiaHungDuan 
1331e966416fSPeter Collingbourne   bool getChunkFromBlock(uptr Block, uptr *Chunk,
1332e966416fSPeter Collingbourne                          Chunk::UnpackedHeader *Header) {
133321d50019SPeter Collingbourne     *Chunk =
133421d50019SPeter Collingbourne         Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1335e966416fSPeter Collingbourne     return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1336e4eadf17SKostya Kortchinsky   }
1337f7b1489fSKostya Kortchinsky 
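  // Returns the offset from the start of a block to its chunk header. Blocks
  // that start with BlockMarker carry an extra 32-bit offset (written when the
  // chunk is over-aligned) right after the marker.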
133821d50019SPeter Collingbourne   static uptr getChunkOffsetFromBlock(const char *Block) {
133921d50019SPeter Collingbourne     u32 Offset = 0;
134021d50019SPeter Collingbourne     if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
134121d50019SPeter Collingbourne       Offset = reinterpret_cast<const u32 *>(Block)[1];
134221d50019SPeter Collingbourne     return Offset + Chunk::getHeaderSize();
134321d50019SPeter Collingbourne   }
134421d50019SPeter Collingbourne 
1345f79929acSPeter Collingbourne   // Set the tag of the granule past the end of the allocation to 0, to catch
1346f79929acSPeter Collingbourne   // linear overflows even if a previous larger allocation used the same block
1347f79929acSPeter Collingbourne   // and tag. Only do this if the granule past the end is in our block, because
1348f79929acSPeter Collingbourne   // this would otherwise lead to a SEGV if the allocation covers the entire
1349f79929acSPeter Collingbourne   // block and our block is at the end of a mapping. The tag of the next block's
1350f79929acSPeter Collingbourne   // header granule will be set to 0, so it will serve the purpose of catching
1351f79929acSPeter Collingbourne   // linear overflows in this case.
1352f79929acSPeter Collingbourne   //
1353f79929acSPeter Collingbourne   // For allocations of size 0 we do not end up storing the address tag to the
1354f79929acSPeter Collingbourne   // memory tag space, which getInlineErrorInfo() normally relies on to match
1355f79929acSPeter Collingbourne   // address tags against chunks. To allow matching in this case we store the
1356f79929acSPeter Collingbourne   // address tag in the first byte of the chunk.
1357f79929acSPeter Collingbourne   void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1358fe309636SVitaly Buka     DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1359f79929acSPeter Collingbourne     uptr UntaggedEnd = untagPointer(End);
1360f79929acSPeter Collingbourne     if (UntaggedEnd != BlockEnd) {
1361f79929acSPeter Collingbourne       storeTag(UntaggedEnd);
1362f79929acSPeter Collingbourne       if (Size == 0)
1363f79929acSPeter Collingbourne         *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1364f79929acSPeter Collingbourne     }
1365f79929acSPeter Collingbourne   }
1366f79929acSPeter Collingbourne 
13673d47e003SPeter Collingbourne   void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
13683d47e003SPeter Collingbourne                            uptr BlockEnd) {
13693d47e003SPeter Collingbourne     // Prepare the granule before the chunk to store the chunk header by setting
13703d47e003SPeter Collingbourne     // its tag to 0. Normally its tag will already be 0, but in the case where a
13713d47e003SPeter Collingbourne     // chunk holding a low alignment allocation is reused for a higher alignment
13723d47e003SPeter Collingbourne     // allocation, the chunk may already have a non-zero tag from the previous
13733d47e003SPeter Collingbourne     // allocation.
13743d47e003SPeter Collingbourne     storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
13753d47e003SPeter Collingbourne 
13763d47e003SPeter Collingbourne     uptr TaggedBegin, TaggedEnd;
13773d47e003SPeter Collingbourne     setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
13783d47e003SPeter Collingbourne 
1379f79929acSPeter Collingbourne     storeEndMarker(TaggedEnd, Size, BlockEnd);
13803d47e003SPeter Collingbourne     return reinterpret_cast<void *>(TaggedBegin);
13813d47e003SPeter Collingbourne   }
13823d47e003SPeter Collingbourne 
1383f79929acSPeter Collingbourne   void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1384f79929acSPeter Collingbourne                          uptr BlockEnd) {
1385a9269773SChia-hung Duan     uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
13863d47e003SPeter Collingbourne     uptr RoundNewPtr;
13873d47e003SPeter Collingbourne     if (RoundOldPtr >= NewPtr) {
13883d47e003SPeter Collingbourne       // If the allocation is shrinking we just need to set the tag past the end
1389f79929acSPeter Collingbourne       // of the allocation to 0. See explanation in storeEndMarker() above.
1390a9269773SChia-hung Duan       RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
13913d47e003SPeter Collingbourne     } else {
13923d47e003SPeter Collingbourne       // Set the memory tag of the region
1393a9269773SChia-hung Duan       // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
13943d47e003SPeter Collingbourne       // to the pointer tag stored in OldPtr.
13953d47e003SPeter Collingbourne       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
13963d47e003SPeter Collingbourne     }
1397f79929acSPeter Collingbourne     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
13983d47e003SPeter Collingbourne   }
13993d47e003SPeter Collingbourne 
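  // Records the allocation stack id and thread id in the two 32-bit slots
  // right before the user memory (see MemTagAllocationTraceIndex/TidIndex).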
14003da01663SFlorian Mayer   void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
14016dd6d487SFlorian Mayer     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
14026dd6d487SFlorian Mayer       return;
14036dd6d487SFlorian Mayer     AllocationRingBuffer *RB = getRingBuffer();
14046dd6d487SFlorian Mayer     if (!RB)
140521d50019SPeter Collingbourne       return;
140621d50019SPeter Collingbourne     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
14076dd6d487SFlorian Mayer     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
140821d50019SPeter Collingbourne     Ptr32[MemTagAllocationTidIndex] = getThreadID();
140921d50019SPeter Collingbourne   }
141021d50019SPeter Collingbourne 
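  // Appends an entry to the allocation ring buffer. Pos only ever increases;
  // the buffer wraps by indexing modulo RingBufferElements.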
14116dd6d487SFlorian Mayer   void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
14126dd6d487SFlorian Mayer                             u32 AllocationTrace, u32 AllocationTid,
14131f55fa0bSPeter Collingbourne                             uptr AllocationSize, u32 DeallocationTrace,
14141f55fa0bSPeter Collingbourne                             u32 DeallocationTid) {
14156dd6d487SFlorian Mayer     uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
14161f55fa0bSPeter Collingbourne     typename AllocationRingBuffer::Entry *Entry =
14176dd6d487SFlorian Mayer         getRingBufferEntry(RB, Pos % RB->RingBufferElements);
14181f55fa0bSPeter Collingbourne 
14191f55fa0bSPeter Collingbourne     // First invalidate our entry so that we don't attempt to interpret a
14201f55fa0bSPeter Collingbourne     // partially written state in getRingBufferErrorInfo(). The fences below
14211f55fa0bSPeter Collingbourne     // ensure that the compiler does not move the stores to Ptr in between the
14221f55fa0bSPeter Collingbourne     // stores to the other fields.
14231f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->Ptr, 0);
14241f55fa0bSPeter Collingbourne 
14251f55fa0bSPeter Collingbourne     __atomic_signal_fence(__ATOMIC_SEQ_CST);
14261f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
14271f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
14281f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
14291f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
14301f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
14311f55fa0bSPeter Collingbourne     __atomic_signal_fence(__ATOMIC_SEQ_CST);
14321f55fa0bSPeter Collingbourne 
14331f55fa0bSPeter Collingbourne     atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
14341f55fa0bSPeter Collingbourne   }
14351f55fa0bSPeter Collingbourne 
1436867f2d9eSChristopher Ferris   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
14371f55fa0bSPeter Collingbourne                                           uptr Size) {
14386dd6d487SFlorian Mayer     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
143921d50019SPeter Collingbourne       return;
14406dd6d487SFlorian Mayer     AllocationRingBuffer *RB = getRingBuffer();
14416dd6d487SFlorian Mayer     if (!RB)
14426dd6d487SFlorian Mayer       return;
14436dd6d487SFlorian Mayer     u32 Trace = collectStackTrace(RB->Depot);
14441f55fa0bSPeter Collingbourne     u32 Tid = getThreadID();
14451f55fa0bSPeter Collingbourne 
144621d50019SPeter Collingbourne     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
14471f55fa0bSPeter Collingbourne     Ptr32[MemTagAllocationTraceIndex] = Trace;
14481f55fa0bSPeter Collingbourne     Ptr32[MemTagAllocationTidIndex] = Tid;
14491f55fa0bSPeter Collingbourne 
14506dd6d487SFlorian Mayer     storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
14511f55fa0bSPeter Collingbourne   }
14521f55fa0bSPeter Collingbourne 
1453867f2d9eSChristopher Ferris   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
1454867f2d9eSChristopher Ferris                                    u8 PrevTag, uptr Size) {
14556dd6d487SFlorian Mayer     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
14566dd6d487SFlorian Mayer       return;
14576dd6d487SFlorian Mayer     AllocationRingBuffer *RB = getRingBuffer();
14586dd6d487SFlorian Mayer     if (!RB)
14591f55fa0bSPeter Collingbourne       return;
14601f55fa0bSPeter Collingbourne     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
14611f55fa0bSPeter Collingbourne     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
14621f55fa0bSPeter Collingbourne     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
14631f55fa0bSPeter Collingbourne 
14646dd6d487SFlorian Mayer     u32 DeallocationTrace = collectStackTrace(RB->Depot);
14651f55fa0bSPeter Collingbourne     u32 DeallocationTid = getThreadID();
14661f55fa0bSPeter Collingbourne 
14676dd6d487SFlorian Mayer     storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
14681f55fa0bSPeter Collingbourne                          AllocationTrace, AllocationTid, Size,
14691f55fa0bSPeter Collingbourne                          DeallocationTrace, DeallocationTid);
14701f55fa0bSPeter Collingbourne   }
14711f55fa0bSPeter Collingbourne 
14721f55fa0bSPeter Collingbourne   static const size_t NumErrorReports =
14737abd6837SDominic Chen       sizeof(((scudo_error_info *)nullptr)->reports) /
14747abd6837SDominic Chen       sizeof(((scudo_error_info *)nullptr)->reports[0]);
14751f55fa0bSPeter Collingbourne 
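  // Reports OOB accesses using the metadata stored inline next to each chunk:
  // scans blocks at distances [MinDistance, MaxDistance) from the faulting
  // block for an allocated chunk whose memory tag matches the fault address.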
14761f55fa0bSPeter Collingbourne   static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
14771f55fa0bSPeter Collingbourne                                  size_t &NextErrorReport, uintptr_t FaultAddr,
14781f55fa0bSPeter Collingbourne                                  const StackDepot *Depot,
14791f55fa0bSPeter Collingbourne                                  const char *RegionInfoPtr, const char *Memory,
14801f55fa0bSPeter Collingbourne                                  const char *MemoryTags, uintptr_t MemoryAddr,
14819567131dSPeter Collingbourne                                  size_t MemorySize, size_t MinDistance,
14829567131dSPeter Collingbourne                                  size_t MaxDistance) {
14831f55fa0bSPeter Collingbourne     uptr UntaggedFaultAddr = untagPointer(FaultAddr);
14841f55fa0bSPeter Collingbourne     u8 FaultAddrTag = extractTag(FaultAddr);
14851f55fa0bSPeter Collingbourne     BlockInfo Info =
14861f55fa0bSPeter Collingbourne         PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
14871f55fa0bSPeter Collingbourne 
14881f55fa0bSPeter Collingbourne     auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
14891f55fa0bSPeter Collingbourne       if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
14901f55fa0bSPeter Collingbourne           Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
14911f55fa0bSPeter Collingbourne         return false;
14921f55fa0bSPeter Collingbourne       *Data = &Memory[Addr - MemoryAddr];
14931f55fa0bSPeter Collingbourne       *Tag = static_cast<u8>(
14941f55fa0bSPeter Collingbourne           MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
14951f55fa0bSPeter Collingbourne       return true;
14961f55fa0bSPeter Collingbourne     };
14971f55fa0bSPeter Collingbourne 
14981f55fa0bSPeter Collingbourne     auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
14991f55fa0bSPeter Collingbourne                          Chunk::UnpackedHeader *Header, const u32 **Data,
15001f55fa0bSPeter Collingbourne                          u8 *Tag) {
15011f55fa0bSPeter Collingbourne       const char *BlockBegin;
15021f55fa0bSPeter Collingbourne       u8 BlockBeginTag;
15031f55fa0bSPeter Collingbourne       if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
15041f55fa0bSPeter Collingbourne         return false;
15051f55fa0bSPeter Collingbourne       uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
15061f55fa0bSPeter Collingbourne       *ChunkAddr = Addr + ChunkOffset;
15071f55fa0bSPeter Collingbourne 
15081f55fa0bSPeter Collingbourne       const char *ChunkBegin;
15091f55fa0bSPeter Collingbourne       if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
15101f55fa0bSPeter Collingbourne         return false;
15111f55fa0bSPeter Collingbourne       *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
15121f55fa0bSPeter Collingbourne           ChunkBegin - Chunk::getHeaderSize());
15131f55fa0bSPeter Collingbourne       *Data = reinterpret_cast<const u32 *>(ChunkBegin);
1514f79929acSPeter Collingbourne 
1515f79929acSPeter Collingbourne       // Allocations of size 0 will have stashed the tag in the first byte of
1516f79929acSPeter Collingbourne       // the chunk, see storeEndMarker().
1517f79929acSPeter Collingbourne       if (Header->SizeOrUnusedBytes == 0)
1518f79929acSPeter Collingbourne         *Tag = static_cast<u8>(*ChunkBegin);
1519f79929acSPeter Collingbourne 
15201f55fa0bSPeter Collingbourne       return true;
15211f55fa0bSPeter Collingbourne     };
15221f55fa0bSPeter Collingbourne 
15231f55fa0bSPeter Collingbourne     if (NextErrorReport == NumErrorReports)
15241f55fa0bSPeter Collingbourne       return;
15251f55fa0bSPeter Collingbourne 
15261f55fa0bSPeter Collingbourne     auto CheckOOB = [&](uptr BlockAddr) {
15271f55fa0bSPeter Collingbourne       if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
15281f55fa0bSPeter Collingbourne         return false;
15291f55fa0bSPeter Collingbourne 
15301f55fa0bSPeter Collingbourne       uptr ChunkAddr;
15311f55fa0bSPeter Collingbourne       Chunk::UnpackedHeader Header;
15321f55fa0bSPeter Collingbourne       const u32 *Data;
15331f55fa0bSPeter Collingbourne       uint8_t Tag;
15341f55fa0bSPeter Collingbourne       if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
15351f55fa0bSPeter Collingbourne           Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
15361f55fa0bSPeter Collingbourne         return false;
15371f55fa0bSPeter Collingbourne 
15381f55fa0bSPeter Collingbourne       auto *R = &ErrorInfo->reports[NextErrorReport++];
15391f55fa0bSPeter Collingbourne       R->error_type =
15401f55fa0bSPeter Collingbourne           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
15411f55fa0bSPeter Collingbourne       R->allocation_address = ChunkAddr;
15421f55fa0bSPeter Collingbourne       R->allocation_size = Header.SizeOrUnusedBytes;
15433da01663SFlorian Mayer       if (Depot) {
15441f55fa0bSPeter Collingbourne         collectTraceMaybe(Depot, R->allocation_trace,
15451f55fa0bSPeter Collingbourne                           Data[MemTagAllocationTraceIndex]);
15463da01663SFlorian Mayer       }
15471f55fa0bSPeter Collingbourne       R->allocation_tid = Data[MemTagAllocationTidIndex];
15481f55fa0bSPeter Collingbourne       return NextErrorReport == NumErrorReports;
15491f55fa0bSPeter Collingbourne     };
15501f55fa0bSPeter Collingbourne 
15519567131dSPeter Collingbourne     if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
15521f55fa0bSPeter Collingbourne       return;
15531f55fa0bSPeter Collingbourne 
15549567131dSPeter Collingbourne     for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
15551f55fa0bSPeter Collingbourne       if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
15561f55fa0bSPeter Collingbourne           CheckOOB(Info.BlockBegin - I * Info.BlockSize))
15571f55fa0bSPeter Collingbourne         return;
15581f55fa0bSPeter Collingbourne   }
15591f55fa0bSPeter Collingbourne 
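  // Walks the ring buffer from the most recent entry backwards, reporting
  // use-after-free for freed entries whose range covers the fault address and
  // overflow/underflow for live (secondary) entries close to it.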
15601f55fa0bSPeter Collingbourne   static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
15611f55fa0bSPeter Collingbourne                                      size_t &NextErrorReport,
15621f55fa0bSPeter Collingbourne                                      uintptr_t FaultAddr,
15631f55fa0bSPeter Collingbourne                                      const StackDepot *Depot,
1564a5bdc4a4SFlorian Mayer                                      const char *RingBufferPtr,
1565a5bdc4a4SFlorian Mayer                                      size_t RingBufferSize) {
15661f55fa0bSPeter Collingbourne     auto *RingBuffer =
15671f55fa0bSPeter Collingbourne         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
1568a5bdc4a4SFlorian Mayer     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
15693da01663SFlorian Mayer     if (!RingBuffer || RingBufferElements == 0 || !Depot)
157047bd46e2SFlorian Mayer       return;
15711f55fa0bSPeter Collingbourne     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
15721f55fa0bSPeter Collingbourne 
1573a5bdc4a4SFlorian Mayer     for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
1574a5bdc4a4SFlorian Mayer                            NextErrorReport != NumErrorReports;
15751f55fa0bSPeter Collingbourne          --I) {
15766dd6d487SFlorian Mayer       auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
15771f55fa0bSPeter Collingbourne       uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
15786732a532SPeter Collingbourne       if (!EntryPtr)
15791f55fa0bSPeter Collingbourne         continue;
15801f55fa0bSPeter Collingbourne 
15816732a532SPeter Collingbourne       uptr UntaggedEntryPtr = untagPointer(EntryPtr);
15826732a532SPeter Collingbourne       uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
15831f55fa0bSPeter Collingbourne       u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
15841f55fa0bSPeter Collingbourne       u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
15851f55fa0bSPeter Collingbourne       u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
15861f55fa0bSPeter Collingbourne       u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
15871f55fa0bSPeter Collingbourne 
15886732a532SPeter Collingbourne       if (DeallocationTid) {
15896732a532SPeter Collingbourne         // For UAF we only consider in-bounds fault addresses because
15906732a532SPeter Collingbourne         // out-of-bounds UAF is rare and attempting to detect it is very likely
15916732a532SPeter Collingbourne         // to result in false positives.
15926732a532SPeter Collingbourne         if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
15936732a532SPeter Collingbourne           continue;
15946732a532SPeter Collingbourne       } else {
15956732a532SPeter Collingbourne         // Ring buffer OOB is only possible with secondary allocations. In this
15966732a532SPeter Collingbourne         // case we are guaranteed a guard region of at least a page on either
15976732a532SPeter Collingbourne         // side of the allocation (guard page on the right, guard page + tagged
15986732a532SPeter Collingbourne         // region on the left), so ignore any faults outside of that range.
15996732a532SPeter Collingbourne         if (FaultAddr < EntryPtr - getPageSizeCached() ||
16006732a532SPeter Collingbourne             FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
16016732a532SPeter Collingbourne           continue;
16026732a532SPeter Collingbourne 
16031f55fa0bSPeter Collingbourne         // For UAF the ring buffer will contain two entries, one for the
16041f55fa0bSPeter Collingbourne         // allocation and another for the deallocation. Don't report buffer
16051f55fa0bSPeter Collingbourne         // overflow/underflow using the allocation entry if we have already
16061f55fa0bSPeter Collingbourne         // collected a report from the deallocation entry.
16071f55fa0bSPeter Collingbourne         bool Found = false;
16081f55fa0bSPeter Collingbourne         for (uptr J = 0; J != NextErrorReport; ++J) {
16091f55fa0bSPeter Collingbourne           if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
16101f55fa0bSPeter Collingbourne             Found = true;
16111f55fa0bSPeter Collingbourne             break;
16121f55fa0bSPeter Collingbourne           }
16131f55fa0bSPeter Collingbourne         }
16141f55fa0bSPeter Collingbourne         if (Found)
16151f55fa0bSPeter Collingbourne           continue;
16161f55fa0bSPeter Collingbourne       }
16171f55fa0bSPeter Collingbourne 
16181f55fa0bSPeter Collingbourne       auto *R = &ErrorInfo->reports[NextErrorReport++];
16191f55fa0bSPeter Collingbourne       if (DeallocationTid)
16201f55fa0bSPeter Collingbourne         R->error_type = USE_AFTER_FREE;
16211f55fa0bSPeter Collingbourne       else if (FaultAddr < EntryPtr)
16221f55fa0bSPeter Collingbourne         R->error_type = BUFFER_UNDERFLOW;
16231f55fa0bSPeter Collingbourne       else
16241f55fa0bSPeter Collingbourne         R->error_type = BUFFER_OVERFLOW;
16251f55fa0bSPeter Collingbourne 
16261f55fa0bSPeter Collingbourne       R->allocation_address = UntaggedEntryPtr;
16271f55fa0bSPeter Collingbourne       R->allocation_size = EntrySize;
16281f55fa0bSPeter Collingbourne       collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
16291f55fa0bSPeter Collingbourne       R->allocation_tid = AllocationTid;
16301f55fa0bSPeter Collingbourne       collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
16311f55fa0bSPeter Collingbourne       R->deallocation_tid = DeallocationTid;
16321f55fa0bSPeter Collingbourne     }
163321d50019SPeter Collingbourne   }
163421d50019SPeter Collingbourne 
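  // [Illustrative sketch, not upstream scudo code] The loop above scans the
  // ring buffer backwards from Pos and decides, per entry, whether the fault
  // can be attributed to it: freed entries (DeallocationTid != 0) only match
  // in-bounds fault addresses, while live entries also match within one guard
  // page on either side. The guarded block below restates that decision over
  // plain integers; all names in it are hypothetical stand-ins.
#if 0
#include <cstdint>

enum class MemErrorKind { None, UseAfterFree, BufferUnderflow, BufferOverflow };

// Deallocated stands in for "DeallocationTid != 0"; PageSize models
// getPageSizeCached().
inline MemErrorKind classifyFault(uintptr_t FaultAddr, uintptr_t AllocStart,
                                  uintptr_t AllocSize, bool Deallocated,
                                  uintptr_t PageSize) {
  if (Deallocated) {
    // Use-after-free: only in-bounds faults, to keep false positives rare.
    if (FaultAddr < AllocStart || FaultAddr >= AllocStart + AllocSize)
      return MemErrorKind::None;
    return MemErrorKind::UseAfterFree;
  }
  // Live (secondary) allocation: guard regions of at least a page bound it on
  // both sides, so only faults within one page can be attributed to it.
  if (FaultAddr < AllocStart - PageSize ||
      FaultAddr >= AllocStart + AllocSize + PageSize)
    return MemErrorKind::None;
  return FaultAddr < AllocStart ? MemErrorKind::BufferUnderflow
                                : MemErrorKind::BufferOverflow;
}
#endif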
1635f7b1489fSKostya Kortchinsky   uptr getStats(ScopedString *Str) {
1636f7b1489fSKostya Kortchinsky     Primary.getStats(Str);
1637f7b1489fSKostya Kortchinsky     Secondary.getStats(Str);
1638f7b1489fSKostya Kortchinsky     Quarantine.getStats(Str);
1639c5c0f9b4SChia-hung Duan     TSDRegistry.getStats(Str);
1640f7b1489fSKostya Kortchinsky     return Str->length();
1641f7b1489fSKostya Kortchinsky   }
16422426cc77SFlorian Mayer 
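  // [Illustrative sketch, not upstream scudo code] getStats() above only
  // appends text to the caller-provided ScopedString; a minimal way a caller
  // might dump it, assuming a hypothetical, already-initialized allocator
  // named `Instance`:
#if 0
void printAllocatorStats() {
  scudo::ScopedString Str;
  Instance.getStats(&Str); // Primary, Secondary, Quarantine, TSD registry.
  Str.output();            // Writes the accumulated report to the log.
}
#endif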
16432426cc77SFlorian Mayer   static typename AllocationRingBuffer::Entry *
16446dd6d487SFlorian Mayer   getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
16456dd6d487SFlorian Mayer     char *RBEntryStart =
16466dd6d487SFlorian Mayer         &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
16472426cc77SFlorian Mayer     return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
16486dd6d487SFlorian Mayer         RBEntryStart)[N];
16492426cc77SFlorian Mayer   }
16502426cc77SFlorian Mayer   static const typename AllocationRingBuffer::Entry *
16516dd6d487SFlorian Mayer   getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
16526dd6d487SFlorian Mayer     const char *RBEntryStart =
16536dd6d487SFlorian Mayer         &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
16542426cc77SFlorian Mayer     return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
16556dd6d487SFlorian Mayer         RBEntryStart)[N];
16562426cc77SFlorian Mayer   }
16572426cc77SFlorian Mayer 
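  // [Illustrative sketch, not upstream scudo code] Both overloads above rely
  // on the ring buffer being one contiguous mapping laid out as
  // [AllocationRingBuffer header][Entry 0][Entry 1]...[Entry N-1]. The guarded
  // block repeats the same address computation with hypothetical mock types.
#if 0
#include <cassert>
#include <cstddef>
#include <cstdint>

struct MockHeader { uintptr_t Elements; };
struct MockEntry { uintptr_t Ptr; uint32_t AllocationTrace; };

inline MockEntry *mockGetEntry(MockHeader *RB, size_t N) {
  // Entry N starts sizeof(MockHeader) + N * sizeof(MockEntry) bytes past RB.
  char *EntryStart = reinterpret_cast<char *>(RB) + sizeof(MockHeader);
  return &reinterpret_cast<MockEntry *>(EntryStart)[N];
}

inline void layoutDemo() {
  // The real buffer comes from an anonymous mapping; an aligned char array is
  // enough to show the arithmetic.
  alignas(MockEntry) char Buffer[sizeof(MockHeader) + 4 * sizeof(MockEntry)] = {};
  auto *RB = reinterpret_cast<MockHeader *>(Buffer);
  RB->Elements = 4;
  assert(reinterpret_cast<char *>(mockGetEntry(RB, 2)) ==
         Buffer + sizeof(MockHeader) + 2 * sizeof(MockEntry));
}
#endif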
16580dbd804aSChristopher Ferris   void initRingBufferMaybe() {
16590dbd804aSChristopher Ferris     ScopedLock L(RingBufferInitLock);
16600dbd804aSChristopher Ferris     if (getRingBuffer() != nullptr)
1661a66dc461SFlorian Mayer       return;
16620dbd804aSChristopher Ferris 
16630dbd804aSChristopher Ferris     int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
16640dbd804aSChristopher Ferris     if (ring_buffer_size <= 0)
16650dbd804aSChristopher Ferris       return;
16660dbd804aSChristopher Ferris 
16670dbd804aSChristopher Ferris     u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);
16683da01663SFlorian Mayer 
16693da01663SFlorian Mayer     // We store alloc and free stacks for each entry.
16703da01663SFlorian Mayer     constexpr u32 kStacksPerRingBufferEntry = 2;
16713da01663SFlorian Mayer     constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
16723da01663SFlorian Mayer     static_assert(isPowerOfTwo(kMaxU32Pow2));
16736ddb25edSFlorian Mayer     // On Android we always have 3 frames at the bottom: __start_main,
16746ddb25edSFlorian Mayer     // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
16756ddb25edSFlorian Mayer     // Allocator::allocate. This leaves 10 frames for the user app. The next
16766ddb25edSFlorian Mayer     // smallest power of two (8) would only leave 2, which is clearly too
16776ddb25edSFlorian Mayer     // little.
16786ddb25edSFlorian Mayer     constexpr u32 kFramesPerStack = 16;
16793da01663SFlorian Mayer     static_assert(isPowerOfTwo(kFramesPerStack));
16803da01663SFlorian Mayer 
16813da01663SFlorian Mayer     if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
16823da01663SFlorian Mayer       return;
16833da01663SFlorian Mayer     u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
16843da01663SFlorian Mayer                                                      AllocationRingBufferSize));
16853da01663SFlorian Mayer     if (TabSize > UINT32_MAX / kFramesPerStack)
16863da01663SFlorian Mayer       return;
16873da01663SFlorian Mayer     u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
16883da01663SFlorian Mayer 
16896dd6d487SFlorian Mayer     uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
16903da01663SFlorian Mayer                           sizeof(atomic_u32) * TabSize;
16913da01663SFlorian Mayer     MemMapT DepotMap;
16923da01663SFlorian Mayer     DepotMap.map(
16933da01663SFlorian Mayer         /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
16943da01663SFlorian Mayer         "scudo:stack_depot");
16956dd6d487SFlorian Mayer     auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
16963da01663SFlorian Mayer     Depot->init(RingSize, TabSize);
16973da01663SFlorian Mayer 
169842069258SFabio D'Urso     MemMapT MemMap;
169942069258SFabio D'Urso     MemMap.map(
170042069258SFabio D'Urso         /*Addr=*/0U,
1701a9269773SChia-hung Duan         roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
1702a9269773SChia-hung Duan                 getPageSizeCached()),
170342069258SFabio D'Urso         "scudo:ring_buffer");
17046dd6d487SFlorian Mayer     auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
17056dd6d487SFlorian Mayer     RB->RawRingBufferMap = MemMap;
17066dd6d487SFlorian Mayer     RB->RingBufferElements = AllocationRingBufferSize;
17076dd6d487SFlorian Mayer     RB->Depot = Depot;
17086dd6d487SFlorian Mayer     RB->StackDepotSize = StackDepotSize;
17096dd6d487SFlorian Mayer     RB->RawStackDepotMap = DepotMap;
17106dd6d487SFlorian Mayer 
17116dd6d487SFlorian Mayer     atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
17126dd6d487SFlorian Mayer                  memory_order_release);
17132426cc77SFlorian Mayer   }
17142426cc77SFlorian Mayer 
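  // [Illustrative sketch, not upstream scudo code] The depot sizing logic
  // above, restated as plain constexpr arithmetic. 32768 is only an example
  // element count, not a claim about any platform default; the real code also
  // bails out when an intermediate product would overflow a u32.
#if 0
#include <cstdint>

constexpr uint32_t roundUpPow2(uint32_t X) {
  uint32_t P = 1;
  while (P < X)
    P <<= 1;
  return P;
}

constexpr uint32_t kStacksPerEntry = 2;  // one alloc + one dealloc stack
constexpr uint32_t kFramesPerStack = 16; // see the frame-budget comment above

constexpr uint32_t ExampleElements = 32768;
constexpr uint32_t TabSize = roundUpPow2(kStacksPerEntry * ExampleElements);
constexpr uint32_t RingSize = TabSize * kFramesPerStack;

static_assert(TabSize == 65536, "2 * 32768 is already a power of two");
static_assert(RingSize == 1048576, "16 frames per hash-table slot");
// Depot payload: one 64-bit ring slot per frame plus one 32-bit hash-table
// slot per tab entry, before adding sizeof(StackDepot) and rounding the total
// up to a multiple of the page size.
static_assert(sizeof(uint64_t) * RingSize + sizeof(uint32_t) * TabSize ==
                  8388608 + 262144,
              "about 8.25 MiB of depot payload for this example");
#endif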
17150f1a92baSFabio D'Urso   void unmapRingBuffer() {
17166dd6d487SFlorian Mayer     AllocationRingBuffer *RB = getRingBuffer();
17176dd6d487SFlorian Mayer     if (RB == nullptr)
17186dd6d487SFlorian Mayer       return;
17196dd6d487SFlorian Mayer     // N.B. the RawStackDepotMap field lives inside the pages owned by
17206dd6d487SFlorian Mayer     // RawRingBufferMap, so the depot must be unmapped first; order matters.
1721dd741fc1SChiaHungDuan     RB->RawStackDepotMap.unmap();
1722cda41308SFabio D'Urso     // Note that `RB->RawRingBufferMap` is itself stored on the pages that it
1723cda41308SFabio D'Urso     // manages. Copy it to a local to take over ownership before calling
1724cda41308SFabio D'Urso     // unmap(), so that nothing during the unmap() touches the released pages.
1725cda41308SFabio D'Urso     MemMapT RawRingBufferMap = RB->RawRingBufferMap;
1726dd741fc1SChiaHungDuan     RawRingBufferMap.unmap();
17276dd6d487SFlorian Mayer     atomic_store(&RingBufferAddress, 0, memory_order_release);
17280f1a92baSFabio D'Urso   }
17290f1a92baSFabio D'Urso 
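  // [Illustrative sketch, not upstream scudo code] unmapRingBuffer() above has
  // to cope with descriptors that live inside the memory they describe: the
  // depot map is stored in the ring buffer's pages, and the ring buffer map is
  // stored in its own pages. The guarded block reduces that to a mock type;
  // every name in it is a hypothetical stand-in.
#if 0
#include <cstddef>
#include <cstdlib>
#include <new>

struct MockMap {       // stands in for a by-value mapping descriptor
  void *Base = nullptr;
  size_t Size = 0;
  void release() {     // stands in for releasing the mapping
    std::free(Base);
    Base = nullptr;
    Size = 0;
  }
};

struct MockRegionHeader { // header placed at the start of its own region
  MockMap SelfMap;        // descriptor of the region this header lives in
};

inline void destroyRegion(MockRegionHeader *Header) {
  // Copy the descriptor out of the region *before* releasing the region;
  // calling Header->SelfMap.release() directly would have release() read a
  // field that sits in memory it is in the middle of freeing.
  MockMap Local = Header->SelfMap;
  Local.release(); // Header dangles from here on; never touch it again.
}

inline void demo() {
  void *Raw = std::malloc(4096);
  auto *Header = new (Raw) MockRegionHeader{};
  Header->SelfMap = MockMap{Raw, 4096};
  destroyRegion(Header);
}
#endif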
1730a5bdc4a4SFlorian Mayer   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
17312426cc77SFlorian Mayer     return sizeof(AllocationRingBuffer) +
1732a5bdc4a4SFlorian Mayer            RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
1733a5bdc4a4SFlorian Mayer   }
1734a5bdc4a4SFlorian Mayer 
1735a5bdc4a4SFlorian Mayer   static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
1736a5bdc4a4SFlorian Mayer     if (Bytes < sizeof(AllocationRingBuffer)) {
1737a5bdc4a4SFlorian Mayer       return 0;
1738a5bdc4a4SFlorian Mayer     }
1739a5bdc4a4SFlorian Mayer     return (Bytes - sizeof(AllocationRingBuffer)) /
17402426cc77SFlorian Mayer            sizeof(typename AllocationRingBuffer::Entry);
17412426cc77SFlorian Mayer   }
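  // [Illustrative sketch, not upstream scudo code] The two helpers above are
  // inverses over whole entries: Bytes = header + N * entry, and any byte
  // count smaller than the header maps back to zero elements. The stand-in
  // sizes below are made up; the real values depend on the allocator config.
#if 0
#include <cstddef>

constexpr size_t kMockHeaderSize = 64; // stands in for the ring buffer header
constexpr size_t kMockEntrySize = 48;  // stands in for one ring buffer entry

constexpr size_t mockSizeInBytes(size_t Elements) {
  return kMockHeaderSize + Elements * kMockEntrySize;
}
constexpr size_t mockElementsFromBytes(size_t Bytes) {
  return Bytes < kMockHeaderSize ? 0
                                 : (Bytes - kMockHeaderSize) / kMockEntrySize;
}

static_assert(mockElementsFromBytes(mockSizeInBytes(1000)) == 1000,
              "round-trips for whole element counts");
static_assert(mockElementsFromBytes(kMockHeaderSize - 1) == 0,
              "buffers smaller than the header hold no entries");
#endif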
1742e4eadf17SKostya Kortchinsky };
1743e4eadf17SKostya Kortchinsky 
1744e4eadf17SKostya Kortchinsky } // namespace scudo
1745e4eadf17SKostya Kortchinsky 
1746e4eadf17SKostya Kortchinsky #endif // SCUDO_COMBINED_H_
1747