//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
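// For reference, the resulting encoding is rz_log -> redzone size:
//   0 -> 16, 1 -> 32, 2 -> 64, 3 -> 128, 4 -> 256, 5 -> 512, 6 -> 1024,
//   7 -> 2048, i.e. size == 16 << rz_log, so rz_log always fits in 3 bits.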

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
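// The two helpers above pack an allocation/free context into one 64-bit
// atomic: the thread id occupies the upper 32 bits and the stack depot id the
// lower 32 bits, so both values are published and read with a single relaxed
// atomic operation.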

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
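// (In the diagrams above, ChunkHeader below is the 'H' part; the M/B scheme
// used for large left redzones is implemented by the LargeChunkHeader class
// further down in this file.)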
class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }
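  // Note: on 64-bit targets the user-requested size is split into the 16-bit
  // high part and 32-bit low part above, so the header can represent sizes up
  // to 2^48 - 1; on 32-bit targets only the low 32 bits are used.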

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but AsanChunk is not
  // yet ready, or almost returned to the underlying allocator and AsanChunk
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
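// A note on the protocol above: Set(p) stores the header pointer before
// publishing kAllocBegMagic with release ordering, and Get() checks the magic
// with acquire ordering before trusting chunk_header, so a concurrent reader
// never observes the magic without a valid pointer. Set(nullptr) clears the
// magic with a compare-exchange and asserts that it was indeed set.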

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    void *p = get_allocator().GetBlockBegin(m);
    if (p != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
    }

    u8 old_chunk_state = CHUNK_QUARANTINE;
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_INVALID, memory_order_acquire)) {
      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
    }

    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
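  // Worked example for the thresholds above: a 100-byte request falls into
  // the "<= 512 - 64" bucket, so rz_log is 2 and the redzone is 64 bytes; the
  // result is then clamped to the configured min_redzone/max_redzone (and to
  // at least the rounded-up ChunkHeader size).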

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
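  // Round-trip example for the 3-bit encoding above: a requested alignment of
  // 32 encodes as log2(32) - 2 == 3 and decodes back to 1 << (3 + 2) == 32;
  // alignments below 8 encode as 0, and anything above 512 is clamped to 512.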

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
      needed_size += rz_size;
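    // Illustration (assuming the default 8-byte shadow granularity and default
    // redzone flags): a 100-byte malloc with 8-byte alignment gets rz_size ==
    // 64 and rounded_size == 104, so it asks the underlying allocator for 168
    // bytes: a left redzone holding the ChunkHeader, then the user bytes, with
    // the remainder serving as the right redzone.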
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }
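  // Note that the compare-exchange above is also what detects double-free:
  // a second free() of the same pointer finds chunk_state already set to
  // CHUNK_QUARANTINE (or CHUNK_INVALID) rather than CHUNK_ALLOCATED and takes
  // the ReportInvalidFree() path instead of quarantining the chunk again.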

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but any other
    // value definitely means it is not a valid chunk.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }
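  // Two lookup paths are used above: chunks whose header does not sit at the
  // start of the underlying block (large or over-aligned allocations) are
  // found via the LargeChunkHeader magic, while primary-allocator chunks
  // place the AsanChunk header directly at alloc_beg.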

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually a
      // right buffer overflow from the chunk before it. Search a bit backwards
      // to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}
9143cab2bb3Spatrick
InitializeAllocator(const AllocatorOptions & options)9153cab2bb3Spatrick void InitializeAllocator(const AllocatorOptions &options) {
9163cab2bb3Spatrick instance.InitLinkerInitialized(options);
9173cab2bb3Spatrick }
9183cab2bb3Spatrick
ReInitializeAllocator(const AllocatorOptions & options)9193cab2bb3Spatrick void ReInitializeAllocator(const AllocatorOptions &options) {
9203cab2bb3Spatrick instance.ReInitialize(options);
9213cab2bb3Spatrick }
9223cab2bb3Spatrick
GetAllocatorOptions(AllocatorOptions * options)9233cab2bb3Spatrick void GetAllocatorOptions(AllocatorOptions *options) {
9243cab2bb3Spatrick instance.GetOptions(options);
9253cab2bb3Spatrick }
9263cab2bb3Spatrick
FindHeapChunkByAddress(uptr addr)9273cab2bb3Spatrick AsanChunkView FindHeapChunkByAddress(uptr addr) {
9283cab2bb3Spatrick return instance.FindHeapChunkByAddress(addr);
9293cab2bb3Spatrick }
FindHeapChunkByAllocBeg(uptr addr)9303cab2bb3Spatrick AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
9313cab2bb3Spatrick return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
9323cab2bb3Spatrick }
9333cab2bb3Spatrick
CommitBack()9343cab2bb3Spatrick void AsanThreadLocalMallocStorage::CommitBack() {
9353cab2bb3Spatrick GET_STACK_TRACE_MALLOC;
9363cab2bb3Spatrick instance.CommitBack(this, &stack);
9373cab2bb3Spatrick }
9383cab2bb3Spatrick
PrintInternalAllocatorStats()9393cab2bb3Spatrick void PrintInternalAllocatorStats() {
9403cab2bb3Spatrick instance.PrintStats();
9413cab2bb3Spatrick }
9423cab2bb3Spatrick
asan_free(void * ptr,BufferedStackTrace * stack,AllocType alloc_type)9433cab2bb3Spatrick void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
9443cab2bb3Spatrick instance.Deallocate(ptr, 0, 0, stack, alloc_type);
9453cab2bb3Spatrick }
9463cab2bb3Spatrick
asan_delete(void * ptr,uptr size,uptr alignment,BufferedStackTrace * stack,AllocType alloc_type)9473cab2bb3Spatrick void asan_delete(void *ptr, uptr size, uptr alignment,
9483cab2bb3Spatrick BufferedStackTrace *stack, AllocType alloc_type) {
9493cab2bb3Spatrick instance.Deallocate(ptr, size, alignment, stack, alloc_type);
9503cab2bb3Spatrick }
9513cab2bb3Spatrick
asan_malloc(uptr size,BufferedStackTrace * stack)9523cab2bb3Spatrick void *asan_malloc(uptr size, BufferedStackTrace *stack) {
9533cab2bb3Spatrick return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
9543cab2bb3Spatrick }
9553cab2bb3Spatrick
asan_calloc(uptr nmemb,uptr size,BufferedStackTrace * stack)9563cab2bb3Spatrick void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
9573cab2bb3Spatrick return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
9583cab2bb3Spatrick }
9593cab2bb3Spatrick
asan_reallocarray(void * p,uptr nmemb,uptr size,BufferedStackTrace * stack)9603cab2bb3Spatrick void *asan_reallocarray(void *p, uptr nmemb, uptr size,
9613cab2bb3Spatrick BufferedStackTrace *stack) {
9623cab2bb3Spatrick if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
9633cab2bb3Spatrick errno = errno_ENOMEM;
9643cab2bb3Spatrick if (AllocatorMayReturnNull())
9653cab2bb3Spatrick return nullptr;
9663cab2bb3Spatrick ReportReallocArrayOverflow(nmemb, size, stack);
9673cab2bb3Spatrick }
9683cab2bb3Spatrick return asan_realloc(p, nmemb * size, stack);
9693cab2bb3Spatrick }
9703cab2bb3Spatrick
asan_realloc(void * p,uptr size,BufferedStackTrace * stack)9713cab2bb3Spatrick void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
9723cab2bb3Spatrick if (!p)
9733cab2bb3Spatrick return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
9743cab2bb3Spatrick if (size == 0) {
9753cab2bb3Spatrick if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
9763cab2bb3Spatrick instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
9773cab2bb3Spatrick return nullptr;
9783cab2bb3Spatrick }
9793cab2bb3Spatrick // Allocate a size of 1 if we shouldn't free() on Realloc to 0
9803cab2bb3Spatrick size = 1;
9813cab2bb3Spatrick }
9823cab2bb3Spatrick return SetErrnoOnNull(instance.Reallocate(p, size, stack));
9833cab2bb3Spatrick }
9843cab2bb3Spatrick
asan_valloc(uptr size,BufferedStackTrace * stack)9853cab2bb3Spatrick void *asan_valloc(uptr size, BufferedStackTrace *stack) {
9863cab2bb3Spatrick return SetErrnoOnNull(
9873cab2bb3Spatrick instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
9883cab2bb3Spatrick }
9893cab2bb3Spatrick
asan_pvalloc(uptr size,BufferedStackTrace * stack)9903cab2bb3Spatrick void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
9913cab2bb3Spatrick uptr PageSize = GetPageSizeCached();
9923cab2bb3Spatrick if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
9933cab2bb3Spatrick errno = errno_ENOMEM;
9943cab2bb3Spatrick if (AllocatorMayReturnNull())
9953cab2bb3Spatrick return nullptr;
9963cab2bb3Spatrick ReportPvallocOverflow(size, stack);
9973cab2bb3Spatrick }
9983cab2bb3Spatrick // pvalloc(0) should allocate one page.
9993cab2bb3Spatrick size = size ? RoundUpTo(size, PageSize) : PageSize;
10003cab2bb3Spatrick return SetErrnoOnNull(
10013cab2bb3Spatrick instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
10023cab2bb3Spatrick }
10033cab2bb3Spatrick
asan_memalign(uptr alignment,uptr size,BufferedStackTrace * stack,AllocType alloc_type)10043cab2bb3Spatrick void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
10053cab2bb3Spatrick AllocType alloc_type) {
10063cab2bb3Spatrick if (UNLIKELY(!IsPowerOfTwo(alignment))) {
10073cab2bb3Spatrick errno = errno_EINVAL;
10083cab2bb3Spatrick if (AllocatorMayReturnNull())
10093cab2bb3Spatrick return nullptr;
10103cab2bb3Spatrick ReportInvalidAllocationAlignment(alignment, stack);
10113cab2bb3Spatrick }
10123cab2bb3Spatrick return SetErrnoOnNull(
10133cab2bb3Spatrick instance.Allocate(size, alignment, stack, alloc_type, true));
10143cab2bb3Spatrick }
10153cab2bb3Spatrick
asan_aligned_alloc(uptr alignment,uptr size,BufferedStackTrace * stack)10163cab2bb3Spatrick void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
10173cab2bb3Spatrick if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
10183cab2bb3Spatrick errno = errno_EINVAL;
10193cab2bb3Spatrick if (AllocatorMayReturnNull())
10203cab2bb3Spatrick return nullptr;
10213cab2bb3Spatrick ReportInvalidAlignedAllocAlignment(size, alignment, stack);
10223cab2bb3Spatrick }
10233cab2bb3Spatrick return SetErrnoOnNull(
10243cab2bb3Spatrick instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
10253cab2bb3Spatrick }
10263cab2bb3Spatrick
asan_posix_memalign(void ** memptr,uptr alignment,uptr size,BufferedStackTrace * stack)10273cab2bb3Spatrick int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
10283cab2bb3Spatrick BufferedStackTrace *stack) {
10293cab2bb3Spatrick if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
10303cab2bb3Spatrick if (AllocatorMayReturnNull())
10313cab2bb3Spatrick return errno_EINVAL;
10323cab2bb3Spatrick ReportInvalidPosixMemalignAlignment(alignment, stack);
10333cab2bb3Spatrick }
10343cab2bb3Spatrick void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
10353cab2bb3Spatrick if (UNLIKELY(!ptr))
10363cab2bb3Spatrick // OOM error is already taken care of by Allocate.
10373cab2bb3Spatrick return errno_ENOMEM;
10383cab2bb3Spatrick CHECK(IsAligned((uptr)ptr, alignment));
10393cab2bb3Spatrick *memptr = ptr;
10403cab2bb3Spatrick return 0;
10413cab2bb3Spatrick }
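// Caller-side sketch (illustrative): posix_memalign() reports errors through
// its return value rather than errno, and *memptr is written only on success:
//   void *p = nullptr;
//   int rc = posix_memalign(&p, 64, 100);  // rc == 0, p aligned to 64 bytes
//   rc = posix_memalign(&p, 3, 100);       // EINVAL if the allocator may
//                                          // return null, otherwise a fatal
//                                          // report; p is left unchanged.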
10423cab2bb3Spatrick
10433cab2bb3Spatrick uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
10443cab2bb3Spatrick if (!ptr) return 0;
10453cab2bb3Spatrick uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
10463cab2bb3Spatrick if (flags()->check_malloc_usable_size && (usable_size == 0)) {
10473cab2bb3Spatrick GET_STACK_TRACE_FATAL(pc, bp);
10483cab2bb3Spatrick ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
10493cab2bb3Spatrick }
10503cab2bb3Spatrick return usable_size;
10513cab2bb3Spatrick }
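// Illustrative: malloc_usable_size() forwards here, and ASan reports exactly
// the requested size (no rounding up to a size class):
//   void *p = malloc(100);
//   malloc_usable_size(p);  // 100
//   free(p);
//   malloc_usable_size(p);  // no longer owned: reported as an error when
//                           // check_malloc_usable_size=1, otherwise 0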
10523cab2bb3Spatrick
10533cab2bb3Spatrick uptr asan_mz_size(const void *ptr) {
10543cab2bb3Spatrick return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
10553cab2bb3Spatrick }
10563cab2bb3Spatrick
1057*810390e3Srobert void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1058*810390e3Srobert instance.ForceLock();
10593cab2bb3Spatrick }
10603cab2bb3Spatrick
1061*810390e3Srobert void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1062*810390e3Srobert instance.ForceUnlock();
10633cab2bb3Spatrick }
10643cab2bb3Spatrick
10653cab2bb3Spatrick } // namespace __asan
10663cab2bb3Spatrick
10673cab2bb3Spatrick // --- Implementation of LSan-specific functions --- {{{1
10683cab2bb3Spatrick namespace __lsan {
10693cab2bb3Spatrick void LockAllocator() {
10703cab2bb3Spatrick __asan::get_allocator().ForceLock();
10713cab2bb3Spatrick }
10723cab2bb3Spatrick
10733cab2bb3Spatrick void UnlockAllocator() {
10743cab2bb3Spatrick __asan::get_allocator().ForceUnlock();
10753cab2bb3Spatrick }
10763cab2bb3Spatrick
10773cab2bb3Spatrick void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
10783cab2bb3Spatrick *begin = (uptr)&__asan::get_allocator();
10793cab2bb3Spatrick *end = *begin + sizeof(__asan::get_allocator());
10803cab2bb3Spatrick }
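// Sketch of the resulting range (directly from the code above; the intended
// consumer is LSan, which needs to know where the allocator's own global
// state lives):
//   uptr b, e;
//   GetAllocatorGlobalRange(&b, &e);
//   // e - b == sizeof(__asan::AsanAllocator), starting at the allocator
//   // object itself.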
10813cab2bb3Spatrick
10823cab2bb3Spatrick uptr PointsIntoChunk(void *p) {
10833cab2bb3Spatrick uptr addr = reinterpret_cast<uptr>(p);
10843cab2bb3Spatrick __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1085d89ec533Spatrick if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
1086d89ec533Spatrick __asan::CHUNK_ALLOCATED)
10873cab2bb3Spatrick return 0;
1088d89ec533Spatrick uptr chunk = m->Beg();
1089d89ec533Spatrick if (m->AddrIsInside(addr))
10903cab2bb3Spatrick return chunk;
1091d89ec533Spatrick if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
10923cab2bb3Spatrick return chunk;
10933cab2bb3Spatrick return 0;
10943cab2bb3Spatrick }
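// Resolution sketch (illustrative): any address inside a live user region
// maps back to that region's beginning, so for
//   char *p = new char[100];
// both PointsIntoChunk(p) and PointsIntoChunk(p + 50) yield (uptr)p, while a
// pointer into a freed or quarantined chunk yields 0. Zero-sized operator new
// allocations get a pass through IsSpecialCaseOfOperatorNew0.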
10953cab2bb3Spatrick
10963cab2bb3Spatrick uptr GetUserBegin(uptr chunk) {
1097*810390e3Srobert // FIXME: All use cases already provide the chunk address, so the
1098*810390e3Srobert // GetAsanChunkByAddrFastLocked lookup below is not needed.
10993cab2bb3Spatrick __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1100d89ec533Spatrick return m ? m->Beg() : 0;
11013cab2bb3Spatrick }
11023cab2bb3Spatrick
11033cab2bb3Spatrick LsanMetadata::LsanMetadata(uptr chunk) {
1104d89ec533Spatrick metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
1105d89ec533Spatrick : nullptr;
11063cab2bb3Spatrick }
11073cab2bb3Spatrick
11083cab2bb3Spatrick bool LsanMetadata::allocated() const {
1109d89ec533Spatrick if (!metadata_)
1110d89ec533Spatrick return false;
11113cab2bb3Spatrick __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1112d89ec533Spatrick return atomic_load(&m->chunk_state, memory_order_relaxed) ==
1113d89ec533Spatrick __asan::CHUNK_ALLOCATED;
11143cab2bb3Spatrick }
11153cab2bb3Spatrick
11163cab2bb3Spatrick ChunkTag LsanMetadata::tag() const {
11173cab2bb3Spatrick __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
11183cab2bb3Spatrick return static_cast<ChunkTag>(m->lsan_tag);
11193cab2bb3Spatrick }
11203cab2bb3Spatrick
11213cab2bb3Spatrick void LsanMetadata::set_tag(ChunkTag value) {
11223cab2bb3Spatrick __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
11233cab2bb3Spatrick m->lsan_tag = value;
11243cab2bb3Spatrick }
11253cab2bb3Spatrick
11263cab2bb3Spatrick uptr LsanMetadata::requested_size() const {
11273cab2bb3Spatrick __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1128d89ec533Spatrick return m->UsedSize();
11293cab2bb3Spatrick }
11303cab2bb3Spatrick
11313cab2bb3Spatrick u32 LsanMetadata::stack_trace_id() const {
11323cab2bb3Spatrick __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1133d89ec533Spatrick u32 tid = 0;
1134d89ec533Spatrick u32 stack = 0;
1135d89ec533Spatrick m->GetAllocContext(tid, stack);
1136d89ec533Spatrick return stack;
11373cab2bb3Spatrick }
11383cab2bb3Spatrick
11393cab2bb3Spatrick void ForEachChunk(ForEachChunkCallback callback, void *arg) {
11403cab2bb3Spatrick __asan::get_allocator().ForEachChunk(callback, arg);
11413cab2bb3Spatrick }
11423cab2bb3Spatrick
11433cab2bb3Spatrick IgnoreObjectResult IgnoreObjectLocked(const void *p) {
11443cab2bb3Spatrick uptr addr = reinterpret_cast<uptr>(p);
11453cab2bb3Spatrick __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1146d89ec533Spatrick if (!m ||
1147d89ec533Spatrick (atomic_load(&m->chunk_state, memory_order_acquire) !=
1148d89ec533Spatrick __asan::CHUNK_ALLOCATED) ||
1149d89ec533Spatrick !m->AddrIsInside(addr)) {
1150d89ec533Spatrick return kIgnoreObjectInvalid;
1151d89ec533Spatrick }
11523cab2bb3Spatrick if (m->lsan_tag == kIgnored)
11533cab2bb3Spatrick return kIgnoreObjectAlreadyIgnored;
11543cab2bb3Spatrick m->lsan_tag = __lsan::kIgnored;
11553cab2bb3Spatrick return kIgnoreObjectSuccess;
11563cab2bb3Spatrick }
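// Caller-side sketch (illustrative): __lsan_ignore_object(), declared in
// <sanitizer/lsan_interface.h>, ends up here with the allocator already
// locked (hence "Locked" in the name):
//   void *p = malloc(32);
//   __lsan_ignore_object(p);  // tags the chunk kIgnored so it is excluded
//                             // from leak reports; repeated calls return
//                             // kIgnoreObjectAlreadyIgnored internally.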
1157d89ec533Spatrick
11583cab2bb3Spatrick } // namespace __lsan
11593cab2bb3Spatrick
11603cab2bb3Spatrick // ---------------------- Interface ---------------- {{{1
11613cab2bb3Spatrick using namespace __asan;
11623cab2bb3Spatrick
11633cab2bb3Spatrick // The ASan allocator doesn't reserve extra usable bytes, so normally we would
11643cab2bb3Spatrick // just return "size". We don't want to expose our redzone sizes, etc., here.
11653cab2bb3Spatrick uptr __sanitizer_get_estimated_allocated_size(uptr size) {
11663cab2bb3Spatrick return size;
11673cab2bb3Spatrick }
11683cab2bb3Spatrick
11693cab2bb3Spatrick int __sanitizer_get_ownership(const void *p) {
11703cab2bb3Spatrick uptr ptr = reinterpret_cast<uptr>(p);
11713cab2bb3Spatrick return instance.AllocationSize(ptr) > 0;
11723cab2bb3Spatrick }
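// Illustrative use via <sanitizer/allocator_interface.h>:
//   void *p = malloc(8);
//   __sanitizer_get_ownership(p);   // 1: a live ASan heap allocation
//   __sanitizer_get_ownership(&p);  // 0: stack memory is not owned
// Ownership is derived solely from AllocationSize(), so freed pointers also
// report 0.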
11733cab2bb3Spatrick
11743cab2bb3Spatrick uptr __sanitizer_get_allocated_size(const void *p) {
11753cab2bb3Spatrick if (!p) return 0;
11763cab2bb3Spatrick uptr ptr = reinterpret_cast<uptr>(p);
11773cab2bb3Spatrick uptr allocated_size = instance.AllocationSize(ptr);
11783cab2bb3Spatrick // Die if p is not malloced or if it is already freed.
11793cab2bb3Spatrick if (allocated_size == 0) {
11803cab2bb3Spatrick GET_STACK_TRACE_FATAL_HERE;
11813cab2bb3Spatrick ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
11823cab2bb3Spatrick }
11833cab2bb3Spatrick return allocated_size;
11843cab2bb3Spatrick }
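// Contrast with __sanitizer_get_ownership() above (illustrative):
//   void *p = malloc(100);
//   __sanitizer_get_allocated_size(p);        // 100
//   __sanitizer_get_allocated_size(nullptr);  // 0, allowed explicitly
//   free(p);
//   __sanitizer_get_allocated_size(p);        // "not owned" error report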
11853cab2bb3Spatrick
11863cab2bb3Spatrick void __sanitizer_purge_allocator() {
11873cab2bb3Spatrick GET_STACK_TRACE_MALLOC;
11883cab2bb3Spatrick instance.Purge(&stack);
11893cab2bb3Spatrick }
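// Illustrative (declared in <sanitizer/allocator_interface.h>): a long-running
// process can call this at a quiet point to ask the allocator to release
// unused memory back to the OS; how much is reclaimed depends on the
// quarantine and the allocator's internal caches:
//   __sanitizer_purge_allocator();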
11903cab2bb3Spatrick
11913cab2bb3Spatrick int __asan_update_allocation_context(void* addr) {
11923cab2bb3Spatrick GET_STACK_TRACE_MALLOC;
11933cab2bb3Spatrick return instance.UpdateAllocationStack((uptr)addr, &stack);
11943cab2bb3Spatrick }
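// Usage sketch (illustrative; pool_get() is a hypothetical caching helper):
// re-stamping the allocation context makes later use-after-free reports point
// at the hand-out site instead of the original malloc:
//   void *obj = pool_get();                 // hypothetical: reuses a cached block
//   __asan_update_allocation_context(obj);  // non-zero if the chunk was found
//   return obj;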
1195